Skip to content

Commit bbd71e2

Browse files
Andy Ross and joerchan
Andy Ross authored and committed
[nrf fromtree] kernel/queue: Remove interior use of k_poll()
The k_queue data structure, when CONFIG_POLL was enabled, would inexplicably use k_poll() as its blocking mechanism instead of the original wait_q/pend() code. This was actually racy, see commit b173e43. The code was structured as a condition variable: using a spinlock around the queue data before deciding to block. But unlike pend_current_thread(), k_poll() cannot atomically release a lock. A workaround had been in place for this, and then accidentally reverted (both by me!) because the code looked "wrong". This is just fragile, there's no reason to have two implementations of k_queue_get(). Remove. Note that this also removes a test case in the work_queue test where (when CONFIG_POLL was enabled, but not otherwise) it was checking for the ability to immediately cancel a delayed work item that was submitted with a timeout of K_NO_WAIT (i.e. "queue it immediately"). This DOES NOT work with the original/non-poll queue backend, and has never been a documented behavior of k_delayed_work_submit_to_queue() under any circumstances. I don't know why we were testing this. Fixes #25904 Signed-off-by: Andy Ross <[email protected]> (cherry picked from commit 99c2d2d) Signed-off-by: Joakim Andersson <[email protected]>
1 parent 3a31123 commit bbd71e2

File tree

3 files changed

+8
-70
lines changed

3 files changed

+8
-70
lines changed

include/kernel.h

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2149,12 +2149,9 @@ static inline u32_t k_cycle_get_32(void)
21492149
struct k_queue {
21502150
sys_sflist_t data_q;
21512151
struct k_spinlock lock;
2152-
union {
2153-
_wait_q_t wait_q;
2154-
2155-
_POLL_EVENT;
2156-
};
2152+
_wait_q_t wait_q;
21572153

2154+
_POLL_EVENT;
21582155
_OBJECT_TRACING_NEXT_PTR(k_queue)
21592156
_OBJECT_TRACING_LINKED_FLAG
21602157
};
@@ -2163,10 +2160,8 @@ struct k_queue {
21632160
{ \
21642161
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
21652162
.lock = { }, \
2166-
{ \
2167-
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2168-
_POLL_EVENT_OBJ_INIT(obj) \
2169-
}, \
2163+
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2164+
_POLL_EVENT_OBJ_INIT(obj) \
21702165
_OBJECT_TRACING_INIT \
21712166
}
21722167

kernel/queue.c

Lines changed: 4 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -100,36 +100,31 @@ static inline void z_vrfy_k_queue_init(struct k_queue *queue)
100100
#include <syscalls/k_queue_init_mrsh.c>
101101
#endif
102102

103-
#if !defined(CONFIG_POLL)
104103
static void prepare_thread_to_run(struct k_thread *thread, void *data)
105104
{
106105
z_thread_return_value_set_with_data(thread, 0, data);
107106
z_ready_thread(thread);
108107
}
109-
#endif /* CONFIG_POLL */
110108

111-
#ifdef CONFIG_POLL
112109
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
113110
{
111+
#ifdef CONFIG_POLL
114112
z_handle_obj_poll_events(&queue->poll_events, state);
115-
}
116113
#endif
114+
}
117115

118116
void z_impl_k_queue_cancel_wait(struct k_queue *queue)
119117
{
120118
k_spinlock_key_t key = k_spin_lock(&queue->lock);
121-
#if !defined(CONFIG_POLL)
122119
struct k_thread *first_pending_thread;
123120

124121
first_pending_thread = z_unpend_first_thread(&queue->wait_q);
125122

126123
if (first_pending_thread != NULL) {
127124
prepare_thread_to_run(first_pending_thread, NULL);
128125
}
129-
#else
130-
handle_poll_events(queue, K_POLL_STATE_CANCELLED);
131-
#endif /* !CONFIG_POLL */
132126

127+
handle_poll_events(queue, K_POLL_STATE_CANCELLED);
133128
z_reschedule(&queue->lock, key);
134129
}
135130

@@ -146,7 +141,6 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
146141
bool alloc)
147142
{
148143
k_spinlock_key_t key = k_spin_lock(&queue->lock);
149-
#if !defined(CONFIG_POLL)
150144
struct k_thread *first_pending_thread;
151145

152146
first_pending_thread = z_unpend_first_thread(&queue->wait_q);
@@ -156,7 +150,6 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
156150
z_reschedule(&queue->lock, key);
157151
return 0;
158152
}
159-
#endif /* !CONFIG_POLL */
160153

161154
/* Only need to actually allocate if no threads are pending */
162155
if (alloc) {
@@ -173,12 +166,9 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
173166
} else {
174167
sys_sfnode_init(data, 0x0);
175168
}
176-
sys_sflist_insert(&queue->data_q, prev, data);
177169

178-
#if defined(CONFIG_POLL)
170+
sys_sflist_insert(&queue->data_q, prev, data);
179171
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
180-
#endif /* CONFIG_POLL */
181-
182172
z_reschedule(&queue->lock, key);
183173
return 0;
184174
}
@@ -238,7 +228,6 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
238228
}
239229

240230
k_spinlock_key_t key = k_spin_lock(&queue->lock);
241-
#if !defined(CONFIG_POLL)
242231
struct k_thread *thread = NULL;
243232

244233
if (head != NULL) {
@@ -255,13 +244,8 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
255244
sys_sflist_append_list(&queue->data_q, head, tail);
256245
}
257246

258-
#else
259-
sys_sflist_append_list(&queue->data_q, head, tail);
260247
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
261-
#endif /* !CONFIG_POLL */
262-
263248
z_reschedule(&queue->lock, key);
264-
265249
return 0;
266250
}
267251

@@ -292,32 +276,6 @@ int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
292276
return 0;
293277
}
294278

295-
#if defined(CONFIG_POLL)
296-
static void *k_queue_poll(struct k_queue *queue, k_timeout_t timeout)
297-
{
298-
struct k_poll_event event;
299-
int err;
300-
k_spinlock_key_t key;
301-
void *val;
302-
303-
k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
304-
K_POLL_MODE_NOTIFY_ONLY, queue);
305-
306-
event.state = K_POLL_STATE_NOT_READY;
307-
err = k_poll(&event, 1, timeout);
308-
309-
if (err && err != -EAGAIN) {
310-
return NULL;
311-
}
312-
313-
key = k_spin_lock(&queue->lock);
314-
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
315-
k_spin_unlock(&queue->lock, key);
316-
317-
return val;
318-
}
319-
#endif /* CONFIG_POLL */
320-
321279
void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
322280
{
323281
k_spinlock_key_t key = k_spin_lock(&queue->lock);
@@ -337,16 +295,9 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
337295
return NULL;
338296
}
339297

340-
#if defined(CONFIG_POLL)
341-
k_spin_unlock(&queue->lock, key);
342-
343-
return k_queue_poll(queue, timeout);
344-
345-
#else
346298
int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);
347299

348300
return (ret != 0) ? NULL : _current->base.swap_data;
349-
#endif /* CONFIG_POLL */
350301
}
351302

352303
#ifdef CONFIG_USERSPACE

tests/kernel/workq/work_queue/src/main.c

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -286,14 +286,6 @@ static void coop_delayed_work_cancel_main(int arg1, int arg2)
286286

287287
TC_PRINT(" - Cancel delayed work from coop thread\n");
288288
k_delayed_work_cancel(&delayed_tests[1].work);
289-
290-
#if defined(CONFIG_POLL)
291-
k_delayed_work_submit(&delayed_tests[2].work,
292-
K_NO_WAIT /* Submit immediately */);
293-
294-
TC_PRINT(" - Cancel pending delayed work from coop thread\n");
295-
k_delayed_work_cancel(&delayed_tests[2].work);
296-
#endif
297289
}
298290

299291
/**

0 commit comments

Comments (0)