Commit a9a2384

CLEANUP: sched: remove duplicate code in run_tasks_from_list()
Now that ->wake_date is common to tasks and tasklets, we no longer need to carry a duplicate control block to read and update it for each of the two types. And since this code sat early in the if/else fork between tasks and tasklets, hoisting it out of the block allows the task part to move into a more visible "else" branch, which in turn allows factoring the epilogue that resets th_ctx->current and updates profile_entry->cpu_time, previously duplicated as well. Overall, doing just that saved 253 bytes in the function, or ~1/6 of its size, which is not bad considering that it's on a hot path. And the code got much more readable.
Parent: e0e6d81
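
The precondition stated in the message above, namely that ->wake_date is now common to tasks and tasklets, is what makes a single control block safe: both structures expose the field at the same offset, so the scheduler can read and clear it through the task pointer before it even tests TASK_F_TASKLET. Below is a minimal, compilable sketch of that layout idea; the struct names and members are hypothetical stand-ins, not HAProxy's actual definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only -- NOT HAProxy's real 'struct task'/'struct tasklet'.
 * The point: when both types carry wake_date among their common initial
 * members, one code path can handle either object. */
struct task_sketch {
	unsigned int state;   /* would hold flags such as TASK_F_TASKLET */
	uint32_t wake_date;   /* shared member, same offset in both types */
	/* task-only members would follow */
};

struct tasklet_sketch {
	unsigned int state;
	uint32_t wake_date;   /* same offset as in struct task_sketch */
	/* tasklet-only members would follow */
};

int main(void)
{
	/* both offsets print the same value, which is what lets the
	 * scheduler read t->wake_date before knowing the task's type */
	printf("task: %zu, tasklet: %zu\n",
	       offsetof(struct task_sketch, wake_date),
	       offsetof(struct tasklet_sketch, wake_date));
	return 0;
}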

src/task.c

Lines changed: 71 additions & 97 deletions
@@ -559,28 +559,30 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 		ctx = t->context;
 		process = t->process;
 		t->calls++;
+
+		th_ctx->sched_wake_date = t->wake_date;
+		if (th_ctx->sched_wake_date) {
+			uint32_t now_ns = now_mono_time();
+			uint32_t lat = now_ns - th_ctx->sched_wake_date;
+
+			t->wake_date = 0;
+			th_ctx->sched_call_date = now_ns;
+			profile_entry = sched_activity_entry(sched_activity, t->process);
+			th_ctx->sched_profile_entry = profile_entry;
+			HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
+			HA_ATOMIC_INC(&profile_entry->calls);
+		}
+		__ha_barrier_store();
+
 		th_ctx->current = t;
 		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
 
 		_HA_ATOMIC_DEC(&th_ctx->rq_total);
+		LIST_DEL_INIT(&((struct tasklet *)t)->list);
+		__ha_barrier_store();
 
 		if (t->state & TASK_F_TASKLET) {
-			LIST_DEL_INIT(&((struct tasklet *)t)->list);
-			__ha_barrier_store();
-
-			th_ctx->sched_wake_date = ((struct tasklet *)t)->wake_date;
-			if (th_ctx->sched_wake_date) {
-				uint32_t now_ns = now_mono_time();
-				uint32_t lat = now_ns - th_ctx->sched_wake_date;
-
-				((struct tasklet *)t)->wake_date = 0;
-				th_ctx->sched_call_date = now_ns;
-				profile_entry = sched_activity_entry(sched_activity, t->process);
-				th_ctx->sched_profile_entry = profile_entry;
-				HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
-				HA_ATOMIC_INC(&profile_entry->calls);
-			}
-
+			/* this is a tasklet */
 			state = _HA_ATOMIC_FETCH_AND(&t->state, TASK_PERSISTENT);
 			__ha_barrier_atomic_store();
 
@@ -594,98 +596,70 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 				__ha_barrier_store();
 				continue;
 			}
+		} else {
+			/* This is a regular task */
+
+			/* We must be the exclusive owner of the TASK_RUNNING bit, and
+			 * have to be careful that the task is not being manipulated on
+			 * another thread finding it expired in wake_expired_tasks().
+			 * The TASK_RUNNING bit will be set during these operations,
+			 * they are extremely rare and do not last long so the best to
+			 * do here is to wait.
+			 */
+			state = _HA_ATOMIC_LOAD(&t->state);
+			do {
+				while (unlikely(state & TASK_RUNNING)) {
+					__ha_cpu_relax();
+					state = _HA_ATOMIC_LOAD(&t->state);
+				}
+			} while (!_HA_ATOMIC_CAS(&t->state, &state, (state & TASK_PERSISTENT) | TASK_RUNNING));
 
-			if (th_ctx->sched_wake_date)
-				HA_ATOMIC_ADD(&profile_entry->cpu_time, (uint32_t)(now_mono_time() - th_ctx->sched_call_date));
-
-			done++;
-			th_ctx->current = NULL;
-			__ha_barrier_store();
-			continue;
-		}
-
-		LIST_DEL_INIT(&((struct tasklet *)t)->list);
-		__ha_barrier_store();
-
-		th_ctx->sched_wake_date = t->wake_date;
-		if (unlikely(t->wake_date)) {
-			uint32_t now_ns = now_mono_time();
-			uint32_t lat = now_ns - t->wake_date;
-
-			t->wake_date = 0;
-			th_ctx->sched_call_date = now_ns;
-			profile_entry = sched_activity_entry(sched_activity, t->process);
-			th_ctx->sched_profile_entry = profile_entry;
-			HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
-			HA_ATOMIC_INC(&profile_entry->calls);
-		}
+			__ha_barrier_atomic_store();
 
-		__ha_barrier_store();
+			_HA_ATOMIC_DEC(&ha_thread_ctx[tid].tasks_in_list);
 
-		/* We must be the exclusive owner of the TASK_RUNNING bit, and
-		 * have to be careful that the task is not being manipulated on
-		 * another thread finding it expired in wake_expired_tasks().
-		 * The TASK_RUNNING bit will be set during these operations,
-		 * they are extremely rare and do not last long so the best to
-		 * do here is to wait.
-		 */
-		state = _HA_ATOMIC_LOAD(&t->state);
-		do {
-			while (unlikely(state & TASK_RUNNING)) {
-				__ha_cpu_relax();
-				state = _HA_ATOMIC_LOAD(&t->state);
+			/* Note for below: if TASK_KILLED arrived before we've read the state, we
+			 * directly free the task. Otherwise it will be seen after processing and
+			 * it's freed on the exit path.
+			 */
+			if (likely(!(state & TASK_KILLED) && process == process_stream))
+				t = process_stream(t, ctx, state);
+			else if (!(state & TASK_KILLED) && process != NULL)
+				t = process(t, ctx, state);
+			else {
+				task_unlink_wq(t);
+				__task_free(t);
+				th_ctx->current = NULL;
+				__ha_barrier_store();
+				/* We don't want max_processed to be decremented if
+				 * we're just freeing a destroyed task, we should only
+				 * do so if we really ran a task.
+				 */
+				continue;
 			}
-		} while (!_HA_ATOMIC_CAS(&t->state, &state, (state & TASK_PERSISTENT) | TASK_RUNNING));
-
-		__ha_barrier_atomic_store();
-
-		/* OK then this is a regular task */
-
-		_HA_ATOMIC_DEC(&ha_thread_ctx[tid].tasks_in_list);
 
-		/* Note for below: if TASK_KILLED arrived before we've read the state, we
-		 * directly free the task. Otherwise it will be seen after processing and
-		 * it's freed on the exit path.
-		 */
-		if (likely(!(state & TASK_KILLED) && process == process_stream))
-			t = process_stream(t, ctx, state);
-		else if (!(state & TASK_KILLED) && process != NULL)
-			t = process(t, ctx, state);
-		else {
-			task_unlink_wq(t);
-			__task_free(t);
-			th_ctx->current = NULL;
-			__ha_barrier_store();
-			/* We don't want max_processed to be decremented if
-			 * we're just freeing a destroyed task, we should only
-			 * do so if we really ran a task.
+			/* If there is a pending state we have to wake up the task
+			 * immediately, else we defer it into wait queue
 			 */
-			continue;
+			if (t != NULL) {
+				state = _HA_ATOMIC_LOAD(&t->state);
+				if (unlikely(state & TASK_KILLED)) {
+					task_unlink_wq(t);
+					__task_free(t);
+				}
+				else {
+					task_queue(t);
+					task_drop_running(t, 0);
+				}
+			}
 		}
+
 		th_ctx->current = NULL;
 		__ha_barrier_store();
 
 		/* stats are only registered for non-zero wake dates */
-		if (unlikely(th_ctx->sched_wake_date)) {
-			uint32_t cpu = (uint32_t)now_mono_time() - th_ctx->sched_call_date;
-
-			HA_ATOMIC_ADD(&profile_entry->cpu_time, cpu);
-		}
-
-		/* If there is a pending state we have to wake up the task
-		 * immediately, else we defer it into wait queue
-		 */
-		if (t != NULL) {
-			state = _HA_ATOMIC_LOAD(&t->state);
-			if (unlikely(state & TASK_KILLED)) {
-				task_unlink_wq(t);
-				__task_free(t);
-			}
-			else {
-				task_queue(t);
-				task_drop_running(t, 0);
-			}
-		}
+		if (unlikely(th_ctx->sched_wake_date))
+			HA_ATOMIC_ADD(&profile_entry->cpu_time, (uint32_t)(now_mono_time() - th_ctx->sched_call_date));
 		done++;
 	}
 	th_ctx->current_queue = -1;
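
As a closing illustration, here is a compilable toy model of the control flow the patch leaves behind: a shared prologue, a single if/else fork, and a shared epilogue. Every name in it is invented for this sketch; only the shape mirrors the diff above, and the real function's early "continue" exits (which skip the epilogue) are omitted.

#include <stdint.h>
#include <stdio.h>

/* Toy model of run_tasks_from_lists() after the patch; hypothetical types. */
struct item {
	unsigned int state;
	uint32_t wake_date;
};

#define ITEM_F_TASKLET 0x01  /* stand-in for TASK_F_TASKLET */

static void run_one(struct item *t)
{
	/* common prologue: latency accounting, formerly duplicated
	 * in both branches */
	uint32_t wake = t->wake_date;
	if (wake) {
		t->wake_date = 0;
		printf("  record latency for wake_date=%u\n", wake);
	}

	if (t->state & ITEM_F_TASKLET)
		printf("  tasklet path\n");
	else
		printf("  regular task path\n");

	/* common epilogue: cpu-time accounting and cleanup, also
	 * factored out of the two branches by the patch */
	if (wake)
		printf("  record cpu time\n");
	printf("  reset current\n");
}

int main(void)
{
	struct item tl = { ITEM_F_TASKLET, 42 };
	struct item tk = { 0, 7 };

	run_one(&tl);
	run_one(&tk);
	return 0;
}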
