linux/kernel/task_work.c
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

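/**
 * task_work_add - queue a callback for the given task
 * @task:	the task that should run the callback
 * @twork:	callback_head to queue; it must stay valid until the
 *		callback runs (or is cancelled)
 * @notify:	if true, set TIF_NOTIFY_RESUME so @task runs the work
 *		on its next return to user mode
 *
 * The new entry is appended to the circular, singly linked list hanging
 * off @task->task_works, which always points at the most recently added
 * entry.  The list is protected by @task->pi_lock.
 *
 * Returns 0.
 */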
int
task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
{
	struct callback_head *last, *first;
	unsigned long flags;

	/*
	 * Not inserting the new work if the task has already passed
	 * exit_task_work() is the responsibility of callers.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	last = task->task_works;
	first = last ? last->next : twork;
	twork->next = first;
	if (last)
		last->next = twork;
	task->task_works = twork;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
	if (notify)
		set_notify_resume(task);
	return 0;
}

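/**
 * task_work_cancel - remove a queued callback before it runs
 * @task:	the task whose pending work is searched
 * @func:	the callback function to look for
 *
 * Walk @task->task_works under @task->pi_lock and unlink the oldest
 * pending entry whose ->func matches @func.
 *
 * Returns the unlinked callback_head, or NULL if no match was found.
 */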
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	unsigned long flags;
	struct callback_head *last, *res = NULL;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	last = task->task_works;
	if (last) {
		struct callback_head *q = last, *p = q->next;
		while (1) {
			if (p->func == func) {
				q->next = p->next;
				if (p == last)
					task->task_works = q == p ? NULL : q;
				res = p;
				break;
			}
			if (p == last)
				break;
			q = p;
			p = q->next;
		}
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	return res;
}

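/**
 * task_work_run - execute the pending callbacks of the current task
 *
 * Detach the whole list under pi_lock, break the circular link and run
 * the callbacks in the order they were queued, with cond_resched()
 * between them.  Repeat until no more work is pending.  Called on
 * return to user mode (see tracehook_notify_resume()) and from
 * exit_task_work().
 */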
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *p, *q;

	while (1) {
		raw_spin_lock_irq(&task->pi_lock);
		p = task->task_works;
		task->task_works = NULL;
		raw_spin_unlock_irq(&task->pi_lock);

		if (unlikely(!p))
			return;

		q = p->next;	/* head: the oldest queued entry */
		p->next = NULL;	/* cut the circular link at the tail */
		while (q) {
			p = q->next;
			q->func(q);
			q = p;
			cond_resched();
		}
	}
}

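/*
 * Usage sketch (not part of this file; the names below are illustrative
 * assumptions, not taken from the kernel tree): a caller allocates a
 * callback_head, points ->func at its handler and queues it on a task.
 *
 *	static void my_cleanup(struct callback_head *head)
 *	{
 *		kfree(head);
 *	}
 *
 *	struct callback_head *work = kmalloc(sizeof(*work), GFP_KERNEL);
 *	work->func = my_cleanup;
 *	task_work_add(current, work, true);
 *
 * With notify == true the task is flagged via set_notify_resume(), and
 * my_cleanup() runs from task_work_run() on the next return to user mode.
 */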