linux/kernel/posix-cpu-timers.c
   1/*
   2 * Implement CPU time clocks for the POSIX clock interface.
   3 */
   4
   5#include <linux/sched.h>
   6#include <linux/posix-timers.h>
   7#include <linux/errno.h>
   8#include <linux/math64.h>
   9#include <asm/uaccess.h>
  10#include <linux/kernel_stat.h>
  11
  12/*
  13 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  14 */
  15void update_rlimit_cpu(unsigned long rlim_new)
  16{
  17        cputime_t cputime;
  18
  19        cputime = secs_to_cputime(rlim_new);
  20        if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
  21            cputime_gt(current->signal->it_prof_expires, cputime)) {
  22                spin_lock_irq(&current->sighand->siglock);
  23                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
  24                spin_unlock_irq(&current->sighand->siglock);
  25        }
  26}
  27
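/*
 * Validate a CPU clockid_t: the clock type must be one we implement, and
 * if a PID is encoded, the target task must exist and match the requested
 * per-thread or per-process flavour.
 */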
  28static int check_clock(const clockid_t which_clock)
  29{
  30        int error = 0;
  31        struct task_struct *p;
  32        const pid_t pid = CPUCLOCK_PID(which_clock);
  33
  34        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
  35                return -EINVAL;
  36
  37        if (pid == 0)
  38                return 0;
  39
  40        read_lock(&tasklist_lock);
  41        p = find_task_by_vpid(pid);
  42        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
  43                   same_thread_group(p, current) : thread_group_leader(p))) {
  44                error = -EINVAL;
  45        }
  46        read_unlock(&tasklist_lock);
  47
  48        return error;
  49}
  50
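/*
 * A union cpu_time_count sample is kept in whichever representation suits
 * the clock: .sched holds nanoseconds for CPUCLOCK_SCHED, while .cpu holds
 * a cputime_t for the PROF and VIRT clocks.  Zeroing .sched first keeps
 * the unused high bits clear when only .cpu is written.
 */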
  51static inline union cpu_time_count
  52timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
  53{
  54        union cpu_time_count ret;
  55        ret.sched = 0;          /* high half always zero when .cpu used */
  56        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
  57                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
  58        } else {
  59                ret.cpu = timespec_to_cputime(tp);
  60        }
  61        return ret;
  62}
  63
  64static void sample_to_timespec(const clockid_t which_clock,
  65                               union cpu_time_count cpu,
  66                               struct timespec *tp)
  67{
  68        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
  69                *tp = ns_to_timespec(cpu.sched);
  70        else
  71                cputime_to_timespec(cpu.cpu, tp);
  72}
  73
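/*
 * Compare, add and subtract clock samples in the representation that
 * matches the clock type, so callers never have to care which union
 * member is in use.
 */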
  74static inline int cpu_time_before(const clockid_t which_clock,
  75                                  union cpu_time_count now,
  76                                  union cpu_time_count then)
  77{
  78        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
  79                return now.sched < then.sched;
   80        } else {
  81                return cputime_lt(now.cpu, then.cpu);
  82        }
  83}
  84static inline void cpu_time_add(const clockid_t which_clock,
  85                                union cpu_time_count *acc,
  86                                union cpu_time_count val)
  87{
  88        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
  89                acc->sched += val.sched;
   90        } else {
  91                acc->cpu = cputime_add(acc->cpu, val.cpu);
  92        }
  93}
  94static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
  95                                                union cpu_time_count a,
  96                                                union cpu_time_count b)
  97{
  98        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
  99                a.sched -= b.sched;
  100        } else {
 101                a.cpu = cputime_sub(a.cpu, b.cpu);
 102        }
 103        return a;
 104}
 105
 106/*
 107 * Divide and limit the result to res >= 1
 108 *
 109 * This is necessary to prevent signal delivery starvation, when the result of
 110 * the division would be rounded down to 0.
 111 */
 112static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
 113{
 114        cputime_t res = cputime_div(time, div);
 115
 116        return max_t(cputime_t, res, 1);
 117}
 118
 119/*
 120 * Update expiry time from increment, and increase overrun count,
 121 * given the current clock sample.
 122 */
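/*
 * Rather than stepping the expiry forward one period at a time, the
 * increment is doubled while twice its value still fits inside delta
 * (the amount by which the timer has been overshot, plus one period),
 * and then halved back down; each chunk that still fits is added to the
 * expiry and credited as 1 << i overruns.
 *
 * Example with arbitrary units: expires = 100, incr = 30, now = 200,
 * so delta = 130.  Doubling stops at incr = 120 (i = 2); the halving
 * pass adds 120 (4 overruns), leaving delta = 10, which is smaller than
 * both 60 and 30.  The timer ends at expires = 220 with 4 overruns,
 * matching the periods 100, 130, 160 and 190 that have already passed.
 */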
 123static void bump_cpu_timer(struct k_itimer *timer,
 124                                  union cpu_time_count now)
 125{
 126        int i;
 127
 128        if (timer->it.cpu.incr.sched == 0)
 129                return;
 130
 131        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 132                unsigned long long delta, incr;
 133
 134                if (now.sched < timer->it.cpu.expires.sched)
 135                        return;
 136                incr = timer->it.cpu.incr.sched;
 137                delta = now.sched + incr - timer->it.cpu.expires.sched;
 138                /* Don't use (incr*2 < delta), incr*2 might overflow. */
 139                for (i = 0; incr < delta - incr; i++)
 140                        incr = incr << 1;
 141                for (; i >= 0; incr >>= 1, i--) {
 142                        if (delta < incr)
 143                                continue;
 144                        timer->it.cpu.expires.sched += incr;
 145                        timer->it_overrun += 1 << i;
 146                        delta -= incr;
 147                }
 148        } else {
 149                cputime_t delta, incr;
 150
 151                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
 152                        return;
 153                incr = timer->it.cpu.incr.cpu;
 154                delta = cputime_sub(cputime_add(now.cpu, incr),
 155                                    timer->it.cpu.expires.cpu);
 156                /* Don't use (incr*2 < delta), incr*2 might overflow. */
 157                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
  158                        incr = cputime_add(incr, incr);
 159                for (; i >= 0; incr = cputime_halve(incr), i--) {
 160                        if (cputime_lt(delta, incr))
 161                                continue;
 162                        timer->it.cpu.expires.cpu =
 163                                cputime_add(timer->it.cpu.expires.cpu, incr);
 164                        timer->it_overrun += 1 << i;
 165                        delta = cputime_sub(delta, incr);
 166                }
 167        }
 168}
 169
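/*
 * The PROF clock counts user plus system time; the VIRT clock counts
 * user time only.
 */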
 170static inline cputime_t prof_ticks(struct task_struct *p)
 171{
 172        return cputime_add(p->utime, p->stime);
 173}
 174static inline cputime_t virt_ticks(struct task_struct *p)
 175{
 176        return p->utime;
 177}
 178
 179int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 180{
 181        int error = check_clock(which_clock);
 182        if (!error) {
 183                tp->tv_sec = 0;
 184                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
 185                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 186                        /*
 187                         * If sched_clock is using a cycle counter, we
 188                         * don't have any idea of its true resolution
  189                         * exported, but it is much finer than 1s/HZ.
 190                         */
 191                        tp->tv_nsec = 1;
 192                }
 193        }
 194        return error;
 195}
 196
 197int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 198{
 199        /*
 200         * You can never reset a CPU clock, but we check for other errors
 201         * in the call before failing with EPERM.
 202         */
 203        int error = check_clock(which_clock);
 204        if (error == 0) {
 205                error = -EPERM;
 206        }
 207        return error;
 208}
 209
 210
 211/*
 212 * Sample a per-thread clock for the given task.
 213 */
 214static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 215                            union cpu_time_count *cpu)
 216{
 217        switch (CPUCLOCK_WHICH(which_clock)) {
 218        default:
 219                return -EINVAL;
 220        case CPUCLOCK_PROF:
 221                cpu->cpu = prof_ticks(p);
 222                break;
 223        case CPUCLOCK_VIRT:
 224                cpu->cpu = virt_ticks(p);
 225                break;
 226        case CPUCLOCK_SCHED:
 227                cpu->sched = task_sched_runtime(p);
 228                break;
 229        }
 230        return 0;
 231}
 232
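/*
 * Sum the CPU times of every live thread in tsk's group and add the
 * totals already accumulated in the signal struct for threads that have
 * exited.  If the group has been reaped (no sighand), the zeroed
 * INIT_CPUTIME values are returned.
 */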
 233void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 234{
 235        struct sighand_struct *sighand;
 236        struct signal_struct *sig;
 237        struct task_struct *t;
 238
 239        *times = INIT_CPUTIME;
 240
 241        rcu_read_lock();
 242        sighand = rcu_dereference(tsk->sighand);
 243        if (!sighand)
 244                goto out;
 245
 246        sig = tsk->signal;
 247
 248        t = tsk;
 249        do {
 250                times->utime = cputime_add(times->utime, t->utime);
 251                times->stime = cputime_add(times->stime, t->stime);
 252                times->sum_exec_runtime += t->se.sum_exec_runtime;
 253
 254                t = next_thread(t);
 255        } while (t != tsk);
 256
 257        times->utime = cputime_add(times->utime, sig->utime);
 258        times->stime = cputime_add(times->stime, sig->stime);
 259        times->sum_exec_runtime += sig->sum_sched_runtime;
 260out:
 261        rcu_read_unlock();
 262}
 263
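/*
 * Raise each field of the cached group time to at least the freshly
 * sampled value, so the cached clock never appears to run backwards.
 */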
 264static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
 265{
 266        if (cputime_gt(b->utime, a->utime))
 267                a->utime = b->utime;
 268
 269        if (cputime_gt(b->stime, a->stime))
 270                a->stime = b->stime;
 271
 272        if (b->sum_exec_runtime > a->sum_exec_runtime)
 273                a->sum_exec_runtime = b->sum_exec_runtime;
 274}
 275
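/*
 * Return the cached group CPU time, starting the group-wide accounting
 * if it is not already running.  On start-up the cache is synchronized
 * with a full per-thread summation so that absolute (TIMER_ABSTIME)
 * expiry values line up with the clock.
 */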
 276void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 277{
 278        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 279        struct task_cputime sum;
 280        unsigned long flags;
 281
 282        spin_lock_irqsave(&cputimer->lock, flags);
 283        if (!cputimer->running) {
 284                cputimer->running = 1;
 285                /*
 286                 * The POSIX timer interface allows for absolute time expiry
 287                 * values through the TIMER_ABSTIME flag, therefore we have
 288                 * to synchronize the timer to the clock every time we start
 289                 * it.
 290                 */
 291                thread_group_cputime(tsk, &sum);
 292                update_gt_cputime(&cputimer->cputime, &sum);
 293        }
 294        *times = cputimer->cputime;
 295        spin_unlock_irqrestore(&cputimer->lock, flags);
 296}
 297
 298/*
 299 * Sample a process (thread group) clock for the given group_leader task.
 300 * Must be called with tasklist_lock held for reading.
 301 */
 302static int cpu_clock_sample_group(const clockid_t which_clock,
 303                                  struct task_struct *p,
 304                                  union cpu_time_count *cpu)
 305{
 306        struct task_cputime cputime;
 307
 308        switch (CPUCLOCK_WHICH(which_clock)) {
 309        default:
 310                return -EINVAL;
 311        case CPUCLOCK_PROF:
 312                thread_group_cputime(p, &cputime);
 313                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
 314                break;
 315        case CPUCLOCK_VIRT:
 316                thread_group_cputime(p, &cputime);
 317                cpu->cpu = cputime.utime;
 318                break;
 319        case CPUCLOCK_SCHED:
 320                cpu->sched = thread_group_sched_runtime(p);
 321                break;
 322        }
 323        return 0;
 324}
 325
 326
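/*
 * Read the current value of a CPU clock for clock_gettime().  Our own
 * clocks (pid 0) are sampled directly; otherwise the encoded PID is
 * looked up and validated before sampling the thread or thread group.
 */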
 327int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 328{
 329        const pid_t pid = CPUCLOCK_PID(which_clock);
 330        int error = -EINVAL;
 331        union cpu_time_count rtn;
 332
 333        if (pid == 0) {
 334                /*
 335                 * Special case constant value for our own clocks.
 336                 * We don't have to do any lookup to find ourselves.
 337                 */
 338                if (CPUCLOCK_PERTHREAD(which_clock)) {
 339                        /*
 340                         * Sampling just ourselves we can do with no locking.
 341                         */
 342                        error = cpu_clock_sample(which_clock,
 343                                                 current, &rtn);
 344                } else {
 345                        read_lock(&tasklist_lock);
 346                        error = cpu_clock_sample_group(which_clock,
 347                                                       current, &rtn);
 348                        read_unlock(&tasklist_lock);
 349                }
 350        } else {
 351                /*
 352                 * Find the given PID, and validate that the caller
 353                 * should be able to see it.
 354                 */
 355                struct task_struct *p;
 356                rcu_read_lock();
 357                p = find_task_by_vpid(pid);
 358                if (p) {
 359                        if (CPUCLOCK_PERTHREAD(which_clock)) {
 360                                if (same_thread_group(p, current)) {
 361                                        error = cpu_clock_sample(which_clock,
 362                                                                 p, &rtn);
 363                                }
 364                        } else {
 365                                read_lock(&tasklist_lock);
 366                                if (thread_group_leader(p) && p->signal) {
 367                                        error =
 368                                            cpu_clock_sample_group(which_clock,
 369                                                                   p, &rtn);
 370                                }
 371                                read_unlock(&tasklist_lock);
 372                        }
 373                }
 374                rcu_read_unlock();
 375        }
 376
 377        if (error)
 378                return error;
 379        sample_to_timespec(which_clock, rtn, tp);
 380        return 0;
 381}
 382
 383
 384/*
 385 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 386 * This is called from sys_timer_create with the new timer already locked.
 387 */
 388int posix_cpu_timer_create(struct k_itimer *new_timer)
 389{
 390        int ret = 0;
 391        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
 392        struct task_struct *p;
 393
 394        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
 395                return -EINVAL;
 396
 397        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
 398        new_timer->it.cpu.incr.sched = 0;
 399        new_timer->it.cpu.expires.sched = 0;
 400
 401        read_lock(&tasklist_lock);
 402        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
 403                if (pid == 0) {
 404                        p = current;
 405                } else {
 406                        p = find_task_by_vpid(pid);
 407                        if (p && !same_thread_group(p, current))
 408                                p = NULL;
 409                }
 410        } else {
 411                if (pid == 0) {
 412                        p = current->group_leader;
 413                } else {
 414                        p = find_task_by_vpid(pid);
 415                        if (p && !thread_group_leader(p))
 416                                p = NULL;
 417                }
 418        }
 419        new_timer->it.cpu.task = p;
 420        if (p) {
 421                get_task_struct(p);
 422        } else {
 423                ret = -EINVAL;
 424        }
 425        read_unlock(&tasklist_lock);
 426
 427        return ret;
 428}
 429
 430/*
 431 * Clean up a CPU-clock timer that is about to be destroyed.
 432 * This is called from timer deletion with the timer already locked.
 433 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 434 * and try again.  (This happens when the timer is in the middle of firing.)
 435 */
 436int posix_cpu_timer_del(struct k_itimer *timer)
 437{
 438        struct task_struct *p = timer->it.cpu.task;
 439        int ret = 0;
 440
 441        if (likely(p != NULL)) {
 442                read_lock(&tasklist_lock);
 443                if (unlikely(p->signal == NULL)) {
 444                        /*
 445                         * We raced with the reaping of the task.
 446                         * The deletion should have cleared us off the list.
 447                         */
 448                        BUG_ON(!list_empty(&timer->it.cpu.entry));
 449                } else {
 450                        spin_lock(&p->sighand->siglock);
 451                        if (timer->it.cpu.firing)
 452                                ret = TIMER_RETRY;
 453                        else
 454                                list_del(&timer->it.cpu.entry);
 455                        spin_unlock(&p->sighand->siglock);
 456                }
 457                read_unlock(&tasklist_lock);
 458
 459                if (!ret)
 460                        put_task_struct(p);
 461        }
 462
 463        return ret;
 464}
 465
 466/*
 467 * Clean out CPU timers still ticking when a thread exited.  The task
 468 * pointer is cleared, and the expiry time is replaced with the residual
 469 * time for later timer_gettime calls to return.
 470 * This must be called with the siglock held.
 471 */
 472static void cleanup_timers(struct list_head *head,
 473                           cputime_t utime, cputime_t stime,
 474                           unsigned long long sum_exec_runtime)
 475{
 476        struct cpu_timer_list *timer, *next;
 477        cputime_t ptime = cputime_add(utime, stime);
 478
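        /*
         * head points at the CPUCLOCK_PROF list; the two "++head" steps
         * below advance it to the CPUCLOCK_VIRT and CPUCLOCK_SCHED lists,
         * in the same order that cpu_timers[] is indexed.
         */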
 479        list_for_each_entry_safe(timer, next, head, entry) {
 480                list_del_init(&timer->entry);
 481                if (cputime_lt(timer->expires.cpu, ptime)) {
 482                        timer->expires.cpu = cputime_zero;
 483                } else {
 484                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
 485                                                         ptime);
 486                }
 487        }
 488
 489        ++head;
 490        list_for_each_entry_safe(timer, next, head, entry) {
 491                list_del_init(&timer->entry);
 492                if (cputime_lt(timer->expires.cpu, utime)) {
 493                        timer->expires.cpu = cputime_zero;
 494                } else {
 495                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
 496                                                         utime);
 497                }
 498        }
 499
 500        ++head;
 501        list_for_each_entry_safe(timer, next, head, entry) {
 502                list_del_init(&timer->entry);
 503                if (timer->expires.sched < sum_exec_runtime) {
 504                        timer->expires.sched = 0;
 505                } else {
 506                        timer->expires.sched -= sum_exec_runtime;
 507                }
 508        }
 509}
 510
 511/*
 512 * These are both called with the siglock held, when the current thread
 513 * is being reaped.  When the final (leader) thread in the group is reaped,
 514 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 515 */
 516void posix_cpu_timers_exit(struct task_struct *tsk)
 517{
 518        cleanup_timers(tsk->cpu_timers,
 519                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
 520
 521}
 522void posix_cpu_timers_exit_group(struct task_struct *tsk)
 523{
 524        struct task_cputime cputime;
 525
 526        thread_group_cputimer(tsk, &cputime);
 527        cleanup_timers(tsk->signal->cpu_timers,
 528                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 529}
 530
 531static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 532{
 533        /*
 534         * That's all for this thread or process.
 535         * We leave our residual in expires to be reported.
 536         */
 537        put_task_struct(timer->it.cpu.task);
 538        timer->it.cpu.task = NULL;
 539        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
 540                                             timer->it.cpu.expires,
 541                                             now);
 542}
 543
 544/*
 545 * Insert the timer on the appropriate list before any timers that
 546 * expire later.  This must be called with the tasklist_lock held
 547 * for reading, and interrupts disabled.
 548 */
 549static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 550{
 551        struct task_struct *p = timer->it.cpu.task;
 552        struct list_head *head, *listpos;
 553        struct cpu_timer_list *const nt = &timer->it.cpu;
 554        struct cpu_timer_list *next;
 555        unsigned long i;
 556
 557        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 558                p->cpu_timers : p->signal->cpu_timers);
 559        head += CPUCLOCK_WHICH(timer->it_clock);
 560
 561        BUG_ON(!irqs_disabled());
 562        spin_lock(&p->sighand->siglock);
 563
 564        listpos = head;
 565        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 566                list_for_each_entry(next, head, entry) {
 567                        if (next->expires.sched > nt->expires.sched)
 568                                break;
 569                        listpos = &next->entry;
 570                }
 571        } else {
 572                list_for_each_entry(next, head, entry) {
 573                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
 574                                break;
 575                        listpos = &next->entry;
 576                }
 577        }
 578        list_add(&nt->entry, listpos);
 579
 580        if (listpos == head) {
 581                /*
 582                 * We are the new earliest-expiring timer.
 583                 * If we are a thread timer, there can always
 584                 * be a process timer telling us to stop earlier.
 585                 */
 586
 587                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 588                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
 589                        default:
 590                                BUG();
 591                        case CPUCLOCK_PROF:
 592                                if (cputime_eq(p->cputime_expires.prof_exp,
 593                                               cputime_zero) ||
 594                                    cputime_gt(p->cputime_expires.prof_exp,
 595                                               nt->expires.cpu))
 596                                        p->cputime_expires.prof_exp =
 597                                                nt->expires.cpu;
 598                                break;
 599                        case CPUCLOCK_VIRT:
 600                                if (cputime_eq(p->cputime_expires.virt_exp,
 601                                               cputime_zero) ||
 602                                    cputime_gt(p->cputime_expires.virt_exp,
 603                                               nt->expires.cpu))
 604                                        p->cputime_expires.virt_exp =
 605                                                nt->expires.cpu;
 606                                break;
 607                        case CPUCLOCK_SCHED:
 608                                if (p->cputime_expires.sched_exp == 0 ||
 609                                    p->cputime_expires.sched_exp >
 610                                                        nt->expires.sched)
 611                                        p->cputime_expires.sched_exp =
 612                                                nt->expires.sched;
 613                                break;
 614                        }
 615                } else {
 616                        /*
 617                         * For a process timer, set the cached expiration time.
 618                         */
 619                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
 620                        default:
 621                                BUG();
 622                        case CPUCLOCK_VIRT:
 623                                if (!cputime_eq(p->signal->it_virt_expires,
 624                                                cputime_zero) &&
 625                                    cputime_lt(p->signal->it_virt_expires,
 626                                               timer->it.cpu.expires.cpu))
 627                                        break;
 628                                p->signal->cputime_expires.virt_exp =
 629                                        timer->it.cpu.expires.cpu;
 630                                break;
 631                        case CPUCLOCK_PROF:
 632                                if (!cputime_eq(p->signal->it_prof_expires,
 633                                                cputime_zero) &&
 634                                    cputime_lt(p->signal->it_prof_expires,
 635                                               timer->it.cpu.expires.cpu))
 636                                        break;
 637                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
 638                                if (i != RLIM_INFINITY &&
 639                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
 640                                        break;
 641                                p->signal->cputime_expires.prof_exp =
 642                                        timer->it.cpu.expires.cpu;
 643                                break;
 644                        case CPUCLOCK_SCHED:
 645                                p->signal->cputime_expires.sched_exp =
 646                                        timer->it.cpu.expires.sched;
 647                                break;
 648                        }
 649                }
 650        }
 651
 652        spin_unlock(&p->sighand->siglock);
 653}
 654
 655/*
 656 * The timer is locked, fire it and arrange for its reload.
 657 */
 658static void cpu_timer_fire(struct k_itimer *timer)
 659{
 660        if (unlikely(timer->sigq == NULL)) {
 661                /*
 662                 * This a special case for clock_nanosleep,
 663                 * not a normal timer from sys_timer_create.
 664                 */
 665                wake_up_process(timer->it_process);
 666                timer->it.cpu.expires.sched = 0;
 667        } else if (timer->it.cpu.incr.sched == 0) {
 668                /*
 669                 * One-shot timer.  Clear it as soon as it's fired.
 670                 */
 671                posix_timer_event(timer, 0);
 672                timer->it.cpu.expires.sched = 0;
 673        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
 674                /*
 675                 * The signal did not get queued because the signal
 676                 * was ignored, so we won't get any callback to
 677                 * reload the timer.  But we need to keep it
 678                 * ticking in case the signal is deliverable next time.
 679                 */
 680                posix_cpu_timer_schedule(timer);
 681        }
 682}
 683
 684/*
 685 * Sample a process (thread group) timer for the given group_leader task.
 686 * Must be called with tasklist_lock held for reading.
 687 */
 688static int cpu_timer_sample_group(const clockid_t which_clock,
 689                                  struct task_struct *p,
 690                                  union cpu_time_count *cpu)
 691{
 692        struct task_cputime cputime;
 693
 694        thread_group_cputimer(p, &cputime);
 695        switch (CPUCLOCK_WHICH(which_clock)) {
 696        default:
 697                return -EINVAL;
 698        case CPUCLOCK_PROF:
 699                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
 700                break;
 701        case CPUCLOCK_VIRT:
 702                cpu->cpu = cputime.utime;
 703                break;
 704        case CPUCLOCK_SCHED:
 705                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
 706                break;
 707        }
 708        return 0;
 709}
 710
 711/*
 712 * Guts of sys_timer_settime for CPU timers.
 713 * This is called with the timer locked and interrupts disabled.
 714 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 715 * and try again.  (This happens when the timer is in the middle of firing.)
 716 */
 717int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 718                        struct itimerspec *new, struct itimerspec *old)
 719{
 720        struct task_struct *p = timer->it.cpu.task;
 721        union cpu_time_count old_expires, new_expires, val;
 722        int ret;
 723
 724        if (unlikely(p == NULL)) {
 725                /*
 726                 * Timer refers to a dead task's clock.
 727                 */
 728                return -ESRCH;
 729        }
 730
 731        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 732
 733        read_lock(&tasklist_lock);
 734        /*
 735         * We need the tasklist_lock to protect against reaping that
 736         * clears p->signal.  If p has just been reaped, we can no
 737         * longer get any information about it at all.
 738         */
 739        if (unlikely(p->signal == NULL)) {
 740                read_unlock(&tasklist_lock);
 741                put_task_struct(p);
 742                timer->it.cpu.task = NULL;
 743                return -ESRCH;
 744        }
 745
 746        /*
 747         * Disarm any old timer after extracting its expiry time.
 748         */
 749        BUG_ON(!irqs_disabled());
 750
 751        ret = 0;
 752        spin_lock(&p->sighand->siglock);
 753        old_expires = timer->it.cpu.expires;
 754        if (unlikely(timer->it.cpu.firing)) {
 755                timer->it.cpu.firing = -1;
 756                ret = TIMER_RETRY;
 757        } else
 758                list_del_init(&timer->it.cpu.entry);
 759        spin_unlock(&p->sighand->siglock);
 760
 761        /*
 762         * We need to sample the current value to convert the new
  763         * value from relative to absolute, and to convert the
 764         * old value from absolute to relative.  To set a process
 765         * timer, we need a sample to balance the thread expiry
 766         * times (in arm_timer).  With an absolute time, we must
 767         * check if it's already passed.  In short, we need a sample.
 768         */
 769        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 770                cpu_clock_sample(timer->it_clock, p, &val);
 771        } else {
 772                cpu_timer_sample_group(timer->it_clock, p, &val);
 773        }
 774
 775        if (old) {
 776                if (old_expires.sched == 0) {
 777                        old->it_value.tv_sec = 0;
 778                        old->it_value.tv_nsec = 0;
 779                } else {
 780                        /*
 781                         * Update the timer in case it has
 782                         * overrun already.  If it has,
 783                         * we'll report it as having overrun
 784                         * and with the next reloaded timer
 785                         * already ticking, though we are
 786                         * swallowing that pending
 787                         * notification here to install the
 788                         * new setting.
 789                         */
 790                        bump_cpu_timer(timer, val);
 791                        if (cpu_time_before(timer->it_clock, val,
 792                                            timer->it.cpu.expires)) {
 793                                old_expires = cpu_time_sub(
 794                                        timer->it_clock,
 795                                        timer->it.cpu.expires, val);
 796                                sample_to_timespec(timer->it_clock,
 797                                                   old_expires,
 798                                                   &old->it_value);
 799                        } else {
 800                                old->it_value.tv_nsec = 1;
 801                                old->it_value.tv_sec = 0;
 802                        }
 803                }
 804        }
 805
 806        if (unlikely(ret)) {
 807                /*
 808                 * We are colliding with the timer actually firing.
 809                 * Punt after filling in the timer's old value, and
 810                 * disable this firing since we are already reporting
 811                 * it as an overrun (thanks to bump_cpu_timer above).
 812                 */
 813                read_unlock(&tasklist_lock);
 814                goto out;
 815        }
 816
 817        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
 818                cpu_time_add(timer->it_clock, &new_expires, val);
 819        }
 820
 821        /*
 822         * Install the new expiry time (or zero).
 823         * For a timer with no notification action, we don't actually
 824         * arm the timer (we'll just fake it for timer_gettime).
 825         */
 826        timer->it.cpu.expires = new_expires;
 827        if (new_expires.sched != 0 &&
 828            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
 829            cpu_time_before(timer->it_clock, val, new_expires)) {
 830                arm_timer(timer, val);
 831        }
 832
 833        read_unlock(&tasklist_lock);
 834
 835        /*
 836         * Install the new reload setting, and
 837         * set up the signal and overrun bookkeeping.
 838         */
 839        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
 840                                                &new->it_interval);
 841
 842        /*
 843         * This acts as a modification timestamp for the timer,
 844         * so any automatic reload attempt will punt on seeing
 845         * that we have reset the timer manually.
 846         */
 847        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
 848                ~REQUEUE_PENDING;
 849        timer->it_overrun_last = 0;
 850        timer->it_overrun = -1;
 851
 852        if (new_expires.sched != 0 &&
 853            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
 854            !cpu_time_before(timer->it_clock, val, new_expires)) {
 855                /*
 856                 * The designated time already passed, so we notify
 857                 * immediately, even if the thread never runs to
 858                 * accumulate more time on this clock.
 859                 */
 860                cpu_timer_fire(timer);
 861        }
 862
 863        ret = 0;
 864 out:
 865        if (old) {
 866                sample_to_timespec(timer->it_clock,
 867                                   timer->it.cpu.incr, &old->it_interval);
 868        }
 869        return ret;
 870}
 871
 872void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 873{
 874        union cpu_time_count now;
 875        struct task_struct *p = timer->it.cpu.task;
 876        int clear_dead;
 877
 878        /*
 879         * Easy part: convert the reload time.
 880         */
 881        sample_to_timespec(timer->it_clock,
 882                           timer->it.cpu.incr, &itp->it_interval);
 883
 884        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
 885                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
 886                return;
 887        }
 888
 889        if (unlikely(p == NULL)) {
 890                /*
 891                 * This task already died and the timer will never fire.
 892                 * In this case, expires is actually the dead value.
 893                 */
 894        dead:
 895                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
 896                                   &itp->it_value);
 897                return;
 898        }
 899
 900        /*
 901         * Sample the clock to take the difference with the expiry time.
 902         */
 903        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 904                cpu_clock_sample(timer->it_clock, p, &now);
 905                clear_dead = p->exit_state;
 906        } else {
 907                read_lock(&tasklist_lock);
 908                if (unlikely(p->signal == NULL)) {
 909                        /*
 910                         * The process has been reaped.
 911                         * We can't even collect a sample any more.
 912                         * Call the timer disarmed, nothing else to do.
 913                         */
 914                        put_task_struct(p);
 915                        timer->it.cpu.task = NULL;
 916                        timer->it.cpu.expires.sched = 0;
 917                        read_unlock(&tasklist_lock);
 918                        goto dead;
 919                } else {
 920                        cpu_timer_sample_group(timer->it_clock, p, &now);
 921                        clear_dead = (unlikely(p->exit_state) &&
 922                                      thread_group_empty(p));
 923                }
 924                read_unlock(&tasklist_lock);
 925        }
 926
 927        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
 928                if (timer->it.cpu.incr.sched == 0 &&
 929                    cpu_time_before(timer->it_clock,
 930                                    timer->it.cpu.expires, now)) {
 931                        /*
 932                         * Do-nothing timer expired and has no reload,
 933                         * so it's as if it was never set.
 934                         */
 935                        timer->it.cpu.expires.sched = 0;
 936                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
 937                        return;
 938                }
 939                /*
 940                 * Account for any expirations and reloads that should
 941                 * have happened.
 942                 */
 943                bump_cpu_timer(timer, now);
 944        }
 945
 946        if (unlikely(clear_dead)) {
 947                /*
 948                 * We've noticed that the thread is dead, but
 949                 * not yet reaped.  Take this opportunity to
 950                 * drop our task ref.
 951                 */
 952                clear_dead_task(timer, now);
 953                goto dead;
 954        }
 955
 956        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
 957                sample_to_timespec(timer->it_clock,
 958                                   cpu_time_sub(timer->it_clock,
 959                                                timer->it.cpu.expires, now),
 960                                   &itp->it_value);
 961        } else {
 962                /*
 963                 * The timer should have expired already, but the firing
 964                 * hasn't taken place yet.  Say it's just about to expire.
 965                 */
 966                itp->it_value.tv_nsec = 1;
 967                itp->it_value.tv_sec = 0;
 968        }
 969}
 970
 971/*
 972 * Check for any per-thread CPU timers that have fired and move them off
 973 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 974 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 975 */
 976static void check_thread_timers(struct task_struct *tsk,
 977                                struct list_head *firing)
 978{
 979        int maxfire;
 980        struct list_head *timers = tsk->cpu_timers;
 981        struct signal_struct *const sig = tsk->signal;
 982
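        /*
         * Move at most 20 expired timers per clock onto the firing list in
         * one pass; if we stop early, the next pending expiry is left
         * cached in cputime_expires so a later tick picks it up.
         */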
 983        maxfire = 20;
 984        tsk->cputime_expires.prof_exp = cputime_zero;
 985        while (!list_empty(timers)) {
 986                struct cpu_timer_list *t = list_first_entry(timers,
 987                                                      struct cpu_timer_list,
 988                                                      entry);
 989                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
 990                        tsk->cputime_expires.prof_exp = t->expires.cpu;
 991                        break;
 992                }
 993                t->firing = 1;
 994                list_move_tail(&t->entry, firing);
 995        }
 996
 997        ++timers;
 998        maxfire = 20;
 999        tsk->cputime_expires.virt_exp = cputime_zero;
1000        while (!list_empty(timers)) {
1001                struct cpu_timer_list *t = list_first_entry(timers,
1002                                                      struct cpu_timer_list,
1003                                                      entry);
1004                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
1005                        tsk->cputime_expires.virt_exp = t->expires.cpu;
1006                        break;
1007                }
1008                t->firing = 1;
1009                list_move_tail(&t->entry, firing);
1010        }
1011
1012        ++timers;
1013        maxfire = 20;
1014        tsk->cputime_expires.sched_exp = 0;
1015        while (!list_empty(timers)) {
1016                struct cpu_timer_list *t = list_first_entry(timers,
1017                                                      struct cpu_timer_list,
1018                                                      entry);
1019                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
1020                        tsk->cputime_expires.sched_exp = t->expires.sched;
1021                        break;
1022                }
1023                t->firing = 1;
1024                list_move_tail(&t->entry, firing);
1025        }
1026
1027        /*
1028         * Check for the special case thread timers.
1029         */
1030        if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
1031                unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
1032                unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
1033
1034                if (hard != RLIM_INFINITY &&
1035                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
1036                        /*
1037                         * At the hard limit, we just die.
1038                         * No need to calculate anything else now.
1039                         */
1040                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
1041                        return;
1042                }
1043                if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
1044                        /*
1045                         * At the soft limit, send a SIGXCPU every second.
1046                         */
1047                        if (sig->rlim[RLIMIT_RTTIME].rlim_cur
1048                            < sig->rlim[RLIMIT_RTTIME].rlim_max) {
1049                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
1050                                                                USEC_PER_SEC;
1051                        }
1052                        printk(KERN_INFO
1053                                "RT Watchdog Timeout: %s[%d]\n",
1054                                tsk->comm, task_pid_nr(tsk));
1055                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1056                }
1057        }
1058}
1059
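/*
 * Turn off the group-wide CPU time accounting once no process-wide
 * timers or limits remain that need it.
 */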
1060static void stop_process_timers(struct task_struct *tsk)
1061{
1062        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
1063        unsigned long flags;
1064
1065        if (!cputimer->running)
1066                return;
1067
1068        spin_lock_irqsave(&cputimer->lock, flags);
1069        cputimer->running = 0;
1070        spin_unlock_irqrestore(&cputimer->lock, flags);
1071}
1072
1073/*
 1074 * Check for any per-process (thread group) CPU timers that have fired and
 1075 * move them off the tsk->signal->cpu_timers[N] lists onto the firing list.
 1076 * Per-thread timers have already been taken off.
1077 */
1078static void check_process_timers(struct task_struct *tsk,
1079                                 struct list_head *firing)
1080{
1081        int maxfire;
1082        struct signal_struct *const sig = tsk->signal;
1083        cputime_t utime, ptime, virt_expires, prof_expires;
1084        unsigned long long sum_sched_runtime, sched_expires;
1085        struct list_head *timers = sig->cpu_timers;
1086        struct task_cputime cputime;
1087
1088        /*
1089         * Don't sample the current process CPU clocks if there are no timers.
1090         */
1091        if (list_empty(&timers[CPUCLOCK_PROF]) &&
1092            cputime_eq(sig->it_prof_expires, cputime_zero) &&
1093            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
1094            list_empty(&timers[CPUCLOCK_VIRT]) &&
1095            cputime_eq(sig->it_virt_expires, cputime_zero) &&
1096            list_empty(&timers[CPUCLOCK_SCHED])) {
1097                stop_process_timers(tsk);
1098                return;
1099        }
1100
1101        /*
1102         * Collect the current process totals.
1103         */
1104        thread_group_cputimer(tsk, &cputime);
1105        utime = cputime.utime;
1106        ptime = cputime_add(utime, cputime.stime);
1107        sum_sched_runtime = cputime.sum_exec_runtime;
1108        maxfire = 20;
1109        prof_expires = cputime_zero;
1110        while (!list_empty(timers)) {
1111                struct cpu_timer_list *tl = list_first_entry(timers,
1112                                                      struct cpu_timer_list,
1113                                                      entry);
1114                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
1115                        prof_expires = tl->expires.cpu;
1116                        break;
1117                }
1118                tl->firing = 1;
1119                list_move_tail(&tl->entry, firing);
1120        }
1121
1122        ++timers;
1123        maxfire = 20;
1124        virt_expires = cputime_zero;
1125        while (!list_empty(timers)) {
1126                struct cpu_timer_list *tl = list_first_entry(timers,
1127                                                      struct cpu_timer_list,
1128                                                      entry);
1129                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
1130                        virt_expires = tl->expires.cpu;
1131                        break;
1132                }
1133                tl->firing = 1;
1134                list_move_tail(&tl->entry, firing);
1135        }
1136
1137        ++timers;
1138        maxfire = 20;
1139        sched_expires = 0;
1140        while (!list_empty(timers)) {
1141                struct cpu_timer_list *tl = list_first_entry(timers,
1142                                                      struct cpu_timer_list,
1143                                                      entry);
1144                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
1145                        sched_expires = tl->expires.sched;
1146                        break;
1147                }
1148                tl->firing = 1;
1149                list_move_tail(&tl->entry, firing);
1150        }
1151
1152        /*
1153         * Check for the special case process timers.
1154         */
1155        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
1156                if (cputime_ge(ptime, sig->it_prof_expires)) {
1157                        /* ITIMER_PROF fires and reloads.  */
1158                        sig->it_prof_expires = sig->it_prof_incr;
1159                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
1160                                sig->it_prof_expires = cputime_add(
1161                                        sig->it_prof_expires, ptime);
1162                        }
1163                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
1164                }
1165                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
1166                    (cputime_eq(prof_expires, cputime_zero) ||
1167                     cputime_lt(sig->it_prof_expires, prof_expires))) {
1168                        prof_expires = sig->it_prof_expires;
1169                }
1170        }
1171        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1172                if (cputime_ge(utime, sig->it_virt_expires)) {
1173                        /* ITIMER_VIRTUAL fires and reloads.  */
1174                        sig->it_virt_expires = sig->it_virt_incr;
1175                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1176                                sig->it_virt_expires = cputime_add(
1177                                        sig->it_virt_expires, utime);
1178                        }
1179                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
1180                }
1181                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
1182                    (cputime_eq(virt_expires, cputime_zero) ||
1183                     cputime_lt(sig->it_virt_expires, virt_expires))) {
1184                        virt_expires = sig->it_virt_expires;
1185                }
1186        }
1187        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1188                unsigned long psecs = cputime_to_secs(ptime);
1189                cputime_t x;
1190                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
1191                        /*
1192                         * At the hard limit, we just die.
1193                         * No need to calculate anything else now.
1194                         */
1195                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
1196                        return;
1197                }
1198                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
1199                        /*
1200                         * At the soft limit, send a SIGXCPU every second.
1201                         */
1202                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1203                        if (sig->rlim[RLIMIT_CPU].rlim_cur
1204                            < sig->rlim[RLIMIT_CPU].rlim_max) {
1205                                sig->rlim[RLIMIT_CPU].rlim_cur++;
1206                        }
1207                }
1208                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
1209                if (cputime_eq(prof_expires, cputime_zero) ||
1210                    cputime_lt(x, prof_expires)) {
1211                        prof_expires = x;
1212                }
1213        }
1214
1215        if (!cputime_eq(prof_expires, cputime_zero) &&
1216            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
1217             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
1218                sig->cputime_expires.prof_exp = prof_expires;
1219        if (!cputime_eq(virt_expires, cputime_zero) &&
1220            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
1221             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
1222                sig->cputime_expires.virt_exp = virt_expires;
1223        if (sched_expires != 0 &&
1224            (sig->cputime_expires.sched_exp == 0 ||
1225             sig->cputime_expires.sched_exp > sched_expires))
1226                sig->cputime_expires.sched_exp = sched_expires;
1227}
1228
1229/*
1230 * This is called from the signal code (via do_schedule_next_timer)
1231 * when the last timer signal was delivered and we have to reload the timer.
1232 */
1233void posix_cpu_timer_schedule(struct k_itimer *timer)
1234{
1235        struct task_struct *p = timer->it.cpu.task;
1236        union cpu_time_count now;
1237
1238        if (unlikely(p == NULL))
1239                /*
1240                 * The task was cleaned up already, no future firings.
1241                 */
1242                goto out;
1243
1244        /*
1245         * Fetch the current sample and update the timer's expiry time.
1246         */
1247        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
1248                cpu_clock_sample(timer->it_clock, p, &now);
1249                bump_cpu_timer(timer, now);
1250                if (unlikely(p->exit_state)) {
1251                        clear_dead_task(timer, now);
1252                        goto out;
1253                }
1254                read_lock(&tasklist_lock); /* arm_timer needs it.  */
1255        } else {
1256                read_lock(&tasklist_lock);
1257                if (unlikely(p->signal == NULL)) {
1258                        /*
1259                         * The process has been reaped.
1260                         * We can't even collect a sample any more.
1261                         */
1262                        put_task_struct(p);
1263                        timer->it.cpu.task = p = NULL;
1264                        timer->it.cpu.expires.sched = 0;
1265                        goto out_unlock;
1266                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1267                        /*
1268                         * We've noticed that the thread is dead, but
1269                         * not yet reaped.  Take this opportunity to
1270                         * drop our task ref.
1271                         */
1272                        clear_dead_task(timer, now);
1273                        goto out_unlock;
1274                }
1275                cpu_timer_sample_group(timer->it_clock, p, &now);
1276                bump_cpu_timer(timer, now);
1277                /* Leave the tasklist_lock locked for the call below.  */
1278        }
1279
1280        /*
1281         * Now re-arm for the new expiry time.
1282         */
1283        arm_timer(timer, now);
1284
1285out_unlock:
1286        read_unlock(&tasklist_lock);
1287
1288out:
1289        timer->it_overrun_last = timer->it_overrun;
1290        timer->it_overrun = -1;
1291        ++timer->it_requeue_pending;
1292}
1293
1294/**
1295 * task_cputime_zero - Check a task_cputime struct for all zero fields.
1296 *
1297 * @cputime:    The struct to compare.
1298 *
1299 * Checks @cputime to see if all fields are zero.  Returns true if all fields
1300 * are zero, false if any field is nonzero.
1301 */
1302static inline int task_cputime_zero(const struct task_cputime *cputime)
1303{
1304        if (cputime_eq(cputime->utime, cputime_zero) &&
1305            cputime_eq(cputime->stime, cputime_zero) &&
1306            cputime->sum_exec_runtime == 0)
1307                return 1;
1308        return 0;
1309}
1310
1311/**
1312 * task_cputime_expired - Compare two task_cputime entities.
1313 *
1314 * @sample:     The task_cputime structure to be checked for expiration.
1315 * @expires:    Expiration times, against which @sample will be checked.
1316 *
1317 * Checks @sample against @expires to see if any field of @sample has expired.
 1318 * Returns true if any field of the former is greater than or equal to the
 1319 * corresponding non-zero field of the latter; otherwise returns false.
1320 */
1321static inline int task_cputime_expired(const struct task_cputime *sample,
1322                                        const struct task_cputime *expires)
1323{
1324        if (!cputime_eq(expires->utime, cputime_zero) &&
1325            cputime_ge(sample->utime, expires->utime))
1326                return 1;
1327        if (!cputime_eq(expires->stime, cputime_zero) &&
1328            cputime_ge(cputime_add(sample->utime, sample->stime),
1329                       expires->stime))
1330                return 1;
1331        if (expires->sum_exec_runtime != 0 &&
1332            sample->sum_exec_runtime >= expires->sum_exec_runtime)
1333                return 1;
1334        return 0;
1335}
1336
1337/**
1338 * fastpath_timer_check - POSIX CPU timers fast path.
1339 *
1340 * @tsk:        The task (thread) being checked.
1341 *
1342 * Check the task and thread group timers.  If both are zero (there are no
1343 * timers set) return false.  Otherwise snapshot the task and thread group
1344 * timers and compare them with the corresponding expiration times.  Return
1345 * true if a timer has expired, else return false.
1346 */
1347static inline int fastpath_timer_check(struct task_struct *tsk)
1348{
1349        struct signal_struct *sig;
1350
1351        /* tsk == current, ensure it is safe to use ->signal/sighand */
1352        if (unlikely(tsk->exit_state))
1353                return 0;
1354
1355        if (!task_cputime_zero(&tsk->cputime_expires)) {
1356                struct task_cputime task_sample = {
1357                        .utime = tsk->utime,
1358                        .stime = tsk->stime,
1359                        .sum_exec_runtime = tsk->se.sum_exec_runtime
1360                };
1361
1362                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1363                        return 1;
1364        }
1365
1366        sig = tsk->signal;
1367        if (!task_cputime_zero(&sig->cputime_expires)) {
1368                struct task_cputime group_sample;
1369
1370                thread_group_cputimer(tsk, &group_sample);
1371                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1372                        return 1;
1373        }
1374
1375        return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
1376}
1377
1378/*
1379 * This is called from the timer interrupt handler.  The irq handler has
1380 * already updated our counts.  We need to check if any timers fire now.
1381 * Interrupts are disabled.
1382 */
1383void run_posix_cpu_timers(struct task_struct *tsk)
1384{
1385        LIST_HEAD(firing);
1386        struct k_itimer *timer, *next;
1387
1388        BUG_ON(!irqs_disabled());
1389
1390        /*
1391         * The fast path checks that there are no expired thread or thread
1392         * group timers.  If that's so, just return.
1393         */
1394        if (!fastpath_timer_check(tsk))
1395                return;
1396
1397        spin_lock(&tsk->sighand->siglock);
1398        /*
1399         * Here we remove from tsk->signal->cpu_timers[N] and
1400         * tsk->cpu_timers[N] all the timers that are firing, and
1401         * put them on the firing list.
1402         */
1403        check_thread_timers(tsk, &firing);
1404        check_process_timers(tsk, &firing);
1405
1406        /*
1407         * We must release the siglock before taking any timer's lock.
1408         * There is a potential race with timer deletion here, as the
1409         * siglock now protects our private firing list.  We have set
1410         * the firing flag in each timer, so that a deletion attempt
1411         * that gets the timer lock before we do will give it up and
1412         * spin until we've taken care of that timer below.
1413         */
1414        spin_unlock(&tsk->sighand->siglock);
1415
1416        /*
1417         * Now that all the timers on our list have the firing flag,
1418         * no one will touch their list entries but us.  We'll take
1419         * each timer's lock before clearing its firing flag, so no
1420         * timer call will interfere.
1421         */
1422        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
1423                int cpu_firing;
1424
1425                spin_lock(&timer->it_lock);
1426                list_del_init(&timer->it.cpu.entry);
1427                cpu_firing = timer->it.cpu.firing;
1428                timer->it.cpu.firing = 0;
1429                /*
1430                 * The firing flag is -1 if we collided with a reset
1431                 * of the timer, which already reported this
1432                 * almost-firing as an overrun.  So don't generate an event.
1433                 */
1434                if (likely(cpu_firing >= 0))
1435                        cpu_timer_fire(timer);
1436                spin_unlock(&timer->it_lock);
1437        }
1438}
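
/*
 * Not part of this file: the timers fired by this path can be created from
 * userspace against the process CPU clock.  A minimal sketch (error handling
 * omitted); the process must actually consume CPU time for the clock to
 * advance, hence the busy loop.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig) { (void)sig; fired = 1; }

int main(void)
{
        timer_t tid;
        struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL, .sigev_signo = SIGALRM };
        struct itimerspec its = { .it_value = { .tv_sec = 1 } };  /* 1s of CPU time */

        signal(SIGALRM, on_alarm);
        timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
        timer_settime(tid, 0, &its, NULL);

        while (!fired)
                ;               /* burn CPU so CLOCK_PROCESS_CPUTIME_ID advances */

        printf("consumed roughly 1s of CPU time\n");
        return 0;
}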
1439
1440/*
1441 * Set one of the process-wide special case CPU timers.
1442 * The tsk->sighand->siglock must be held by the caller.
1443 * The *newval argument is relative and we update it to be absolute; *oldval
1444 * is absolute and we update it to be relative.
1445 */
1446void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1447                           cputime_t *newval, cputime_t *oldval)
1448{
1449        union cpu_time_count now;
1450        struct list_head *head;
1451
1452        BUG_ON(clock_idx == CPUCLOCK_SCHED);
1453        cpu_timer_sample_group(clock_idx, tsk, &now);
1454
1455        if (oldval) {
1456                if (!cputime_eq(*oldval, cputime_zero)) {
1457                        if (cputime_le(*oldval, now.cpu)) {
1458                                /* Just about to fire. */
1459                                *oldval = jiffies_to_cputime(1);
1460                        } else {
1461                                *oldval = cputime_sub(*oldval, now.cpu);
1462                        }
1463                }
1464
1465                if (cputime_eq(*newval, cputime_zero))
1466                        return;
1467                *newval = cputime_add(*newval, now.cpu);
1468
1469                /*
1470                 * If the RLIMIT_CPU timer will expire before the
1471                 * ITIMER_PROF timer, we have nothing else to do.
1472                 */
1473                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
1474                    < cputime_to_secs(*newval))
1475                        return;
1476        }
1477
1478        /*
1479         * Check whether there are any process timers already set to fire
1480         * before this one.  If so, we don't have anything more to do.
1481         */
1482        head = &tsk->signal->cpu_timers[clock_idx];
1483        if (list_empty(head) ||
1484            cputime_ge(list_first_entry(head,
1485                                  struct cpu_timer_list, entry)->expires.cpu,
1486                       *newval)) {
1487                switch (clock_idx) {
1488                case CPUCLOCK_PROF:
1489                        tsk->signal->cputime_expires.prof_exp = *newval;
1490                        break;
1491                case CPUCLOCK_VIRT:
1492                        tsk->signal->cputime_expires.virt_exp = *newval;
1493                        break;
1494                }
1495        }
1496}
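
/*
 * Not part of this file: these process-wide timers are armed from the
 * ITIMER_PROF/ITIMER_VIRTUAL (setitimer) and RLIMIT_CPU (setrlimit) update
 * paths.  From userspace the requests look like the sketch below
 * (hypothetical helper name, error handling omitted).
 */
#include <sys/resource.h>
#include <sys/time.h>

/* Cap the process at 'secs' seconds of CPU time (SIGXCPU at the soft limit)
 * and request SIGPROF every 10ms of user+system CPU time. */
static void limit_and_profile(unsigned long secs)
{
        struct rlimit rl = { .rlim_cur = secs, .rlim_max = secs + 1 };
        struct itimerval prof = {
                .it_interval = { .tv_usec = 10 * 1000 },
                .it_value    = { .tv_usec = 10 * 1000 },
        };

        setrlimit(RLIMIT_CPU, &rl);
        setitimer(ITIMER_PROF, &prof, NULL);
}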
1497
1498static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1499                            struct timespec *rqtp, struct itimerspec *it)
1500{
1501        struct k_itimer timer;
1502        int error;
1503
1504        /*
1505         * Set up a temporary timer and then wait for it to go off.
1506         */
1507        memset(&timer, 0, sizeof timer);
1508        spin_lock_init(&timer.it_lock);
1509        timer.it_clock = which_clock;
1510        timer.it_overrun = -1;
1511        error = posix_cpu_timer_create(&timer);
1512        timer.it_process = current;
1513        if (!error) {
1514                static struct itimerspec zero_it;
1515
1516                memset(it, 0, sizeof *it);
1517                it->it_value = *rqtp;
1518
1519                spin_lock_irq(&timer.it_lock);
1520                error = posix_cpu_timer_set(&timer, flags, it, NULL);
1521                if (error) {
1522                        spin_unlock_irq(&timer.it_lock);
1523                        return error;
1524                }
1525
1526                while (!signal_pending(current)) {
1527                        if (timer.it.cpu.expires.sched == 0) {
1528                                /*
1529                                 * Our timer fired and was reset.
1530                                 */
1531                                spin_unlock_irq(&timer.it_lock);
1532                                return 0;
1533                        }
1534
1535                        /*
1536                         * Block until cpu_timer_fire (or a signal) wakes us.
1537                         */
1538                        __set_current_state(TASK_INTERRUPTIBLE);
1539                        spin_unlock_irq(&timer.it_lock);
1540                        schedule();
1541                        spin_lock_irq(&timer.it_lock);
1542                }
1543
1544                /*
1545                 * We were interrupted by a signal.
1546                 */
1547                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
1548                posix_cpu_timer_set(&timer, 0, &zero_it, it);
1549                spin_unlock_irq(&timer.it_lock);
1550
1551                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
1552                        /*
1553                         * It actually did fire already.
1554                         */
1555                        return 0;
1556                }
1557
1558                error = -ERESTART_RESTARTBLOCK;
1559        }
1560
1561        return error;
1562}
1563
1564int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1565                     struct timespec *rqtp, struct timespec __user *rmtp)
1566{
1567        struct restart_block *restart_block =
1568            &current_thread_info()->restart_block;
1569        struct itimerspec it;
1570        int error;
1571
1572        /*
1573         * Diagnose required errors first.
1574         */
1575        if (CPUCLOCK_PERTHREAD(which_clock) &&
1576            (CPUCLOCK_PID(which_clock) == 0 ||
1577             CPUCLOCK_PID(which_clock) == current->pid))
1578                return -EINVAL;
1579
1580        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
1581
1582        if (error == -ERESTART_RESTARTBLOCK) {
1583
1584                if (flags & TIMER_ABSTIME)
1585                        return -ERESTARTNOHAND;
1586                /*
1587                 * Report back to the user the time still remaining.
1588                 */
1589                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1590                        return -EFAULT;
1591
1592                restart_block->fn = posix_cpu_nsleep_restart;
1593                restart_block->arg0 = which_clock;
1594                restart_block->arg1 = (unsigned long) rmtp;
1595                restart_block->arg2 = rqtp->tv_sec;
1596                restart_block->arg3 = rqtp->tv_nsec;
1597        }
1598        return error;
1599}
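
/*
 * Not part of this file: userspace reaches this path through clock_nanosleep(2)
 * on a CPU-time clock (the check above rejects sleeping on the calling thread's
 * own CPU clock).  A minimal sketch that blocks until the whole process has
 * consumed another 500ms of CPU time (error handling omitted); a second thread
 * burns CPU, since a fully idle process would never advance the clock and the
 * sleep would not complete.
 */
#include <pthread.h>
#include <time.h>

static void *spin(void *arg)
{
        (void)arg;
        for (;;)
                ;               /* keep the process CPU clock moving */
        return NULL;
}

int main(void)
{
        pthread_t worker;
        struct timespec budget = { .tv_nsec = 500 * 1000 * 1000 };
        struct timespec left;

        pthread_create(&worker, NULL, spin, NULL);

        /* Relative sleep against CLOCK_PROCESS_CPUTIME_ID; if interrupted by
         * a signal, 'left' reports the unexpired part of the request. */
        clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &budget, &left);
        return 0;
}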
1600
1601long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1602{
1603        clockid_t which_clock = restart_block->arg0;
1604        struct timespec __user *rmtp;
1605        struct timespec t;
1606        struct itimerspec it;
1607        int error;
1608
1609        rmtp = (struct timespec __user *) restart_block->arg1;
1610        t.tv_sec = restart_block->arg2;
1611        t.tv_nsec = restart_block->arg3;
1612
1613        restart_block->fn = do_no_restart_syscall;
1614        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
1615
1616        if (error == -ERESTART_RESTARTBLOCK) {
1617                /*
1618                 * Report back to the user the time still remaining.
1619                 */
1620                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1621                        return -EFAULT;
1622
1623                restart_block->fn = posix_cpu_nsleep_restart;
1624                restart_block->arg0 = which_clock;
1625                restart_block->arg1 = (unsigned long) rmtp;
1626                restart_block->arg2 = t.tv_sec;
1627                restart_block->arg3 = t.tv_nsec;
1628        }
1629        return error;
1630
1631}
1632
1633
1634#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
1635#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
1636
1637static int process_cpu_clock_getres(const clockid_t which_clock,
1638                                    struct timespec *tp)
1639{
1640        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1641}
1642static int process_cpu_clock_get(const clockid_t which_clock,
1643                                 struct timespec *tp)
1644{
1645        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1646}
1647static int process_cpu_timer_create(struct k_itimer *timer)
1648{
1649        timer->it_clock = PROCESS_CLOCK;
1650        return posix_cpu_timer_create(timer);
1651}
1652static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1653                              struct timespec *rqtp,
1654                              struct timespec __user *rmtp)
1655{
1656        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
1657}
1658static long process_cpu_nsleep_restart(struct restart_block *restart_block)
1659{
1660        return -EINVAL;
1661}
1662static int thread_cpu_clock_getres(const clockid_t which_clock,
1663                                   struct timespec *tp)
1664{
1665        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1666}
1667static int thread_cpu_clock_get(const clockid_t which_clock,
1668                                struct timespec *tp)
1669{
1670        return posix_cpu_clock_get(THREAD_CLOCK, tp);
1671}
1672static int thread_cpu_timer_create(struct k_itimer *timer)
1673{
1674        timer->it_clock = THREAD_CLOCK;
1675        return posix_cpu_timer_create(timer);
1676}
1677static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
1678                              struct timespec *rqtp, struct timespec __user *rmtp)
1679{
1680        return -EINVAL;
1681}
1682static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
1683{
1684        return -EINVAL;
1685}
1686
1687static __init int init_posix_cpu_timers(void)
1688{
1689        struct k_clock process = {
1690                .clock_getres = process_cpu_clock_getres,
1691                .clock_get = process_cpu_clock_get,
1692                .clock_set = do_posix_clock_nosettime,
1693                .timer_create = process_cpu_timer_create,
1694                .nsleep = process_cpu_nsleep,
1695                .nsleep_restart = process_cpu_nsleep_restart,
1696        };
1697        struct k_clock thread = {
1698                .clock_getres = thread_cpu_clock_getres,
1699                .clock_get = thread_cpu_clock_get,
1700                .clock_set = do_posix_clock_nosettime,
1701                .timer_create = thread_cpu_timer_create,
1702                .nsleep = thread_cpu_nsleep,
1703                .nsleep_restart = thread_cpu_nsleep_restart,
1704        };
1705
1706        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1707        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1708
1709        return 0;
1710}
1711__initcall(init_posix_cpu_timers);
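
/*
 * Not part of this file: the two clock IDs registered above are read directly
 * from userspace with the standard clock calls.  A minimal sketch comparing
 * per-thread and per-process CPU time (error handling omitted).
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec res, thread_cpu, process_cpu;

        clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res);
        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &thread_cpu);
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &process_cpu);

        printf("res %ldns  thread %ld.%09lds  process %ld.%09lds\n",
               res.tv_nsec,
               (long)thread_cpu.tv_sec, thread_cpu.tv_nsec,
               (long)process_cpu.tv_sec, process_cpu.tv_nsec);
        return 0;
}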
1712