linux/arch/s390/kernel/vtime.c
/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>

static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
        cputime_t cputime;
        __u64 timer, clock;
        int rcu_user_flag;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
                      "  STCK %1"      /* Store current tod clock value */
                      : "=m" (S390_lowcore.last_update_timer),
                        "=m" (S390_lowcore.last_update_clock) );
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

        cputime = S390_lowcore.user_timer >> 12;
        rcu_user_flag = cputime != 0;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, HARDIRQ_OFFSET, cputime);

        cputime = S390_lowcore.steal_clock;
        if ((__s64) cputime > 0) {
                cputime >>= 12;
                S390_lowcore.steal_clock -= cputime << 12;
                account_steal_time(tsk, cputime);
        }
}
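
/*
 * A note on the ">> 12" / "<< 12" scaling above: the CPU timer uses the
 * same format as the TOD clock, in which bit 51 corresponds to roughly
 * one microsecond, so a raw timer delta shifted right by 12 yields
 * microseconds (assumed here to be the granularity of cputime_t). The
 * "<< 12" subtractions keep the sub-microsecond remainder in the
 * lowcore fields so no time is lost between updates.
 */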

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 * Unlike account_process_tick this neither samples the TOD clock nor
 * accounts steal time.
 */
void account_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile ("  STPT %0"    /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer) );
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.user_timer >> 12;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}
/*
 * Update the system time based on the virtual cpu time stored by
 * entry.S in the lowcore field system_timer. Only system time is
 * accounted here; user time is handled by account_vtime.
 */
void account_system_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile ("  STPT %0"    /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer) );
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

static inline void set_vtimer(__u64 expires)
{
        __u64 timer;

        asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
                      "  SPT %1"     /* Set new value immediately afterwards */
                      : "=m" (timer) : "m" (expires) );
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;

        /* store expire time for this CPU timer */
        __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
        S390_lowcore.last_update_timer = expires;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

        /* store expire time for this CPU timer */
        __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif

void vtime_start_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &__get_cpu_var(virt_cpu_timer);

        /* CPU timer interrupt is pending, don't reprogram it */
        if (vt_list->idle & 1LL<<63)
                return;

        if (!list_empty(&vt_list->list))
                set_vtimer(vt_list->idle);
}

void vtime_stop_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &__get_cpu_var(virt_cpu_timer);

        /* nothing to do */
        if (list_empty(&vt_list->list)) {
                vt_list->idle = VTIMER_MAX_SLICE;
                goto fire;
        }

        /* save the remaining timer slice */
        asm volatile ("STPT %0" : "=m" (vt_list->idle));

        /*
         * If the CPU timer is negative we don't reprogram
         * it because an interrupt is already pending.
         */
        if (vt_list->idle & 1LL<<63)
                return;

        vt_list->offset += vt_list->to_expire - vt_list->idle;

        /*
         * We cannot halt the CPU timer, so we write a value that
         * nearly never expires (only after about 71 years) and
         * restore the saved slice when the timer is resumed.
         */
 fire:
        set_vtimer(VTIMER_MAX_SLICE);
}
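
/*
 * A note on the "71 years" above: VTIMER_MAX_SLICE is the largest
 * positive value of the 64-bit CPU timer (2^63 - 1). At roughly 2^12
 * timer units per microsecond that is about 2^51 microseconds, i.e.
 * some 2.25 * 10^15 us, which works out to a little over 71 years.
 */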

/*
 * Sorted add to a list. The list is scanned linearly until the first
 * element with a later expiry time is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *event;

        list_for_each_entry(event, head, entry) {
                if (event->expires > timer->expires) {
                        list_add_tail(&timer->entry, &event->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Run the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        void (*fn)(unsigned long);
        unsigned long data;

        if (list_empty(cb_list))
                return;

        vt_list = &__get_cpu_var(virt_cpu_timer);

        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                fn = event->function;
                data = event->data;
                fn(data);

                if (!event->interval)
                        /* delete one-shot timer */
                        list_del_init(&event->entry);
                else {
                        /* move interval timer back to list */
                        spin_lock(&vt_list->lock);
                        list_del_init(&event->entry);
                        list_add_sorted(event, &vt_list->list);
                        spin_unlock(&vt_list->lock);
                }
        }
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
        __u64 next, delta;
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        struct list_head *ptr;
        /* the callback queue */
        struct list_head cb_list;

        INIT_LIST_HEAD(&cb_list);
        vt_list = &__get_cpu_var(virt_cpu_timer);

        /* walk timer list, fire all expired events */
        spin_lock(&vt_list->lock);

        if (vt_list->to_expire < VTIMER_MAX_SLICE)
                vt_list->offset += vt_list->to_expire;

        list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
                if (event->expires > vt_list->offset)
                        /* found first unexpired event, leave */
                        break;

                /* recharge interval timer, we have to add the offset */
                if (event->interval)
                        event->expires = event->interval + vt_list->offset;

                /* move expired timer to the callback queue */
                list_move_tail(&event->entry, &cb_list);
        }
        spin_unlock(&vt_list->lock);
        do_callbacks(&cb_list);

        /* next event is first in list */
        spin_lock(&vt_list->lock);
        if (!list_empty(&vt_list->list)) {
                ptr = vt_list->list.next;
                event = list_entry(ptr, struct vtimer_list, entry);
                next = event->expires - vt_list->offset;

                /*
                 * Add the time spent in this interrupt handler and in
                 * the callback functions to the offset.
                 */
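                /*
                 * The CPU timer kept counting down past zero while the
                 * callbacks ran, so STPT returns a negative value here;
                 * the two's complement (~delta + 1) converts it into the
                 * positive time elapsed since the expiry.
                 */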
                asm volatile ("STPT %0" : "=m" (delta));
                delta = 0xffffffffffffffffLL - delta + 1;
                vt_list->offset += delta;
                next -= delta;
        } else {
                vt_list->offset = 0;
                next = VTIMER_MAX_SLICE;
        }
        spin_unlock(&vt_list->lock);
        set_vtimer(next);
}

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
        spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return !list_empty(&timer->entry);
}

/*
 * This function must only run on the CPU the timer is queued on.
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
        unsigned long flags;
        __u64 done;
        struct vtimer_list *event;
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        BUG_ON(timer->cpu != smp_processor_id());

        /* if list is empty we only have to set the timer */
        if (list_empty(&vt_list->list)) {
                /*
                 * Reset the offset; this may happen if the last timer
                 * was just deleted by mod_virt_timer and the interrupt
                 * has not fired yet.
                 */
                vt_list->offset = 0;
                goto fire;
        }

        /* save progress */
        asm volatile ("STPT %0" : "=m" (done));

        /* calculate completed work */
        done = vt_list->to_expire - done + vt_list->offset;
        vt_list->offset = 0;

        list_for_each_entry(event, &vt_list->list, entry)
                event->expires -= done;

 fire:
        list_add_sorted(timer, &vt_list->list);

        /* get first element, which is the next vtimer slice */
        event = list_entry(vt_list->list.next, struct vtimer_list, entry);

        set_vtimer(event->expires);
        spin_unlock_irqrestore(&vt_list->lock, flags);
        /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
        put_cpu();
}

static inline void prepare_vtimer(struct vtimer_list *timer)
{
        BUG_ON(!timer->function);
        BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
        BUG_ON(vtimer_pending(timer));
        timer->cpu = get_cpu();
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;
        prepare_vtimer(timer);
        timer->interval = 0;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);
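
/*
 * Usage sketch (hypothetical caller; my_vtimer and my_vtimer_fn are
 * illustrative names, not part of this file). A one-shot virtual timer
 * that fires after roughly one millisecond of consumed CPU time:
 *
 *        static struct vtimer_list my_vtimer;
 *
 *        static void my_vtimer_fn(unsigned long data)
 *        {
 *                ... runs from the cpu timer interrupt on the owning CPU
 *        }
 *
 *        init_virt_timer(&my_vtimer);
 *        my_vtimer.function = my_vtimer_fn;
 *        my_vtimer.data = 0;
 *        my_vtimer.expires = 1000ULL << 12;   ... ~1000 us in timer units
 *        add_virt_timer(&my_vtimer);
 */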

/*
 * add_virt_timer_periodic - add an interval (periodic) virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;
        prepare_vtimer(timer);
        timer->interval = timer->expires;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
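
/*
 * The periodic variant is set up the same way as in the sketch above;
 * add_virt_timer_periodic() copies expires into interval, so the
 * interrupt handler re-arms the timer after every expiry until it is
 * removed with del_virt_timer().
 */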

/*
 * If we change a pending timer, this function must be called on the
 * CPU the timer is pending on, e.g. via smp_call_function_single().
 *
 * The original mod_timer adds the timer if it is not pending. For
 * compatibility we do the same. The timer will be added on the current
 * CPU as a one-shot timer.
 *
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
        struct vtimer_queue *vt_list;
        unsigned long flags;
        int cpu;

        BUG_ON(!timer->function);
        BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && vtimer_pending(timer))
                return 1;

        cpu = get_cpu();
        vt_list = &per_cpu(virt_cpu_timer, cpu);

        /* check if we run on the right CPU */
        BUG_ON(timer->cpu != cpu);

        /* disable interrupts before testing if the timer is pending */
        spin_lock_irqsave(&vt_list->lock, flags);

        /* if the timer isn't pending, add it on the current CPU */
        if (!vtimer_pending(timer)) {
                spin_unlock_irqrestore(&vt_list->lock, flags);
                /* we do not activate an interval timer with mod_virt_timer */
                timer->interval = 0;
                timer->expires = expires;
                timer->cpu = cpu;
                internal_add_vtimer(timer);
                return 0;
        }

        list_del_init(&timer->entry);
        timer->expires = expires;

        /* also change the interval if we have an interval timer */
        if (timer->interval)
                timer->interval = expires;

        /* the timer can't expire anymore, so we can release the lock */
        spin_unlock_irqrestore(&vt_list->lock, flags);
        internal_add_vtimer(timer);
        return 1;
}
EXPORT_SYMBOL(mod_virt_timer);
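
/*
 * Sketch of the two mod_virt_timer() outcomes, reusing the hypothetical
 * my_vtimer from above:
 *
 *        mod_virt_timer(&my_vtimer, 500ULL << 12);
 *
 * This returns 1 if my_vtimer was pending and has been moved to the new
 * expiry (~500 us under the unit assumption above), or 0 if it was not
 * pending and has been added on the current CPU as a one-shot timer.
 */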

/*
 * Delete a virtual timer.
 *
 * Returns whether the deleted timer was pending (1) or not (0).
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;
        struct vtimer_queue *vt_list;

        /* check if timer is pending */
        if (!vtimer_pending(timer))
                return 0;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        /* we don't interrupt a running timer, just let it expire! */
        list_del_init(&timer->entry);

        /* last timer removed */
        if (list_empty(&vt_list->list)) {
                vt_list->to_expire = 0;
                vt_list->offset = 0;
        }

        spin_unlock_irqrestore(&vt_list->lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
        struct vtimer_queue *vt_list;

        /* kick the virtual timer */
        S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
        S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
        asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));

        /* enable cpu timer interrupts */
        __ctl_set_bit(0, 10);

        vt_list = &__get_cpu_var(virt_cpu_timer);
        INIT_LIST_HEAD(&vt_list->list);
        spin_lock_init(&vt_list->lock);
        vt_list->to_expire = 0;
        vt_list->offset = 0;
        vt_list->idle = 0;
}

void __init vtime_init(void)
{
        /* request the cpu timer external interrupt */
        if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
                                              &ext_int_info_timer) != 0)
                panic("Couldn't request external interrupt 0x1005");

        /* Enable cpu timer interrupts on the boot cpu. */
        init_cpu_vtimer();
}