linux/arch/powerpc/platforms/iseries/lpevents.c
/*
 * Copyright (C) 2001 Mike Corrigan  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/export.h>

#include <asm/system.h>
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include "it_lp_naca.h"

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition.  This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
        "Hypervisor",
        "Machine Facilities",
        "Session Manager",
        "SPD I/O",
        "Virtual Bus",
        "PCI I/O",
        "RIO I/O",
        "Virtual Lan",
        "Virtual I/O"
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];

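/*
 * Events on the queue are variable sized, padded out to a multiple of
 * IT_LP_EVENT_ALIGN (the 64-byte boundaries mentioned in
 * hvlpevent_clear_valid() below).  As an illustration of the rounding
 * arithmetic used here and there: for an event with xSizeMinus1 = 200
 * and 64-byte alignment, (200 + 64) / 64 = 4 in integer division, so
 * the cursor advances by 4 * 64 = 256 bytes, the 201-byte event rounded
 * up to the next aligned boundary.
 */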
static struct HvLpEvent *get_next_hvlpevent(void)
{
        struct HvLpEvent *event;

        event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

        if (hvlpevent_is_valid(event)) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
                hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
                                IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
                                        IT_LP_EVENT_ALIGN;

                /* Wrap to beginning if no room at end */
                if (hvlpevent_queue.hq_current_event >
                                hvlpevent_queue.hq_last_event) {
                        hvlpevent_queue.hq_current_event =
                                hvlpevent_queue.hq_event_stack;
                }
        } else {
                event = NULL;
        }

        return event;
}

static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

        return hvlpevent_is_valid(next_event) ||
                hvlpevent_queue.hq_overflow_pending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
        /* Tell the Hypervisor that we're done with this event.
         * Also clear bits within this event that might look like valid
         * bits, i.e. those on 64-byte boundaries.
         */
        struct HvLpEvent *tmp;
        unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
                                IT_LP_EVENT_ALIGN) - 1;

        switch (extra) {
        case 3:
                tmp = (struct HvLpEvent *)((char *)event + 3 * IT_LP_EVENT_ALIGN);
                hvlpevent_invalidate(tmp);
                /* fall through */
        case 2:
                tmp = (struct HvLpEvent *)((char *)event + 2 * IT_LP_EVENT_ALIGN);
                hvlpevent_invalidate(tmp);
                /* fall through */
        case 1:
                tmp = (struct HvLpEvent *)((char *)event + 1 * IT_LP_EVENT_ALIGN);
                hvlpevent_invalidate(tmp);
        }

        mb();

        hvlpevent_invalidate(event);
}

void process_hvlpevents(void)
{
        struct HvLpEvent *event;

 restart:
        /* If we have recursed, just return */
        if (!spin_trylock(&hvlpevent_queue.hq_lock))
                return;

        for (;;) {
                event = get_next_hvlpevent();
                if (event) {
                        /* Call the appropriate handler here, passing
                         * a pointer to the LpEvent.  The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half (perhaps for
                         * an ACK).
                         *
                         * Handlers are responsible for ACK processing.
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so no type check is necessary
                         * here!
                         */
                        if (event->xType < HvLpEvent_Type_NumTypes)
                                __get_cpu_var(hvlpevent_counts)[event->xType]++;
                        if (event->xType < HvLpEvent_Type_NumTypes &&
                                        lpEventHandler[event->xType])
                                lpEventHandler[event->xType](event);
                        else {
                                u8 type = event->xType;

                                /*
                                 * Don't printk while holding the spinlock:
                                 * printk may require ACK events from the HV
                                 * to send any characters there.
                                 */
                                hvlpevent_clear_valid(event);
                                spin_unlock(&hvlpevent_queue.hq_lock);
                                printk(KERN_INFO
                                        "Unexpected Lp Event type=%d\n", type);
                                goto restart;
                        }

                        hvlpevent_clear_valid(event);
                } else if (hvlpevent_queue.hq_overflow_pending) {
                        /*
                         * No more valid events.  If overflow events are
                         * pending, process them.
                         */
                        HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
                } else {
                        break;
                }
        }

        spin_unlock(&hvlpevent_queue.hq_lock);
}
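
/*
 * Illustrative sketch only (not part of the original driver): the shape
 * of a handler that process_hvlpevents() expects.  Handlers run with
 * hq_lock held, so they must not sleep, and must copy the event if a
 * bottom half needs it later; the queue slot is invalidated as soon as
 * the handler returns.  "example_handler" and "example_copy" are
 * hypothetical names used for this sketch.
 */
#if 0
static unsigned char example_copy[IT_LP_EVENT_MAX_SIZE];

static void example_handler(struct HvLpEvent *event)
{
        if (event == NULL)
                return;
        /* Keep a private copy; xSizeMinus1 is the event size minus one. */
        memcpy(example_copy, event, event->xSizeMinus1 + 1);
        /* An acking handler would issue its ACK from here. */
}
#endif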

static int set_spread_lpevents(char *str)
{
        unsigned long val = simple_strtoul(str, NULL, 0);

        /*
         * The parameter is the number of processors to share in processing
         * lp events.
         */
        if ((val > 0) && (val <= NR_CPUS)) {
                spread_lpevents = val;
                printk("lpevent processing spread over %ld processors\n", val);
        } else {
                printk("invalid spread_lpevents %ld\n", val);
        }

        return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
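
/*
 * For example, booting with "spread_lpevents=2" limits LpEvent
 * processing to the first two logical CPUs: hvlpevent_is_pending()
 * reports no work on any CPU whose id is at or above the limit.
 */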

void __init setup_hvlpevent_queue(void)
{
        void *eventStack;

        spin_lock_init(&hvlpevent_queue.hq_lock);

        /* Allocate a page for the Event Stack. */
        eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
        memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);

        /* Invoke the hypervisor to initialize the event stack */
        HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);

        hvlpevent_queue.hq_event_stack = eventStack;
        hvlpevent_queue.hq_current_event = eventStack;
        /* hq_last_event is the last address at which a maximum-size
         * event can still start without running off the stack. */
        hvlpevent_queue.hq_last_event = (char *)eventStack +
                (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
        hvlpevent_queue.hq_index = 0;
}

/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
        if (eventType < HvLpEvent_Type_NumTypes) {
                lpEventHandler[eventType] = handler;
                return 0;
        }
        return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);

int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
        might_sleep();

        if (eventType < HvLpEvent_Type_NumTypes) {
                if (!lpEventHandlerPaths[eventType]) {
                        lpEventHandler[eventType] = NULL;
                        /*
                         * We now sleep until all other CPUs have scheduled.
                         * This ensures that the deletion is seen by all
                         * other CPUs, and that the deleted handler isn't
                         * still running on another CPU when we return.
                         */
                        synchronize_sched();
                        return 0;
                }
        }
        return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition.  It is needed
 * only for VirtualIo, VirtualLan and SessionMgr; for the other types,
 * zero indicates that our own partition index should be used.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
        if ((eventType < HvLpEvent_Type_NumTypes) &&
                        lpEventHandler[eventType]) {
                if (lpIndex == 0)
                        lpIndex = itLpNaca.xLpIndex;
                HvCallEvent_openLpEventPath(lpIndex, eventType);
                ++lpEventHandlerPaths[eventType];
                return 0;
        }
        return 1;
}
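
/*
 * Illustrative sketch only: a typical caller registers a handler and
 * then opens a path for that event type.  "example_handler" is the
 * hypothetical handler sketched above, and HvLpEvent_Type_VirtualIo is
 * assumed to be the enum value behind the "Virtual I/O" entry of
 * event_types[].  Passing lpIndex == 0 falls back to our own
 * partition index, as described above.
 */
#if 0
static int __init example_init(void)
{
        if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                                      &example_handler))
                return -EBUSY;
        if (HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0)) {
                HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
                return -EIO;
        }
        return 0;
}
#endif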

int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
        if ((eventType < HvLpEvent_Type_NumTypes) &&
                        lpEventHandler[eventType] &&
                        lpEventHandlerPaths[eventType]) {
                if (lpIndex == 0)
                        lpIndex = itLpNaca.xLpIndex;
                HvCallEvent_closeLpEventPath(lpIndex, eventType);
                --lpEventHandlerPaths[eventType];
                return 0;
        }
        return 1;
}

static int proc_lpevents_show(struct seq_file *m, void *v)
{
        int cpu, i;
        unsigned long sum;
        static unsigned long cpu_totals[NR_CPUS];

        /* FIXME: do we care that there's no locking here? */
        sum = 0;
        for_each_online_cpu(cpu) {
                cpu_totals[cpu] = 0;
                for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
                        cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
                }
                sum += cpu_totals[cpu];
        }

        seq_printf(m, "LpEventQueue 0\n");
        seq_printf(m, "  events processed:\t%lu\n", sum);

        for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
                sum = 0;
                for_each_online_cpu(cpu) {
                        sum += per_cpu(hvlpevent_counts, cpu)[i];
                }

                seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
        }

        seq_printf(m, "\n  events processed by processor:\n");

        for_each_online_cpu(cpu) {
                seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
        }

        return 0;
}
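
/*
 * The output of /proc/iSeries/lpevents therefore has this shape (the
 * counts below are illustrative, not real measurements):
 *
 *   LpEventQueue 0
 *     events processed:   1234
 *       Hypervisor                   56
 *       ...
 *       Virtual I/O                 789
 *
 *     events processed by processor:
 *       CPU00         617
 *       CPU01         617
 */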

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_lpevents_show, NULL);
}

static const struct file_operations proc_lpevents_operations = {
        .open           = proc_lpevents_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_lpevents_init(void)
{
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return 0;

        proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
                    &proc_lpevents_operations);
        return 0;
}
__initcall(proc_lpevents_init);