linux/arch/x86/xen/multicalls.c
/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  To queue a multicall for
 * issuing, you can allocate a multicall slot for the call and its
 * arguments, along with scratch space that the arguments can point to
 * (for passing pointers to structures, etc).  When the batch is
 * actually issued, all the entry and argument space is freed for
 * reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
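
/*
 * A typical caller looks something like this (a sketch only: the
 * MULTI_update_va_mapping() wrapper comes from <asm/xen/hypercall.h>
 * and xen_mc_batch()/xen_mc_issue() from multicalls.h; real call
 * sites vary):
 *
 *	struct multicall_space mcs;
 *
 *	xen_mc_batch();			  // disable irqs, start batching
 *	mcs = xen_mc_entry(0);		  // reserve a slot, no extra arg space
 *	MULTI_update_va_mapping(mcs.mc, vaddr, pte, 0);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);  // flush now unless in lazy MMU mode
 */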
#include <linux/percpu.h>
#include <linux/hardirq.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"

/* When set, keep a pre-hypercall copy of each batch for debug output. */
#define MC_DEBUG	1

#define MC_BATCH	32
#define MC_ARGS		(MC_BATCH * 16)

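/*
 * Per-cpu batch buffer: queued multicall entries, the argument space
 * they point into, and callbacks to run after the next flush.
 */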
struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];	/* pre-call copy of entries */
#endif
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	unsigned mcidx, argidx, cbidx;	/* fill levels of the arrays above */
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

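/*
 * Issue the queued batch of hypercalls and then run any pending
 * callbacks.  Must be called with preemption disabled; BUGs if any
 * call in the batch failed.
 */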
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	if (b->mcidx) {
#if MC_DEBUG
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
#endif

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

#if MC_DEBUG
		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			dump_stack();
			for (i = 0; i < b->mcidx; i++) {
				printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
				       i+1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result);
			}
		}
#endif

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	local_irq_restore(flags);

	/* Run callbacks with interrupts re-enabled. */
	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	BUG_ON(ret);
}

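/*
 * Reserve the next multicall slot plus 'args' bytes of argument space,
 * flushing the current batch first if either buffer would overflow.
 */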
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (argidx + args) > MC_ARGS) {
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}

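/*
 * Try to extend the argument space of the most recently queued
 * multicall, provided it is still the expected 'op'.  Returns
 * { NULL, NULL } if the arguments can't be extended in place.
 */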
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == 0)
		return ret;

	if (b->entries[b->mcidx - 1].op != op)
		return ret;

	if ((b->argidx + size) > MC_ARGS)
		return ret;

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}

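/*
 * Queue a callback to run when the current batch is flushed, flushing
 * first if the callback array is already full.
 */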
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH)
		xen_mc_flush();

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}