linux/arch/x86/xen/spinlock.c
   1<"a>/*<"sprn>
   2<"a> * Split spinlock implementation out into its own file, so it crn be<"sprn>
   3<"a> * compiled in a FTRACE-compatible way.<"sprn>
   4<"a> */<"sprn>
   5<"a>#include <linux/kernel_stat.h<"a>>
   6<"a>#include <linux/spinlock.h<"a>>
   7<"a>#include <linux/debugfs.h<"a>>
   8<"a>#include <linux/log2.h<"a>>
   9<"a>#include <linux/gfp.h<"a>>
  
  11<"a>#include <asm/paravirt.h<"a>>
  12opa>
  13<"a>#include <xen"interface/xen.h<"a>>
  14<"a>#include <xen"events.h<"a>>
  15opa>
  16<"a>#include "xen-ops.h<"a>"
  17<"a>#include "debugfs.h<"a>"

#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
        u64 taken;
        u32 taken_slow;
        u32 taken_slow_nested;
        u32 taken_slow_pickup;
        u32 taken_slow_spurious;
        u32 taken_slow_irqenable;

        u64 released;
        u32 released_slow;
        u32 released_slow_kicked;

#define HISTO_BUCKETS   30
        u32 histo_spin_total[HISTO_BUCKETS+1];
        u32 histo_spin_spinning[HISTO_BUCKETS+1];
        u32 histo_spin_blocked[HISTO_BUCKETS+1];

        u64 time_total;
        u64 time_spinning;
        u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

static inline void check_zero(void)
{
        if (unlikely(zero_stats)) {
                memset(&spinlock_stats, 0, sizeof(spinlock_stats));
                zero_stats = 0;
        }
}

#define ADD_STATS(elem, val)                    \
        do { check_zero(); spinlock_stats.elem += (val); } while(0)

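/*
 * Note: zeroing the statistics is lazy.  Writing a non-zero value to
 * zero_stats (exposed through debugfs when CONFIG_XEN_DEBUG_FS is set)
 * only raises a flag; the counters are actually memset in check_zero()
 * the next time ADD_STATS() or one of the accumulators runs.
 */
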
static inline u64 spin_time_start(void)
{
        return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
        unsigned index = ilog2(delta);

        check_zero();

        if (index < HISTO_BUCKETS)
                array[index]++;
        else
                array[HISTO_BUCKETS]++;
}
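
/*
 * Note: the histograms are log2-bucketed: a delta d (in ns, from
 * xen_clocksource_read()) is counted in bucket ilog2(d), e.g. any
 * delta in [1024, 2047] falls into bucket 10, and anything of
 * 2^HISTO_BUCKETS ns or more is collapsed into the final bucket.
 */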

static inline void spin_time_accum_spinning(u64 start)
{
        u32 delta = xen_clocksource_read() - start;

        __spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
        spinlock_stats.time_spinning += delta;
}

static inline void spin_time_accum_total(u64 start)
{
        u32 delta = xen_clocksource_read() - start;

        __spin_time_accum(delta, spinlock_stats.histo_spin_total);
        spinlock_stats.time_total += delta;
}

static inline void spin_time_accum_blocked(u64 start)
{
        u32 delta = xen_clocksource_read() - start;

        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
        spinlock_stats.time_blocked += delta;
}
  99<"a>#else  /* !CONFIG_XEN_DEBUG_FS */<"sprn>
100<"a>#define TIMEOUT<"a>                 (1 << 10)
101<"a>#define ADD_STATS<"a>(elemopa>, val<"a>)    do { (void)(val<"a>); } while(0)
102opa>
103opa>static inline<"a> u64<"a> spin_time_start<"a>(void)
104<"a>{
105opa>        return 0;
106opa>}
107opa>
108opa>static inline<"a> void spin_time_accum_total<"a>(u64<"a> start<"a>)
109<"a>{
110opa>}
111<"a>static inline<"a> void spin_time_accum_spinning<"a>(u64<"a> start<"a>)
 112opa>{
 113<"a>}
 114opa>static inline<"a> void spin_time_accum_blocked<"a>(u64<"a> start<"a>)
 115<"a>{
 116opa>}
 117<"a>#endif  /* CONFIG_XEN_DEBUG_FS */<"sprn>
 118opa>
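/*
 * Note: when CONFIG_XEN_DEBUG_FS is not set, TIMEOUT is a plain constant
 * and the stubs above compile away entirely, so the statistics add no
 * overhead to the lock fast path.
 */
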
struct xen_spinlock {
        unsigned char lock;             /* 0 -> free; 1 -> locked */
        unsigned short spinners;        /* count of waiting cpus */
};

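/*
 * Note: the lock functions below simply cast the generic arch_spinlock
 * pointer to struct xen_spinlock, so this layout (a lock byte plus a
 * 16-bit count of slow-path waiters) has to fit within arch_spinlock_t.
 */
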
static int xen_spin_is_locked(struct arch_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        return xl->lock != 0;
}

static int xen_spin_is_contended(struct arch_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        /* Not strictly true; this is only the count of contended
           lock-takers entering the slow path. */
        return xl->spinners != 0;
}

static int xen_spin_trylock(struct arch_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %b0,%1"
            : "+q" (old), "+m" (xl->lock) : : "memory");

        return old == 0;
}

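/*
 * Note: xchgb with a memory operand is implicitly locked on x86, so the
 * exchange above atomically stores 1 into xl->lock and returns the
 * previous value in 'old'; the trylock succeeds only if the lock byte
 * was previously 0.
 */
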
 151<"a>static DEFINE_PER_CPU<"a>(int, lock_kicker_irq<"a>) = -1;
 152opa>static DEFINE_PER_CPU<"a>(struct xen_spinlock<"a> *, lock_spinners<"a>);
 153opa>
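/*
 * Note: lock_kicker_irq is this CPU's "kicker" event-channel irq, set up
 * in xen_init_lock_cpu() and used only as a wakeup target for blocked
 * spinners; lock_spinners records which lock (if any) this CPU is
 * currently waiting for, so the unlock slow path knows whom to kick.
 */
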
 154<"a>/*<"sprn>
 155opa> * Mark a cpu as interested in a lock.  Returns the CPU's previous<"sprn>
 156opa> * lock of interest, in case we got preempted by rn interrupt.<"sprn>
 157opa> */<"sprn>
 158opa>static inline<"a> struct xen_spinlock<"a> *spinning_lock<"a>(struct xen_spinlock<"a> *xlopa>)
 159<"a>{
160opa>        struct xen_spinlock<"a> *prev<"a>;
 161opa>
 162opa>        prev<"a> = __get_cpu_var<"a>(lock_spinners<"a>);
 163opa>        __get_cpu_var<"a>(lock_spinners<"a>) = xlopa>;
 164opa>
 165opa>        wmb<"a>();                  /* set lock of interest before count */<"sprn>
 166opa>
 167opa>        asm(LOCK_PREFIX<"a> " incw %0"<"sprn>
 168opa>            : "+m"<"sprn> (xlopa>->spinners<"a>) : : "memory"<"sprn>);
 169opa>
170opa>        return prev<"a>;
 171opa>}
 172opa>
 173<"a>/*<"sprn>
 174<"a> * Mark a cpu as no longer interested in a lock.  Restores previous<"sprn>
 175opa> * lock of interest (NULL for none).<"sprn>
 176opa> */<"sprn>
 177<"a>static inline<"a> void unspinning_lock<"a>(struct xen_spinlock<"a> *xlopa>, struct xen_spinlock<"a> *prev<"a>)
 178opa>{
 179opa>        asm(LOCK_PREFIX<"a> " decw %0"<"sprn>
180opa>            : "+m"<"sprn> (xlopa>->spinners<"a>) : : "memory"<"sprn>);
 181opa>        wmb<"a>();                  /* decrement count before restoring lock */<"sprn>
 182opa>        __get_cpu_var<"a>(lock_spinners<"a>) = prev<"a>;
 183<"a>}
 184opa>
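/*
 * Note: spinning_lock()/unspinning_lock() nest like a stack.  If an
 * interrupt handler ends up in the slow path for a second lock while
 * this CPU is already registered as spinning on another one, the old
 * lock_spinners value is saved and restored, and prev != NULL is how
 * xen_spin_lock_slow() detects that nested case.
 */
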
 185<"a>static noinlineopa> int xen_spin_lock_slow<"a>(struct arch_spinlockopa> *lock<"a>, bool<"a> irq_enable<"a>)
 186opa>{
 187opa>        struct xen_spinlock<"a> *xlopa> = (struct xen_spinlock<"a> *)lock<"a>;
 188opa>        struct xen_spinlock<"a> *prev<"a>;
 189opa>        int irq<"a> = __get_cpu_var<"a>(lock_kicker_irq<"a>);
190opa>        int ret<"a>;
 191opa>        u64<"a> start<"a>;
 192opa>
 193opa>        /* If kicker interrupts not initialized yet, just spin */<"sprn>
 194opa>        if (irq<"a> == -1)
 195opa>                return 0;
 196opa>
 197opa>        start<"a> = spin_time_start<"a>();
 198opa>
 199opa>        /* announce we're spinning */<"sprn>
200opa>        prev<"a> = spinning_lock<"a>(xlopa>);
201opa>
202opa>        ADD_STATS<"a>(taken_slow<"a>, 1);
203opa>        ADD_STATS<"a>(taken_slow_nested<"a>, prev<"a> != NULLopa>);
204opa>
205opa>        do {
206opa>                unsigned long flags<"a>;
207opa>
208opa>                /* clear pending */<"sprn>
209opa>                xen_clear_irq_pending<"a>(irq<"a>);
21
211opa>                /* check again make sure it didn't become free while<"sprn>
 212opa>                   we weren't looking  */<"sprn>
 213opa>                ret<"a> = xen_spin_trylock<"a>(lock<"a>);
 214opa>                if (ret<"a>) {
 215opa>                        ADD_STATS<"a>(taken_slow_pickup<"a>, 1);
 216opa>
 217opa>                        /*<"sprn>
 218opa>                         * If we interrupted another spinlock while it<"sprn>
 219opa>                         * was blocking, make sure it doesn't block<"sprn>
220opa>                         * without rechecking the lock.<"sprn>
 221opa>                         */<"sprn>
 222opa>                        if (prev<"a> != NULLopa>)
 223opa>                                xen_set_irq_pending<"a>(irq<"a>);
 224opa>                        goto out<"a>;
 225opa>                }
 226opa>
 227opa>                flags<"a> = __raw_local_save_flags<"a>();
 228opa>                if (irq_enable<"a>) {
 229opa>                        ADD_STATS<"a>(taken_slow_irqenable<"a>, 1);
230opa>                        raw_local_irq_enable<"a>();
 231opa>                }
 232opa>
 233opa>                /*<"sprn>
 234<"a>                 * Block until irq becomes pending.  If we're<"sprn>
 235opa>                 * interrupted at this point (after the trylock but<"sprn>
 236opa>                 * before entering the block), then the nested lock<"sprn>
 237opa>                 * handler guarantees that the irq will be left<"sprn>
 238opa>                 * pending if there's any chance the lock became free;<"sprn>
 239opa>                 * xen_poll_irq() returns immediately if the irq is<"sprn>
240opa>                 * pending.<"sprn>
 241opa>                 */<"sprn>
 242opa>                xen_poll_irq<"a>(irq<"a>);
 243opa>
 244opa>                raw_local_irq_restore<"a>(flags<"a>);
 245opa>
 246opa>                ADD_STATS<"a>(taken_slow_spurious<"a>, !xen_test_irq_pending<"a>(irq<"a>));
 247opa>        } while (!xen_test_irq_pending<"a>(irq<"a>)); /* check for spurious wakeups */<"sprn>
 248opa>
 249opa>        kstat_incr_irqs_this_cpu<"a>(irq<"a>, irq_to_desc<"a>(irq<"a>));
25
 251<"a>out<"a>:
 252opa>        unspinning_lock<"a>(xlopa>, prev<"a>);
 253opa>        spin_time_accum_blocked<"a>(start<"a>);
 254opa>
 255opa>        return ret<"a>;
 256opa>}
 257opa>
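/*
 * Note: rather than spinning, the slow path above blocks the VCPU on the
 * per-CPU kicker event channel via xen_poll_irq(); it is woken either by
 * the IPI sent from the unlock slow path or by an unrelated event, so the
 * pending state is re-checked to filter out spurious wakeups.
 */
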
static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        unsigned timeout;
        u8 oldval;
        u64 start_spin;

        ADD_STATS(taken, 1);

        start_spin = spin_time_start();

        do {
                u64 start_spin_fast = spin_time_start();

                timeout = TIMEOUT;

                asm("1: xchgb %1,%0\n"
                    "   testb %1,%1\n"
                    "   jz 3f\n"
                    "2: rep;nop\n"
                    "   cmpb $0,%0\n"
                    "   je 1b\n"
                    "   dec %2\n"
                    "   jnz 2b\n"
                    "3:\n"
                    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
                    : "1" (1)
                    : "memory");

                spin_time_accum_spinning(start_spin_fast);

        } while (unlikely(oldval != 0 &&
                          (TIMEOUT == ~0 ||
                           !xen_spin_lock_slow(lock, irq_enable))));

        spin_time_accum_total(start_spin);
}

static void xen_spin_lock(struct arch_spinlock *lock)
{
        __xen_spin_lock(lock, false);
}

static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
        __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
        int cpu;

        ADD_STATS(released_slow, 1);

        for_each_online_cpu(cpu) {
                /* XXX should mix up next cpu selection */
                if (per_cpu(lock_spinners, cpu) == xl) {
                        ADD_STATS(released_slow_kicked, 1);
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
                }
        }
}

static void xen_spin_unlock(struct arch_spinlock *lock)
{
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;

        ADD_STATS(released, 1);

        smp_wmb();              /* make sure no writes get moved after unlock */
        xl->lock = 0;           /* release lock */

        /*
         * Make sure unlock happens before checking for waiting
         * spinners.  We need a strong barrier to enforce the
         * write-read ordering to different memory locations, as the
         * CPU makes no implied guarantees about their ordering.
         */
        mb();

        if (unlikely(xl->spinners))
                xen_spin_unlock_slow(xl);
}

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        BUG();
        return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
        int irq;
        const char *name;

        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
                                     dummy_handler,
                                     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                     name,
                                     NULL);

        if (irq >= 0) {
                disable_irq(irq); /* make sure it's never delivered */
                per_cpu(lock_kicker_irq, cpu) = irq;
        }

        printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
        unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
}

void __init xen_init_spinlocks(void)
{
        pv_lock_ops.spin_is_locked = xen_spin_is_locked;
        pv_lock_ops.spin_is_contended = xen_spin_is_contended;
        pv_lock_ops.spin_lock = xen_spin_lock;
        pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
        pv_lock_ops.spin_trylock = xen_spin_trylock;
        pv_lock_ops.spin_unlock = xen_spin_unlock;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
        struct dentry *d_xen = xen_init_debugfs();

        if (d_xen == NULL)
                return -ENOMEM;

        d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

        debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

        debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow);
        debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_nested);
        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_pickup);
        debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_spurious);
        debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
                           &spinlock_stats.taken_slow_irqenable);

        debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
        debugfs_create_u32("released_slow", 0444, d_spin_debug,
                           &spinlock_stats.released_slow);
        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
                           &spinlock_stats.released_slow_kicked);

        debugfs_create_u64("time_spinning", 0444, d_spin_debug,
                           &spinlock_stats.time_spinning);
        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
                           &spinlock_stats.time_blocked);
        debugfs_create_u64("time_total", 0444, d_spin_debug,
                           &spinlock_stats.time_total);

        xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
                                     spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
        xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
                                     spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
        xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                                     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

        return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif  /* CONFIG_XEN_DEBUG_FS */
footer"> The original LXR software bycee; nners" clhttp://sourcrn> ge.net/projects/lxr">LXR for ust yf="+, this experi spual versiof=bycnners" clmailto:lxr@ 23ux.no">lxr@ 23ux.no if.
subfooter"> lxr. 23ux.no kindly hoa hr=bycnners" clhttp://www.redpill- 23pro.no">Redpill L23pro ASf="+, provider of L23ux4cofsultef= and operatiofs services since 1995.