linux/lib/cpu_rmap.c
 L1">4 41./a>.spa  class="comment">/*./spa  >
 L2">4 42./a>.spa  class="comment"> * cpu_rmap.c: CPU affinity reverse-map support./spa  >
 L3">4 43./a>.spa  class="comment"> * Copyright 2011 Solarflare Communica"
	 s Inc../spa  >
 L4">4 44./a>.spa  class="comment"> *./spa  >
 L5">4 45./a>.spa  class="comment"> * This program is free software; you ca  redistribute it and/or modify it./spa  >
 L6">4 46./a>.spa  class="comment"> * under the terms of the GNU General Public License vers
 L7">4 47./a>.spa  class="comment"> * by the Free Software Founda"
	 , incorporated herein by reference../spa  >
 L8">4 48./a>.spa  class="comment"> */./spa  >
 L9">4 49./a>t
 L10">4 ="v4a>#include <linux/cpu_rmap.hv4a>>t
 L11">4 11v4a>#ifdef4.a href="+code=CONFIG_GENERIC_HARDIRQS" class="sref">CONFIG_GENERIC_HARDIRQS./a>t
 L12">4 12v4a>#include <linux/interrupt.hv4a>>t
 L13">4 13./a>#endift
 L14">4 14v4a>#include <linux/export.hv4a>>t
 L15">4 15./a>t
 L16">4 16./a>.spa  class="comment">/*./spa  >
 L17">4 17./a>.spa  class="comment"> * These func"
	 s maintain a mapping from CPUs to some ordered set of./spa  >
 L18">4 18./a>.spa  class="comment"> * objects with CPU affinities.  This ca  be seen as a reverse-map of./spa  >
 L19">4 19./a>.spa  class="comment"> * CPU affinity.  However, we do not assume that the object affinities./spa  >
 L20">4 20./a>.spa  class="comment"> * cover all CPUs in the system.  For those CPUs not directly covered./spa  >
 L21">4 21./a>.spa  class="comment"> * by object affinities, we attempt to find a nearest object based on./spa  >
 L22">4 22./a>.spa  class="comment"> * CPU topology../spa  >
 L23">4 23./a>.spa  class="comment"> */./spa  >
 L24">4 24./a>t
 L25">4 25./a>.spa  class="comment">/**./spa  >
 L26">4 26./a>.spa  class="comment"> * alloc_cpu_rmap - allocate CPU affinity reverse-map./spa  >
 L27">4 27./a>.spa  class="comment"> * @size: Number of objects to be mapped./spa  >
 L28">4 28./a>.spa  class="comment"> * @flags: Allocat
 L29">4 29./a>.spa  class="comment"> */./spa  >
 L30">4 30./a>struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=alloc_cpu_rmap" class="sref">alloc_cpu_rmap./a>(unsigned int .a href="+code=size" class="sref">size./a>, .a href="+code=gfp_t" class="sref">gfp_t./a> .a href="+code=flags" class="sref">flags./a>)>
 L31">4 31./a>{>
 L32">4 32./a>        struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>;t
 L33">4 33./a>        unsigned int .a href="+code=cpu" class="sref">cpu./a>;t
 L34">4 34./a>        .a href="+code=size_t" class="sref">size_t./a> .a href="+code=obj_offset" class="sref">obj_offset./a>;t
 L35">4 35./a>t
 L36">4 36./a>        .spa  class="comment">/* This is a silly number of objects, and we use u16 indices. */./spa  >
 L37">4 37./a>        if (.a href="+code=size" class="sref">size./a> > 0xffff)>
 L38">4 38./a>                return .a href="+code=NULL" class="sref">NULL./a>;t
 L39">4 39./a>t
 L40">4 40./a>        .spa  class="comment">/* Offset of object pointer array from base structure */./spa  >
 L41">4 41./a>        .a href="+code=obj_offset" class="sref">obj_offset./a> = .a href="+code=ALIGN" class="sref">ALIGN./a>(.a href="+code=offsetof" class="sref">offsetof./a>(struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a>, .a href="+code=near" class="sref">near./a>[.a href="+code=nr_cpu_ids" class="sref">nr_cpu_ids./a>]),>
 L42">4 42./a>                           sizeof(void *));t
 L43">4 43./a>t
 L44">4 44./a>        .a href="+code=rmap" class="sref">rmap./a> = .a href="+code=kzalloc" class="sref">kzalloc./a>(.a href="+code=obj_offset" class="sref">obj_offset./a> + .a href="+code=size" class="sref">size./a> * sizeof(.a href="+code=rmap" class="sref">rmap./a>->.a href="+code=obj" class="sref">obj./a>[0]), .a href="+code=flags" class="sref">flags./a>);t
 L45">4 45./a>        if (!.a href="+code=rmap" class="sref">rmap./a>)>
 L46">4 46./a>                return .a href="+code=NULL" class="sref">NULL./a>;t
 L47">4 47./a>t
 L48">4 48./a>        .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=obj" class="sref">obj./a> = (void **)((char *).a href="+code=rmap" class="sref">rmap./a> + .a href="+code=obj_offset" class="sref">obj_offset./a>);t
 L49">4 49./a>t
 L50">4 50./a>        .spa  class="comment">/* Initially assign CPUs to objects on a rota, since we have./spa  >
 L51">4 51./a>.spa  class="comment">         * no idea where the objects are.  Use infinite distance, so./spa  >
 L52">4 52./a>.spa  class="comment">         * any object with known distance is preferable.  Include the./spa  >
 L53">4 53./a>.spa  class="comment">         * CPUs that are not present/online, since we definitely want./spa  >
 L54">4 54./a>.spa  class="comment">         * any newly-hotplugged CPUs to have some object assigned../spa  >
 L55">4 55./a>.spa  class="comment">         */./spa  >
 L56">4 56./a>        .a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu./a>(.a href="+code=cpu" class="sref">cpu./a>) {>
 L57">4 57./a>                .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=index" class="sref">index./a> = .a href="+code=cpu" class="sref">cpu./a> % .a href="+code=size" class="sref">size./a>;t
 L58">4 58./a>                .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=dist" class="sref">dist./a> = .a href="+code=CPU_RMAP_DIST_INF" class="sref">CPU_RMAP_DIST_INF./a>;t
 L59">4 59./a>        }>
 L60">4 60./a>t
 L61">4 61./a>        .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=size" class="sref">size./a> = .a href="+code=size" class="sref">size./a>;t
 L62">4 62./a>        return .a href="+code=rmap" class="sref">rmap./a>;t
 L63">4 63./a>}>
 L64">4 64./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=alloc_cpu_rmap" class="sref">alloc_cpu_rmap./a>);t
 L65">4 65./a>t
 L66">4 66./a>.spa  class="comment">/* Reetionate nearest object for given CPU, comparing with the given./spa  >
 L67">4 67./a>.spa  class="comment"> * neighbours at the given distance../spa  >
 L68">4 68./a>.spa  class="comment"> */./spa  >
 L69">4 69./a>static .a href="+code=bool" class="sref">bool./a> .a href="+code=cpu_rmap_copy_neigh" class="sref">cpu_rmap_copy_neigh./a>(struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>, unsigned int .a href="+code=cpu" class="sref">cpu./a>,>
 L70">4 70./a>                                const struct .a href="+code=cpumask" class="sref">cpumask./a> *.a href="+code=mask" class="sref">mask./a>, .a href="+code=u16" class="sref">u16./a> .a href="+code=dist" class="sref">dist./a>)>
 L71">4 71./a>{>
 L72">4 72./a>        int .a href="+code=neigh" class="sref">neigh./a>;t
 L73">4 73./a>t
 L74">4 74./a>        .a href="+code=for_each_cpu" class="sref">for_each_cpu./a>(.a href="+code=neigh" class="sref">neigh./a>, .a href="+code=mask" class="sref">mask./a>) {>
 L75">4 75./a>                if (.a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=dist" class="sref">dist./a> > .a href="+code=dist" class="sref">dist./a> &&>
 L76">4 76./a>                    .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=neigh" class="sref">neigh./a>]..a href="+code=dist" class="sref">dist./a> <= .a href="+code=dist" class="sref">dist./a>) {>
 L77">4 77./a>                        .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=index" class="sref">index./a> = .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=neigh" class="sref">neigh./a>]..a href="+code=index" class="sref">index./a>;>
 L78">4 78./a>                        .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=dist" class="sref">dist./a> = .a href="+code=dist" class="sref">dist./a>;>
 L79">4 79./a>                        return .a href="+code=true" class="sref">true./a>;>
 L80">4 80./a>                }>
 L81">4 81./a>        }>
 L82">4 82./a>        return .a href="+code=false" class="sref">false./a>;>
 L83">4 83./a>}>
 L84">4 84./a>t
 L85">4 85./a>#ifdef4.a href="+code=DEBUG" class="sref">DEBUG./a>t
 L86">4 86./a>static void .a href="+code=debug_print_rmap" class="sref">debug_print_rmap./a>(const struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>, const char *.a href="+code=prefix" class="sref">prefix./a>)>
 L87">4 87./a>{>
 L88">4 88./a>        unsigned .a href="+code=index" class="sref">index./a>;>
 L89">4 89./a>        unsigned int .a href="+code=cpu" class="sref">cpu./a>;t
 L90">4 90./a>t
 L91">4 91./a>        .a href="+code=pr_info" class="sref">pr_info./a>(.spa  class="string">"cpu_rmap %p, %s:\n"./spa  , .a href="+code=rmap" class="sref">rmap./a>, .a href="+code=prefix" class="sref">prefix./a>);t
 L92">4 92./a>t
 L93">4 93./a>        .a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu./a>(.a href="+code=cpu" class="sref">cpu./a>) {>
 L94">4 94./a>                .a href="+code=index" class="sref">index./a> = .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=index" class="sref">index./a>;t
 L95">4 95./a>                .a href="+code=pr_info" class="sref">pr_info./a>(.spa  class="string">"cpu %d -> obj %u (distance %u)\n"./spa  ,t
 L96">4 96./a>                        .a href="+code=cpu" class="sref">cpu./a>, .a href="+code=index" class="sref">index./a>, .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=dist" class="sref">dist./a>);t
 L97">4 97./a>        }>
 L98">4 98./a>}>
 L99">4 99./a>#elset
 L100">4100./a>static .a href="+code=inline" class="sref">inline./a> voidt
 L101">4101./a>.a href="+code=debug_print_rmap" class="sref">debug_print_rmap./a>(const struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>, const char *.a href="+code=prefix" class="sref">prefix./a>)>
 L102">4102./a>{>
 L103">4103./a>}>
 L104">4104v4a>#endift
 L105">4105./a>t
 L106">4106./a>.spa  class="comment">/**./spa  >
 L107">4107./a>.spa  class="comment"> * cpu_rmap_add - add object to a rmap./spa  >
 L108">4108./a>.spa  class="comment"> * @rmap: CPU rmap allocated with alloc_cpu_rmap()./spa  >
 L109">4109./a>.spa  class="comment"> * @obj: Object to add to rmap./spa  >
 L110">4110./a>.spa  class="comment"> *./spa  >
 L111">4111./a>.spa  class="comment"> * Return index of object../spa  >
 L112">4112./a>.spa  class="comment"> */./spa  >
 L113">4113./a>int .a href="+code=cpu_rmap_add" class="sref">cpu_rmap_add./a>(struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>, void *.a href="+code=obj" class="sref">obj./a>)>
 L114">4114v4a>{>
 L115">4115./a>        .a href="+code=u16" class="sref">u16./a> .a href="+code=index" class="sref">index./a>;t
 L116">4116./a>t
 L117">4117./a>        .a href="+code=BUG_ON" class="sref">BUG_ON./a>(.a href="+code=rmap" class="sref">rmap./a>->.a href="+code=used" class="sref">used./a> >= .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=size" class="sref">size./a>);t
 L118">4118./a>        .a href="+code=index" class="sref">index./a> = .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=used" class="sref">used./a>++;t
 L119">4119./a>        .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=obj" class="sref">obj./a>[.a href="+code=index" class="sref">index./a>] = .a href="+code=obj" class="sref">obj./a>;t
 L120">4120./a>        return .a href="+code=index" class="sref">index./a>;t
 L121">4121./a>}>
 L122">4122./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=cpu_rmap_add" class="sref">cpu_rmap_add./a>);t
 L123">4123./a>t
 L124">4124./a>.spa  class="comment">/**./spa  >
 L125">4125./a>.spa  class="comment"> * cpu_rmap_update - update CPU rmap following a change of object affinity./spa  >
 L126">4126./a>.spa  class="comment"> * @rmap: CPU rmap to update./spa  >
 L127">4127./a>.spa  class="comment"> * @index: Index of object whose affinity changed./spa  >
 L128">4128./a>.spa  class="comment"> * @affinity: New CPU affinity of object./spa  >
 L129">4129./a>.spa  class="comment"> */./spa  >
 L130">4130./a>int .a href="+code=cpu_rmap_update" class="sref">cpu_rmap_update./a>(struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>, .a href="+code=u16" class="sref">u16./a> .a href="+code=index" class="sref">index./a>,t
 L131">4131./a>                    const struct .a href="+code=cpumask" class="sref">cpumask./a> *.a href="+code=affinity" class="sref">affinity./a>)>
 L132">4132./a>{>
 L133">4133./a>        .a href="+code=cpumask_var_t" class="sref">cpumask_var_t./a> .a href="+code=update_mask" class="sref">update_mask./a>;t
 L134">4134./a>        unsigned int .a href="+code=cpu" class="sref">cpu./a>;t
 L135">4135./a>t
 L136">4136./a>        if (.a href="+code=unlikely" class="sref">unlikely./a>(!.a href="+code=zalloc_cpumask_var" class="sref">zalloc_cpumask_var./a>(&.a href="+code=update_mask" class="sref">update_mask./a>, .a href="+code=GFP_KERNEL" class="sref">GFP_KERNEL./a>)))>
 L137">4137./a>                return -.a href="+code=ENOMEM" class="sref">ENOMEM./a>;t
 L138">4138./a>t
 L139">4139./a>        .spa  class="comment">/* Invalidate distance for all CPUs for which this used to be./spa  >
 L140">4140./a>.spa  class="comment">         * the nearest object.  Mark those CPUs for update../spa  >
 L141">4141./a>.spa  class="comment">         */./spa  >
 L142">4142./a>        .a href="+code=for_each_online_cpu" class="sref">for_each_online_cpu./a>(.a href="+code=cpu" class="sref">cpu./a>) {>
 L143">4143./a>                if (.a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=index" class="sref">index./a> == .a href="+code=index" class="sref">index./a>) {>
 L144">4144./a>                        .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=dist" class="sref">dist./a> = .a href="+code=CPU_RMAP_DIST_INF" class="sref">CPU_RMAP_DIST_INF./a>;t
 L145">4145./a>                        .a href="+code=cpumask_set_cpu" class="sref">cpumask_set_cpu./a>(.a href="+code=cpu" class="sref">cpu./a>, .a href="+code=update_mask" class="sref">update_mask./a>);t
 L146">4146./a>                }>
 L147">4147./a>        }>
 L148">4148./a>t
 L149">4149./a>        .a href="+code=debug_print_rmap" class="sref">debug_print_rmap./a>(.a href="+code=rmap" class="sref">rmap./a>, .spa  class="string">"after invalidating old distances"./spa  );t
 L150">4150./a>t
 L151">4151./a>        .spa  class="comment">/* Set distance to 0 for all CPUs in the new affinity mask../spa  >
 L152">4152./a>.spa  class="comment">         * Mark all CPUs within their NUMA nodes for update../spa  >
 L153">4153./a>.spa  class="comment">         */./spa  >
 L154">4154./a>        .a href="+code=for_each_cpu" class="sref">for_each_cpu./a>(.a href="+code=cpu" class="sref">cpu./a>, .a href="+code=affinity" class="sref">affinity./a>) {>
 L155">4155./a>                .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=index" class="sref">index./a> = .a href="+code=index" class="sref">index./a>;t
 L156">4156./a>                .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=near" class="sref">near./a>[.a href="+code=cpu" class="sref">cpu./a>]..a href="+code=dist" class="sref">dist./a> = 0;t
 L157">4157./a>                .a href="+code=cpumask_or" class="sref">cpumask_or./a>(.a href="+code=update_mask" class="sref">update_mask./a>, .a href="+code=update_mask" class="sref">update_mask./a>,t
 L158">4158./a>                           .a href="+code=cpumask_of_node" class="sref">cpumask_of_node./a>(.a href="+code=cpu_to_node" class="sref">cpu_to_node./a>(.a href="+code=cpu" class="sref">cpu./a>)));t
 L159">4159./a>        }>
 L160">4160./a>t
 L161">4161./a>        .a href="+code=debug_print_rmap" class="sref">debug_print_rmap./a>(.a href="+code=rmap" class="sref">rmap./a>, .spa  class="string">"after updating neighbours"./spa  );t
 L162">4162./a>t
 L163">4163./a>        .spa  class="comment">/* Update distances based on topology */./spa  >
 L164">4164./a>        .a href="+code=for_each_cpu" class="sref">for_each_cpu./a>(.a href="+code=cpu" class="sref">cpu./a>, .a href="+code=update_mask" class="sref">update_mask./a>) {>
 L165">4165./a>                if (.a href="+code=cpu_rmap_copy_neigh" class="sref">cpu_rmap_copy_neigh./a>(.a href="+code=rmap" class="sref">rmap./a>, .a href="+code=cpu" class="sref">cpu./a>,>
 L166">4166./a>                                        .a href="+code=topology_thread_cpumask" class="sref">topology_thread_cpumask./a>(.a href="+code=cpu" class="sref">cpu./a>), 1))>
 L167">4167./a>                        continue;t
 L168">4168./a>                if (.a href="+code=cpu_rmap_copy_neigh" class="sref">cpu_rmap_copy_neigh./a>(.a href="+code=rmap" class="sref">rmap./a>, .a href="+code=cpu" class="sref">cpu./a>,>
 L169">4169./a>                                        .a href="+code=topology_core_cpumask" class="sref">topology_core_cpumask./a>(.a href="+code=cpu" class="sref">cpu./a>), 2))>
 L170">4170./a>                        continue;t
 L171">4171./a>                if (.a href="+code=cpu_rmap_copy_neigh" class="sref">cpu_rmap_copy_neigh./a>(.a href="+code=rmap" class="sref">rmap./a>, .a href="+code=cpu" class="sref">cpu./a>,>
 L172">4172./a>                                        .a href="+code=cpumask_of_node" class="sref">cpumask_of_node./a>(.a href="+code=cpu_to_node" class="sref">cpu_to_node./a>(.a href="+code=cpu" class="sref">cpu./a>)), 3))>
 L173">4173./a>                        continue;t
 L174">4174./a>                .spa  class="comment">/* We could continue into NUMA node distances, but for now./spa  >
 L175">4175./a>.spa  class="comment">                 * we give up../spa  >
 L176">4176./a>.spa  class="comment">                 */./spa  >
 L177">4177./a>        }>
 L178">4178./a>t
 L179">4179./a>        .a href="+code=debug_print_rmap" class="sref">debug_print_rmap./a>(.a href="+code=rmap" class="sref">rmap./a>, .spa  class="string">"after copying neighbours"./spa  );t
 L180">4180./a>t
 L181">4181./a>        .a href="+code=free_cpumask_var" class="sref">free_cpumask_var./a>(.a href="+code=update_mask" class="sref">update_mask./a>);t
 L182">4182./a>        return 0;t
 L183">4183./a>}>
 L184">4184./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=cpu_rmap_update" class="sref">cpu_rmap_update./a>);t
 L185">4185./a>t
 L186">4186./a>#ifdef4.a href="+code=CONFIG_GENERIC_HARDIRQS" class="sref">CONFIG_GENERIC_HARDIRQS./a>t
 L187">4187./a>t
 L188">4188./a>.spa  class="comment">/* Glue between IRQ affinity notifiers and CPU rmaps */./spa  >
 L189">4189./a>t
 L190">4190./a>struct .a href="+code=irq_glue" class="sref">irq_glue./a> {>
 L191">4191./a>        struct .a href="+code=irq_affinity_notify" class="sref">irq_affinity_notify./a> .a href="+code=notify" class="sref">notify./a>;t
 L192">4192./a>        struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>;t
 L193">4193./a>        .a href="+code=u16" class="sref">u16./a> .a href="+code=index" class="sref">index./a>;t
 L194">4194./a>};t
 L195">4195./a>t
 L196">4196./a>.spa  class="comment">/**./spa  >
 L197">4197./a>.spa  class="comment"> * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs./spa  >
 L198">4198./a>.spa  class="comment"> * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL./spa  >
 L199">4199./a>.spa  class="comment"> *./spa  >
 L200">4200./a>.spa  class="comment"> * Must be called in process context, before freeing the IRQs, and./spa  >
 L201">4201./a>.spa  class="comment"> * without holding any locks required by global workqueue items../spa  >
 L202">4202./a>.spa  class="comment"> */./spa  >
 L203">4203./a>void .a href="+code=free_irq_cpu_rmap" class="sref">free_irq_cpu_rmap./a>(struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>)>
 L204">4204v4a>{>
 L205">4205./a>        struct .a href="+code=irq_glue" class="sref">irq_glue./a> *.a href="+code=glue" class="sref">glue./a>;t
 L206">4206./a>        .a href="+code=u16" class="sref">u16./a> .a href="+code=index" class="sref">index./a>;t
 L207">4207./a>t
 L208">4208./a>        if (!.a href="+code=rmap" class="sref">rmap./a>)>
 L209">4209./a>                return;t
 L210">4210./a>t
 L211">4211./a>        for (.a href="+code=index" class="sref">index./a> = 0; .a href="+code=index" class="sref">index./a> < .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=used" class="sref">used./a>; .a href="+code=index" class="sref">index./a>++) {>
 L212">4212./a>                .a href="+code=glue" class="sref">glue./a> = .a href="+code=rmap" class="sref">rmap./a>->.a href="+code=obj" class="sref">obj./a>[.a href="+code=index" class="sref">index./a>];t
 L213">4213./a>                .a href="+code=irq_set_affinity_notifier" class="sref">irq_set_affinity_notifier./a>(.a href="+code=glue" class="sref">glue./a>->.a href="+code=notify" class="sref">notify./a>..a href="+code=irq" class="sref">irq./a>, .a href="+code=NULL" class="sref">NULL./a>);t
 L214">4214./a>        }>
 L215">4215./a>        .a href="+code=irq_run_affinity_notifiers" class="sref">irq_run_affinity_notifiers./a>();t
 L216">4216./a>t
 L217">4217./a>        .a href="+code=kfree" class="sref">kfree./a>(.a href="+code=rmap" class="sref">rmap./a>);t
 L218">4218./a>}>
 L219">4219./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=free_irq_cpu_rmap" class="sref">free_irq_cpu_rmap./a>);t
 L220">4220./a>t
 L221">4221./a>static voidt
 L222">4222./a>.a href="+code=irq_cpu_rmap_notify" class="sref">irq_cpu_rmap_notify./a>(struct .a href="+code=irq_affinity_notify" class="sref">irq_affinity_notify./a> *.a href="+code=notify" class="sref">notify./a>, const .a href="+code=cpumask_t" class="sref">cpumask_t./a> *.a href="+code=mask" class="sref">mask./a>)t
 L223">4223./a>{>
 L224">4224./a>        struct .a href="+code=irq_glue" class="sref">irq_glue./a> *.a href="+code=glue" class="sref">glue./a> =>
 L225">4225./a>                .a href="+code=container_of" class="sref">container_of./a>(.a href="+code=notify" class="sref">notify./a>, struct .a href="+code=irq_glue" class="sref">irq_glue./a>, .a href="+code=notify" class="sref">notify./a>);t
 L226">4226./a>        int .a href="+code=rc" class="sref">rc./a>;t
 L227">4227./a>t
 L228">4228./a>        .a href="+code=rc" class="sref">rc./a> = .a href="+code=cpu_rmap_update" class="sref">cpu_rmap_update./a>(.a href="+code=glue" class="sref">glue./a>->.a href="+code=rmap" class="sref">rmap./a>, .a href="+code=glue" class="sref">glue./a>->.a href="+code=index" class="sref">index./a>, .a href="+code=mask" class="sref">mask./a>);t
 L229">4229./a>        if (.a href="+code=rc" class="sref">rc./a>)t
 L230">4230./a>                .a href="+code=pr_warning" class="sref">pr_warning./a>(.spa  class="string">"irq_cpu_rmap_notify: update failed: %d\n"./spa  , .a href="+code=rc" class="sref">rc./a>);t
 L231">4231./a>}>
 L232">4232./a>t
 L233">4233./a>static void .a href="+code=irq_cpu_rmap_release" class="sref">irq_cpu_rmap_release./a>(struct .a href="+code=kref" class="sref">kref./a> *.a href="+code=ref" class="sref">ref./a>)t
 L234">4234v4a>{>
 L235">4235./a>        struct .a href="+code=irq_glue" class="sref">irq_glue./a> *.a href="+code=glue" class="sref">glue./a> =>
 L236">4236./a>                .a href="+code=container_of" class="sref">container_of./a>(.a href="+code=ref" class="sref">ref./a>, struct .a href="+code=irq_glue" class="sref">irq_glue./a>, .a href="+code=notify" class="sref">notify./a>..a href="+code=kref" class="sref">kref./a>);t
 L237">4237./a>        .a href="+code=kfree" class="sref">kfree./a>(.a href="+code=glue" class="sref">glue./a>);t
 L238">4238./a>}>
 L239">4239./a>t
 L240">4240./a>.spa  class="comment">/**./spa  >
 L241">4241./a>.spa  class="comment"> * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map./spa  >
 L242">4242./a>.spa  class="comment"> * @rmap: The reverse-map./spa  >
 L243">4243./a>.spa  class="comment"> * @irq: The IRQ number./spa  >
 L244">4244./a>.spa  class="comment"> *./spa  >
 L245">4245./a>.spa  class="comment"> * This adds an IRQ affinity notifier that will update the reverse-map./spa  >
 L246">4246./a>.spa  class="comment"> * automatically../spa  >
 L247">4247./a>.spa  class="comment"> *./spa  >
 L248">4248./a>.spa  class="comment"> * Must be called in process context, after the IRQ is allocated but./spa  >
 L249">4249./a>.spa  class="comment"> * before it is bound with request_irq()../spa  >
 L250">4250./a>.spa  class="comment"> */./spa  >
 L251">4251./a>int .a href="+code=irq_cpu_rmap_add" class="sref">irq_cpu_rmap_add./a>(struct .a href="+code=cpu_rmap" class="sref">cpu_rmap./a> *.a href="+code=rmap" class="sref">rmap./a>, int .a href="+code=irq" class="sref">irq./a>)t
 L252">4252./a>{>
 L253">4253./a>        struct .a href="+code=irq_glue" class="sref">irq_glue./a> *.a href="+code=glue" class="sref">glue./a> = .a href="+code=kzalloc" class="sref">kzalloc./a>(sizeof(*.a href="+code=glue" class="sref">glue./a>), .a href="+code=GFP_KERNEL" class="sref">GFP_KERNEL./a>);t
 L254">4254./a>        int .a href="+code=rc" class="sref">rc./a>;t
 L255">4255./a>t
 L256">4256./a>        if (!.a href="+code=glue" class="sref">glue./a>)t
 L257">4257./a>                return -.a href="+code=ENOMEM" class="sref">ENOMEM./a>;t
 L258">4258./a>        .a href="+code=glue" class="sref">glue./a>->.a href="+code=notify" class="sref">notify./a>..a href="+code=notify" class="sref">notify./a> = .a href="+code=irq_cpu_rmap_notify" class="sref">irq_cpu_rmap_notify./a>;t
 L259">4259./a>        .a href="+code=glue" class="sref">glue./a>->.a href="+code=notify" class="sref">notify./a>..a href="+code=release" class="sref">release./a> = .a href="+code=irq_cpu_rmap_release" class="sref">irq_cpu_rmap_release./a>;t
 L260">4260./a>        .a href="+code=glue" class="sref">glue./a>->.a href="+code=rmap" class="sref">rmap./a> = .a href="+code=rmap" class="sref">rmap./a>;t
 L261">4261./a>        .a href="+code=glue" class="sref">glue./a>->.a href="+code=index" class="sref">index./a> = .a href="+code=cpu_rmap_add" class="sref">cpu_rmap_add./a>(.a href="+code=rmap" class="sref">rmap./a>, .a href="+code=glue" class="sref">glue./a>);t
 L262">4262./a>        .a href="+code=rc" class="sref">rc./a> = .a href="+code=irq_set_affinity_notifier" class="sref">irq_set_affinity_notifier./a>(.a href="+code=irq" class="sref">irq./a>, &.a href="+code=glue" class="sref">glue./a>->.a href="+code=notify" class="sref">notify./a>);t
 L263">4263./a>        if (.a href="+code=rc" class="sref">rc./a>)t
 L264">4264./a>                .a href="+code=kfree" class="sref">kfree./a>(.a href="+code=glue" class="sref">glue./a>);t
 L265">4265./a>        return .a href="+code=rc" class="sref">rc./a>;t
 L266">4266./a>}>
 L267">4267./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=irq_cpu_rmap_add" class="sref">irq_cpu_rmap_add./a>);t
 L268">4268./a>t
 L269">4269./a>#endif .spa  class="comment">/* CONFIG_GENERIC_HARDIRQS */./spa  >
 L270">4270./a>
lxr.linux.no kindly hosted by Redpill Linpro AS./a>, provider of Linux consulting and operations services since 1995.