linux/lib/genalloc.c
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers.
 * Code that uses the allocator in an NMI handler should therefore
 * depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
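
/*
 * Typical lifecycle, as a minimal sketch (the sram_* names, the order
 * of 5 and the node id of -1 are illustrative assumptions, not taken
 * from this file):
 *
 *	struct gen_pool *pool = gen_pool_create(5, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1))
 *		goto out_destroy;
 *
 *	addr = gen_pool_alloc(pool, 100);
 *	if (addr)
 *		gen_pool_free(pool, addr, 100);
 *
 *	gen_pool_destroy(pool);
 *
 * gen_pool_create(5, -1) gives 32-byte granules allocated on any node;
 * the 100-byte request is rounded up to four granules (128 bytes); and
 * gen_pool_destroy() BUGs if any allocation is still outstanding.
 */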

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                /* Fail if any of the requested bits is already set. */
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
                /* Retry if another user modified the word meanwhile. */
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                /* Fail unless all of the requested bits are still set. */
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one of them fails and the number of bits still
 * remaining to be set is returned; otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

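/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64): for
 * start == 62 and nr == 4, the range spans a word boundary.  The first
 * pass sets bits 62-63 of the first word via BITMAP_FIRST_WORD_MASK(62),
 * leaving nr == 2; the loop then exits, and the tail sets bits 0-1 of
 * the next word via ~0UL masked with BITMAP_LAST_WORD_MASK(66).
 * bitmap_clear_ll() below walks the words the same way.
 */
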
/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one of them fails and the number of bits still
 * remaining to be cleared is returned; otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

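/*
 * Callers that have no physical address to record typically use the
 * gen_pool_add() wrapper from <linux/genalloc.h>, which (as a sketch of
 * its behaviour, not reproduced from this file) simply forwards here
 * with a phys of -1:
 *
 *	gen_pool_add(pool, addr, size, nid);
 */
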
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool holding the memory
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

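/*
 * Example (a sketch; sram_pool, vaddr and len are illustrative names):
 * a driver that carved a buffer out of on-device memory can derive the
 * address to hand to the hardware from the virtual allocation:
 *
 *	unsigned long vaddr = gen_pool_alloc(sram_pool, len);
 *	phys_addr_t busaddr = gen_pool_virt_to_phys(sram_pool, vaddr);
 *
 * This is only meaningful if the chunk was registered with a real @phys
 * via gen_pool_add_virt(); chunks recorded with a phys of -1 yield a
 * meaningless result here.
 */
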
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                /* Any set bit means an outstanding allocation. */
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
        return;
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.  Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
                start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
                                                       start_bit, nbits, 0);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        /*
                         * Lost a race with another user: undo the bits
                         * we did manage to set, then search again.
                         */
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

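/*
 * Worked example (illustrative values): with min_alloc_order == 5 the
 * granule is 32 bytes, so gen_pool_alloc(pool, 100) computes
 * nbits = (100 + 31) >> 5 = 4 and consumes 128 bytes of the chunk.
 * gen_pool_free() below recomputes nbits from @size the same way, so a
 * free must pass a size that rounds to the same number of granules as
 * the matching allocation.
 */
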
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

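/*
 * Example callback (a sketch; count_chunks and nchunks are made-up
 * names).  Because @func runs under rcu_read_lock, it must not sleep:
 *
 *	static void count_chunks(struct gen_pool *pool,
 *				 struct gen_pool_chunk *chunk, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int nchunks = 0;
 *	gen_pool_for_each_chunk(pool, count_chunks, &nchunks);
 */
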
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get the size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk->end_addr - chunk->start_addr;
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);