/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010           SUSE Linux Products GmbH
 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */
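
/*
 * Editorial sketch, not part of the original source: mm/percpu.c is expected
 * to include either this file or mm/percpu-km.c and drive the chunk
 * operations defined below roughly as
 *
 *      chunk = pcpu_create_chunk();
 *      rc = pcpu_populate_chunk(chunk, off, size);     back [off, off+size)
 *      ...
 *      pcpu_depopulate_chunk(chunk, off, size);        drop the backing again
 *      pcpu_destroy_chunk(chunk);
 *
 * with pcpu_alloc_mutex held around populate/depopulate as noted in the
 * per-function comments.
 */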

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
                                    unsigned int cpu, int page_idx)
{
        /* must not be used on pre-mapped chunk */
        WARN_ON(chunk->immutable);

        return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
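
/*
 * Editorial note (assumption about a helper defined in mm/percpu.c, not in
 * this file): pcpu_chunk_addr() is expected to compute something like
 *
 *      (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu]
 *                                      + (page_idx << PAGE_SHIFT)
 *
 * so pcpu_chunk_page() simply resolves that vmalloc address back to its
 * backing struct page via vmalloc_to_page().
 */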

/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
                                               unsigned long **bitmapp,
                                               bool may_alloc)
{
        static struct page **pages;
        static unsigned long *bitmap;
        size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
        size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
                             sizeof(unsigned long);

        if (!pages || !bitmap) {
                if (may_alloc && !pages)
                        pages = pcpu_mem_alloc(pages_size);
                if (may_alloc && !bitmap)
                        bitmap = pcpu_mem_alloc(bitmap_size);
                if (!pages || !bitmap)
                        return NULL;
        }

        memset(pages, 0, pages_size);
        bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

        *bitmapp = bitmap;
        return pages;
}
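
/*
 * Editorial sketch, not part of the original source: the single static
 * pages/bitmap pair is shared by all callers, which is safe only because
 * pcpu_alloc_mutex serializes them.  The expected usage pattern, as in
 * pcpu_populate_chunk() and pcpu_depopulate_chunk() below, is
 *
 *      pages = pcpu_get_pages_and_bitmap(chunk, &populated, may_alloc);
 *      ... allocate/map or unmap/free pages, updating @populated ...
 *      bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
 */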

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
                            struct page **pages, unsigned long *populated,
                            int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page = pages[pcpu_page_idx(cpu, i)];

                        if (page)
                                __free_page(page);
                }
        }
}
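
/*
 * Editorial note (assumption; pcpu_page_idx() is defined in mm/percpu.c, not
 * in this file): the temp @pages array is laid out unit-major, roughly
 *
 *      pcpu_page_idx(cpu, page_idx) == pcpu_unit_map[cpu] * pcpu_unit_pages
 *                                      + page_idx
 *
 * so each possible CPU owns a contiguous run of pcpu_unit_pages slots.
 */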

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            struct page **pages, unsigned long *populated,
                            int page_start, int page_end)
{
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                        if (!*pagep) {
                                pcpu_free_pages(chunk, pages, populated,
                                                page_start, page_end);
                                return -ENOMEM;
                        }
                }
        }
        return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        flush_cache_vunmap(
                pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
        unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                             struct page **pages, unsigned long *populated,
                             int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page;

                        page = pcpu_chunk_page(chunk, cpu, i);
                        WARN_ON(!page);
                        pages[pcpu_page_idx(cpu, i)] = page;
                }
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                   page_end - page_start);
        }

        for (i = page_start; i < page_end; i++)
                __clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        flush_tlb_kernel_range(
                pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
                            int nr_pages)
{
        return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
                                        PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
                          struct page **pages, unsigned long *populated,
                          int page_start, int page_end)
{
        unsigned int cpu, tcpu;
        int i, err;

        for_each_possible_cpu(cpu) {
                err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                       &pages[pcpu_page_idx(cpu, page_start)],
                                       page_end - page_start);
                if (err < 0)
                        goto err;
        }

        /* mapping successful, link chunk and mark populated */
        for (i = page_start; i < page_end; i++) {
                for_each_possible_cpu(cpu)
                        pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                            chunk);
                __set_bit(i, populated);
        }

        return 0;

err:
        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
        return err;
}
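
/*
 * Editorial note, not part of the original source: on failure above, only the
 * units mapped before the failing @cpu are unwound, and no @populated bits
 * are set because the bit-setting loop runs only after every unit has been
 * mapped successfully.  Cache/TLB flushing remains the caller's job, per the
 * function comment.
 */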

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the flush is done at once for the
 * whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
{
        flush_cache_vmap(
                pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        int free_end = page_start, unmap_end = page_start;
        struct page **pages;
        unsigned long *populated;
        unsigned int cpu;
        int rs, re, rc;

        /* quick path, check whether all pages are already there */
        rs = page_start;
        pcpu_next_pop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                goto clear;

        /* need to allocate and map pages, this chunk can't be immutable */
        WARN_ON(chunk->immutable);

        pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
        if (!pages)
                return -ENOMEM;

        /* alloc and map */
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
                if (rc)
                        goto err_free;
                free_end = re;
        }

        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_map_pages(chunk, pages, populated, rs, re);
                if (rc)
                        goto err_unmap;
                unmap_end = re;
        }
        pcpu_post_map_flush(chunk, page_start, page_end);

        /* commit new bitmap */
        bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
        return 0;

err_unmap:
        pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
                pcpu_unmap_pages(chunk, pages, populated, rs, re);
        pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
                pcpu_free_pages(chunk, pages, populated, rs, re);
        return rc;
}
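
/*
 * Editorial note, not part of the original source: @free_end and @unmap_end
 * record how far the alloc and map passes got, so the error paths unmap only
 * regions that were actually mapped and free only regions that were actually
 * allocated.  Regions that were already populated before the call are never
 * touched because pcpu_for_each_unpop_region() skips them.
 */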

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  The cache is flushed before unmapping; the TLB flush
 * is left to vmalloc's lazy flushing.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        struct page **pages;
        unsigned long *populated;
        int rs, re;

        /* quick path, check whether it's empty already */
        rs = page_start;
        pcpu_next_unpop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                return;

        /* immutable chunks can't be depopulated */
        WARN_ON(chunk->immutable);

        /*
         * If control reaches here, there must have been at least one
         * successful population attempt so the temp pages array must
         * be available now.
         */
        pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
        BUG_ON(!pages);

        /* unmap and free */
        pcpu_pre_unmap_flush(chunk, page_start, page_end);

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_unmap_pages(chunk, pages, populated, rs, re);

        /* no need to flush tlb, vmalloc will handle it lazily */

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_free_pages(chunk, pages, populated, rs, re);

        /* commit new bitmap */
        bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

static struct pcpu_chunk *pcpu_create_chunk(void)
{
        struct pcpu_chunk *chunk;
        struct vm_struct **vms;

        chunk = pcpu_alloc_chunk();
        if (!chunk)
                return NULL;

        vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                pcpu_nr_groups, pcpu_atom_size);
        if (!vms) {
                pcpu_free_chunk(chunk);
                return NULL;
        }

        chunk->data = vms;
        chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
        return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
        if (chunk && chunk->data)
                pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
        pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
        return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
        /* no extra restriction */
        return 0;
}