linux/mm/percpu-vm.c
<<
ptio v2/spa v2/form v2a ptio v href="../linux+v3.9.1/mm/percpu-vm.c">ptio v2img src="../.static/gfx/right.png" alt=">>">pt2/spa pt2spa class="lxr_search">ptioptio v2input typtiohidden" namtionavtarget" ptio v2input typtiotext" namtiosearch" idiosearch">ptio v2butt2.2typtiosubmit">Searchptio vPrefs v2/a>pt2/spa io v v2/div io v v2form ac1" ="ajax+*" method="post" onsubmit="return false;">pt2input typtiohidden" namtioajax_lookup" idioajax_lookup" pio v v2/form pio v v2div class="headingbott2m">
2div idiofile_contents"
v v12/a>2spa	 class="comment">/*2/spa	  v v22/a>2spa	 class="comment"> * mm/percpu-vm.c - vmalloc area based chunk alloca1"
	2/spa	  v v32/a>2spa	 class="comment"> *2/spa	  v v42/a>2spa	 class="comment"> * Copyright (C) 2010           SUSE LinuxvProducts GmbH2/spa	  v v52/a>2spa	 class="comment"> * Copyright (C) 2010           Tejun Heo <tj@kernel.org>2/spa	  v v62/a>2spa	 class="comment"> *2/spa	  v v72/a>2spa	 class="comment"> * This file is released under the GPLlue2/spa	  v v82/a>2spa	 class="comment"> *2/spa	  v v92/a>2spa	 class="comment"> * Chunks are mapped into vmalloc areas and populated page by pagee2/spa	  v 2.6.a>2spa	 class="comment"> * This is the default chunk alloca1ore2/spa	  v 112/a>2spa	 class="comment"> */2/spa	  v 122/a> v 132/a>static structv2a href="+code=page" class="sref">page2/a> *2a href="+code=pcpu_chunk_page" class="sref">pcpu_chunk_page2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v 142/a>                                    unsigned intv2a href="+code=cpu" class="sref">cpu2/a>, intv2a href="+code=page_idx" class="sref">page_idx2/a>) v 152/a>{ v 162/a>        2spa	 class="comment">/* must not be used 2.2pre-mapped chunk */2/spa	  v 172/a>        2a href="+code=WARN_ON" class="sref">WARN_ON2/a>(2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=immutable" class="sref">immutable2/a>); v 182/a> v 192/a>        return 2a href="+code=vmalloc_to_page" class="sref">vmalloc_to_page2/a>((void *)2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=page_idx" class="sref">page_idx2/a>)); v 2.6.a>} v 212/a> v 222/a>2spa	 class="comment">/**2/spa	  v 232/a>2spa	 class="comment"> * pcpu_get_pages_and_bitmap - get temp pages array and bitmap2/spa	  v 242/a>2spa	 class="comment"> * @chunk: chunk of interest2/spa	  v 252/a>2spa	 class="comment"> * @bitmapp: output paramtter for bitmap2/spa	  v 262/a>2spa	 class="comment"> * @may_alloc: may alloca1e the array2/spa	  v 272/a>2spa	 class="comment"> *2/spa	  v 282/a>2spa	 class="comment"> * Returns pointer to array of pointers to structvpage and bitmap,2/spa	  v 292/a>2spa	 class="comment"> * both of which ca	 be indexed with pcpu_page_idx().  The returned2/spa	  v 3.6.a>2spa	 class="comment"> * array is cleared to zero and *@bitmapp is copied from2/spa	  v 312/a>2spa	 class="comment"> * @chunk->populated.  No1e that there is only one array and bitmap2/spa	  v 322/a>2spa	 class="comment"> * and access exclus22.2is the caller's responsibilitye2/spa	  v 332/a>2spa	 class="comment"> *2/spa	  v 342/a>2spa	 class="comment"> * CONTEXT:2/spa	  v 352/a>2spa	 class="comment"> * pcpu_alloc_mutex and does GFP_KERNEL alloca1"
	 if @may_alloce2/spa	  v 362/a>2spa	 class="comment"> * Otherwise, don't caree2/spa	  v 372/a>2spa	 class="comment"> *2/spa	  v 382/a>2spa	 class="comment"> * RETURNS:2/spa	  v 392/a>2spa	 class="comment"> * Pointer to temp pages array 
	 success, NULL 
	 failuree2/spa	  v 4.6.a>2spa	 class="comment"> */2/spa	  v 412/a>static structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pcpu_get_pages_and_bitmap" class="sref">pcpu_get_pages_and_bitmap2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v 422/a>                                               unsigned long **2a href="+code=bitmapp" class="sref">bitmapp2/a>, v 432/a>                                               2a href="+code=bool" class="sref">bool2/a> 2a href="+code=may_alloc" class="sref">may_alloc2/a>) v 442/a>{ v 452/a>        static structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>; v 462/a>        static unsigned long *2a href="+code=bitmap" class="sref">bitmap2/a>; v 472/a>        2a href="+code=size_t" class="sref">size_t2/a> 2a href="+code=pages_size" class="sref">pages_size2/a> =v2a href="+code=pcpu_nr_units" class="sref">pcpu_nr_units2/a> *v2a href="+code=pcpu_unit_pages" class="sref">pcpu_unit_pages2/a> *vsizeof(2a href="+code=pages" class="sref">pages2/a>[0]); v 482/a>        2a href="+code=size_t" class="sref">size_t2/a> 2a href="+code=bitmap_size" class="sref">bitmap_size2/a> =v2a href="+code=BITS_TO_LONGS" class="sref">BITS_TO_LONGS2/a>(2a href="+code=pcpu_unit_pages" class="sref">pcpu_unit_pages2/a>) * v 492/a>                             sizeof(unsigned long); v 502/a> v 512/a>        if (!2a href="+code=pages" class="sref">pages2/a> || !2a href="+code=bitmap" class="sref">bitmap2/a>) { v 522/a>                if (2a href="+code=may_alloc" class="sref">may_alloc2/a> && !2a href="+code=pages" class="sref">pages2/a>) v 532/a>                        2a href="+code=pages" class="sref">pages2/a> =v2a href="+code=pcpu_mem_zalloc" class="sref">pcpu_mem_zalloc2/a>(2a href="+code=pages_size" class="sref">pages_size2/a>); v 542/a>                if (2a href="+code=may_alloc" class="sref">may_alloc2/a> && !2a href="+code=bitmap" class="sref">bitmap2/a>) v 552/a>                        2a href="+code=bitmap" class="sref">bitmap2/a> =v2a href="+code=pcpu_mem_zalloc" class="sref">pcpu_mem_zalloc2/a>(2a href="+code=bitmap_size" class="sref">bitmap_size2/a>); v 562/a>                if (!2a href="+code=pages" class="sref">pages2/a> || !2a href="+code=bitmap" class="sref">bitmap2/a>) v 572/a>                        return 2a href="+code=NULL" class="sref">NULL2/a>; v 582/a>        } v 592/a> v 602/a>        2a href="+code=bitmap_copy" class="sref">bitmap_copy2/a>(2a href="+code=bitmap" class="sref">bitmap2/a>,v2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=pcpu_unit_pages" class="sref">pcpu_unit_pages2/a>); v 612/a> v 622/a>        *2a href="+code=bitmapp" class="sref">bitmapp2/a> =v2a href="+code=bitmap" class="sref">bitmap2/a>; v 632/a>        return 2a href="+code=pages" class="sref">pages2/a>; v 646.a>} v 652/a> v 662/a>2spa	 class="comment">/**2/spa	  v 672/a>2spa	 class="comment"> * pcpu_free_pages - free pages which were alloca1ed for @chunk2/spa	  v 682/a>2spa	 class="comment"> * @chunk: chunk pages were alloca1ed for2/spa	  v 692/a>2spa	 class="comment"> * @pages: array of pages to be freed, indexed by pcpu_page_idx()2/spa	  v 7.6.a>2spa	 class="comment"> * @populated: populated bitmap2/spa	  v 712/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to be freed2/spa	  v 722/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to be freed +v12/spa	  v 732/a>2spa	 class="comment"> *2/spa	  v 742/a>2spa	 class="comment"> * Free pages [@page_start and @page_end) in @pages for all unitse2/spa	  v 752/a>2spa	 class="comment"> * The pages were alloca1ed for @chunke2/spa	  v 762/a>2spa	 class="comment"> */2/spa	  v 772/a>static void 2a href="+code=pcpu_free_pages" class="sref">pcpu_free_pages2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v 782/a>                            structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>, unsigned long *2a href="+code=populated" class="sref">populated2/a>, v 792/a>                            intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v 802/a>{ v 812/a>        unsigned intv2a href="+code=cpu" class="sref">cpu2/a>; v 822/a>        intv2a href="+code=i" class="sref">i2/a>; v 832/a> v 842/a>        2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=cpu" class="sref">cpu2/a>) { v 852/a>                for (2a href="+code=i" class="sref">i2/a> =v2a href="+code=page_start" class="sref">page_start2/a>;v2a href="+code=i" class="sref">i2/a> <v2a href="+code=page_end" class="sref">page_end2/a>;v2a href="+code=i" class="sref">i2/a>++) { v 862/a>                        structv2a href="+code=page" class="sref">page2/a> *2a href="+code=page" class="sref">page2/a> =v2a href="+code=pages" class="sref">pages2/a>[2a href="+code=pcpu_page_idx" class="sref">pcpu_page_idx2/a>(2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=i" class="sref">i2/a>)]; v 872/a> v 882/a>                        if (2a href="+code=page" class="sref">page2/a>) v 892/a>                                2a href="+code=__free_page" class="sref">__free_page2/a>(2a href="+code=page" class="sref">page2/a>); v 902/a>                } v 912/a>        } v 926.a>} v 932/a> v 942/a>2spa	 class="comment">/**2/spa	  v 952/a>2spa	 class="comment"> * pcpu_alloc_pages - alloca1es pages for @chunk2/spa	  v 962/a>2spa	 class="comment"> * @chunk: target chunk2/spa	  v 972/a>2spa	 class="comment"> * @pages: array to put the alloca1ed pages into, indexed by pcpu_page_idx()2/spa	  v 982/a>2spa	 class="comment"> * @populated: populated bitmap2/spa	  v 992/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to be alloca1ed2/spa	  v1002/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to be alloca1ed +v12/spa	  v1012/a>2spa	 class="comment"> *2/spa	  v1022/a>2spa	 class="comment"> * Alloca1e pages [@page_start,@page_end) into @pages for all unitse2/spa	  v1032/a>2spa	 class="comment"> * The alloca122.2is for @chunke  Percpu core doesn't care about the2/spa	  v1042/a>2spa	 class="comment"> * content of @pages and will pass it verba12m to pcpu_map_pages()e2/spa	  v1052/a>2spa	 class="comment"> */2/spa	  v1062/a>static intv2a href="+code=pcpu_alloc_pages" class="sref">pcpu_alloc_pages2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v1072/a>                            structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>, unsigned long *2a href="+code=populated" class="sref">populated2/a>, v1082/a>                            intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v1092/a>{ v1102/a>        constv2a href="+code=gfp_t" class="sref">gfp_t2/a> 2a href="+code=gfp" class="sref">gfp2/a> =v2a href="+code=GFP_KERNEL" class="sref">GFP_KERNEL2/a> | 2a href="+code=__GFP_HIGHMEM" class="sref">__GFP_HIGHMEM2/a> | 2a href="+code=__GFP_COLD" class="sref">__GFP_COLD2/a>; v1112/a>        unsigned intv2a href="+code=cpu" class="sref">cpu2/a>; v1122/a>        intv2a href="+code=i" class="sref">i2/a>; v1132/a> v1142/a>        2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=cpu" class="sref">cpu2/a>) { v1152/a>                for (2a href="+code=i" class="sref">i2/a> =v2a href="+code=page_start" class="sref">page_start2/a>;v2a href="+code=i" class="sref">i2/a> <v2a href="+code=page_end" class="sref">page_end2/a>;v2a href="+code=i" class="sref">i2/a>++) { v1162/a>                        structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pagep" class="sref">pagep2/a> =v&2a href="+code=pages" class="sref">pages2/a>[2a href="+code=pcpu_page_idx" class="sref">pcpu_page_idx2/a>(2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=i" class="sref">i2/a>)]; v1172/a> v1182/a>                        *2a href="+code=pagep" class="sref">pagep2/a> =v2a href="+code=alloc_pages_node" class="sref">alloc_pages_node2/a>(2a href="+code=cpu_to_node" class="sref">cpu_to_node2/a>(2a href="+code=cpu" class="sref">cpu2/a>), 2a href="+code=gfp" class="sref">gfp2/a>, 0); v1192/a>                        if (!*2a href="+code=pagep" class="sref">pagep2/a>) { v1202/a>                                2a href="+code=pcpu_free_pages" class="sref">pcpu_free_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>,v2a href="+code=populated" class="sref">populated2/a>, v1212/a>                                                2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>); v1222/a>                                return -2a href="+code=ENOMEM" class="sref">ENOMEM2/a>; v1232/a>                        } v1242/a>                } v1252/a>        } v1262/a>        return 0; v1272/a>} v1282/a> v1292/a>2spa	 class="comment">/**2/spa	  v13.6.a>2spa	 class="comment"> * pcpu_pre_unmap_flush - flush cache prior to unmapping2/spa	  v1312/a>2spa	 class="comment"> * @chunk: chunk the regions to be flushed belongs to2/spa	  v1322/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to be flushed2/spa	  v1332/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to be flushed +v12/spa	  v1342/a>2spa	 class="comment"> *2/spa	  v1352/a>2spa	 class="comment"> * Pages in [@page_start,@page_end) of @chunk are about to be2/spa	  v1362/a>2spa	 class="comment"> * unmapped.  Flush cache.  As each flushing trial ca	 be very2/spa	  v1372/a>2spa	 class="comment"> * expensive, issue flush 
	 the whole region at once rather tha	2/spa	  v1382/a>2spa	 class="comment"> * doing it for each cpu.  This could be an overkill but is more2/spa	  v1392/a>2spa	 class="comment"> * scalablee2/spa	  v14.6.a>2spa	 class="comment"> */2/spa	  v1412/a>static void 2a href="+code=pcpu_pre_unmap_flush" class="sref">pcpu_pre_unmap_flush2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v1422/a>                                 intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v1432/a>{ v1442/a>        2a href="+code=flush_cache_vunmap" class="sref">flush_cache_vunmap2/a>( v1452/a>                2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pcpu_low_unit_cpu" class="sref">pcpu_low_unit_cpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>), v1462/a>                2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pcpu_high_unit_cpu" class="sref">pcpu_high_unit_cpu2/a>, 2a href="+code=page_end" class="sref">page_end2/a>)); v1472/a>} v1482/a> v1492/a>static void 2a href="+code=__pcpu_unmap_pages" class="sref">__pcpu_unmap_pages2/a>(unsigned long 2a href="+code=addr" class="sref">addr2/a>, intv2a href="+code=nr_pages" class="sref">nr_pages2/a>) v1502/a>{ v1512/a>        2a href="+code=unmap_kernel_range_noflush" class="sref">unmap_kernel_range_noflush2/a>(2a href="+code=addr" class="sref">addr2/a>, 2a href="+code=nr_pages" class="sref">nr_pages2/a> <<v2a href="+code=PAGE_SHIFT" class="sref">PAGE_SHIFT2/a>); v1526.a>} v1532/a> v1542/a>2spa	 class="comment">/**2/spa	  v1552/a>2spa	 class="comment"> * pcpu_unmap_pages - unmap pages out of a pcpu_chunk2/spa	  v1562/a>2spa	 class="comment"> * @chunk: chunk of interest2/spa	  v1572/a>2spa	 class="comment"> * @pages: pages array which ca	 be used to pass informa122.2to free2/spa	  v1582/a>2spa	 class="comment"> * @populated: populated bitmap2/spa	  v1592/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to unmap2/spa	  v1602/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to unmap +v12/spa	  v1612/a>2spa	 class="comment"> *2/spa	  v1622/a>2spa	 class="comment"> * For each cpu, unmap pages [@page_start,@page_end) out of @chunke2/spa	  v1632/a>2spa	 class="comment"> * Corresponding elements in @pages were cleared by the caller and ca	2/spa	  v1642/a>2spa	 class="comment"> * be used to carry informa122.2to pcpu_free_pages() which will be2/spa	  v1652/a>2spa	 class="comment"> * called after all unmaps are finished.  The caller should call2/spa	  v1662/a>2spa	 class="comment"> * proper2pre/post flush func1"
	se2/spa	  v1672/a>2spa	 class="comment"> */2/spa	  v1682/a>static void 2a href="+code=pcpu_unmap_pages" class="sref">pcpu_unmap_pages2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v1692/a>                             structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>, unsigned long *2a href="+code=populated" class="sref">populated2/a>, v1702/a>                             intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v1712/a>{ v1722/a>        unsigned intv2a href="+code=cpu" class="sref">cpu2/a>; v1732/a>        intv2a href="+code=i" class="sref">i2/a>; v1742/a> v1752/a>        2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=cpu" class="sref">cpu2/a>) { v1762/a>                for (2a href="+code=i" class="sref">i2/a> =v2a href="+code=page_start" class="sref">page_start2/a>;v2a href="+code=i" class="sref">i2/a> <v2a href="+code=page_end" class="sref">page_end2/a>;v2a href="+code=i" class="sref">i2/a>++) { v1772/a>                        structv2a href="+code=page" class="sref">page2/a> *2a href="+code=page" class="sref">page2/a>; v1782/a> v1792/a>                        2a href="+code=page" class="sref">page2/a> =v2a href="+code=pcpu_chunk_page" class="sref">pcpu_chunk_page2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=i" class="sref">i2/a>); v1802/a>                        2a href="+code=WARN_ON" class="sref">WARN_ON2/a>(!2a href="+code=page" class="sref">page2/a>); v1812/a>                        2a href="+code=pages" class="sref">pages2/a>[2a href="+code=pcpu_page_idx" class="sref">pcpu_page_idx2/a>(2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=i" class="sref">i2/a>)] =v2a href="+code=page" class="sref">page2/a>; v1822/a>                } v1832/a>                2a href="+code=__pcpu_unmap_pages" class="sref">__pcpu_unmap_pages2/a>(2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>), v1842/a>                                   2a href="+code=page_end" class="sref">page_end2/a> - 2a href="+code=page_start" class="sref">page_start2/a>); v1852/a>        } v1862/a> v1872/a>        2a href="+code=bitmap_clear" class="sref">bitmap_clear2/a>(2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a> - 2a href="+code=page_start" class="sref">page_start2/a>); v1882/a>} v1892/a> v1902/a>2spa	 class="comment">/**2/spa	  v1912/a>2spa	 class="comment"> * pcpu_post_unmap_tlb_flush - flush TLB after unmapping2/spa	  v1922/a>2spa	 class="comment"> * @chunk: pcpu_chunk the regions to be flushed belong to2/spa	  v1932/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to be flushed2/spa	  v1942/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to be flushed +v12/spa	  v1952/a>2spa	 class="comment"> *2/spa	  v1962/a>2spa	 class="comment"> * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush2/spa	  v1972/a>2spa	 class="comment"> * TLB for the regions.  This ca	 be skipped if the area2is to be2/spa	  v1982/a>2spa	 class="comment"> * returned to vmalloc as vmalloc will handle TLB flushing lazilye2/spa	  v1992/a>2spa	 class="comment"> *2/spa	  v2002/a>2spa	 class="comment"> * As with pcpu_pre_unmap_flush(), TLB flushing also2is done at once2/spa	  v2012/a>2spa	 class="comment"> * for the whole regione2/spa	  v2022/a>2spa	 class="comment"> */2/spa	  v2032/a>static void 2a href="+code=pcpu_post_unmap_tlb_flush" class="sref">pcpu_post_unmap_tlb_flush2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v2042/a>                                      intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v2052/a>{ v2062/a>        2a href="+code=flush_tlb_kernel_range" class="sref">flush_tlb_kernel_range2/a>( v2072/a>                2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pcpu_low_unit_cpu" class="sref">pcpu_low_unit_cpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>), v2082/a>                2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pcpu_high_unit_cpu" class="sref">pcpu_high_unit_cpu2/a>, 2a href="+code=page_end" class="sref">page_end2/a>)); v2092/a>} v2102/a> v2112/a>static intv2a href="+code=__pcpu_map_pages" class="sref">__pcpu_map_pages2/a>(unsigned long 2a href="+code=addr" class="sref">addr2/a>, structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>, v2122/a>                            intv2a href="+code=nr_pages" class="sref">nr_pages2/a>) v2132/a>{ v2142/a>        return 2a href="+code=map_kernel_range_noflush" class="sref">map_kernel_range_noflush2/a>(2a href="+code=addr" class="sref">addr2/a>, 2a href="+code=nr_pages" class="sref">nr_pages2/a> <<v2a href="+code=PAGE_SHIFT" class="sref">PAGE_SHIFT2/a>, v2152/a>                                        2a href="+code=PAGE_KERNEL" class="sref">PAGE_KERNEL2/a>, 2a href="+code=pages" class="sref">pages2/a>); v2162/a>} v2172/a> v2182/a>2spa	 class="comment">/**2/spa	  v2192/a>2spa	 class="comment"> * pcpu_map_pages - map pages into a pcpu_chunk2/spa	  v2202/a>2spa	 class="comment"> * @chunk: chunk of interest2/spa	  v2212/a>2spa	 class="comment"> * @pages: pages array containing pages to be mapped2/spa	  v2222/a>2spa	 class="comment"> * @populated: populated bitmap2/spa	  v2232/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to map2/spa	  v2242/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to map +v12/spa	  v2252/a>2spa	 class="comment"> *2/spa	  v2262/a>2spa	 class="comment"> * For each cpu, map pages [@page_start,@page_end) into @chunke  The2/spa	  v2272/a>2spa	 class="comment"> * caller is responsible for calling pcpu_post_map_flush() after all2/spa	  v2282/a>2spa	 class="comment"> * mappings are completee2/spa	  v2292/a>2spa	 class="comment"> *2/spa	  v23.6.a>2spa	 class="comment"> * This func1"
	 is responsible for setting corresponding bits in2/spa	  v2312/a>2spa	 class="comment"> * @chunk->populated bitmap and whatever is necessary for reverse2/spa	  v2322/a>2spa	 class="comment"> * lookup (addr -> chunk)e2/spa	  v2332/a>2spa	 class="comment"> */2/spa	  v2342/a>static intv2a href="+code=pcpu_map_pages" class="sref">pcpu_map_pages2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v2352/a>                          structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>, unsigned long *2a href="+code=populated" class="sref">populated2/a>, v2362/a>                          intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v2372/a>{ v2382/a>        unsigned intv2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=tcpu" class="sref">tcpu2/a>; v2392/a>        intv2a href="+code=i" class="sref">i2/a>, 2a href="+code=err" class="sref">err2/a>; v2402/a> v2412/a>        2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=cpu" class="sref">cpu2/a>) { v2422/a>                2a href="+code=err" class="sref">err2/a> =v2a href="+code=__pcpu_map_pages" class="sref">__pcpu_map_pages2/a>(2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>), v2432/a>                                       &2a href="+code=pages" class="sref">pages2/a>[2a href="+code=pcpu_page_idx" class="sref">pcpu_page_idx2/a>(2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>)], v2442/a>                                       2a href="+code=page_end" class="sref">page_end2/a> - 2a href="+code=page_start" class="sref">page_start2/a>); v2452/a>                if (2a href="+code=err" class="sref">err2/a> <v0) v2462/a>                        goto 2a href="+code=err" class="sref">err2/a>; v2472/a>        } v2482/a> v2492/a>        2spa	 class="comment">/* mapping successful, link chunk and mark populated */2/spa	  v2502/a>        for (2a href="+code=i" class="sref">i2/a> =v2a href="+code=page_start" class="sref">page_start2/a>;v2a href="+code=i" class="sref">i2/a> <v2a href="+code=page_end" class="sref">page_end2/a>;v2a href="+code=i" class="sref">i2/a>++) { v2512/a>                2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=cpu" class="sref">cpu2/a>) v2522/a>                        2a href="+code=pcpu_set_page_chunk" class="sref">pcpu_set_page_chunk2/a>(2a href="+code=pages" class="sref">pages2/a>[2a href="+code=pcpu_page_idx" class="sref">pcpu_page_idx2/a>(2a href="+code=cpu" class="sref">cpu2/a>, 2a href="+code=i" class="sref">i2/a>)], v2532/a>                                            2a href="+code=chunk" class="sref">chunk2/a>); v2542/a>                2a href="+code=__set_bit" class="sref">__set_bit2/a>(2a href="+code=i" class="sref">i2/a>, 2a href="+code=populated" class="sref">populated2/a>); v2552/a>        } v2562/a> v2572/a>        return 0; v2582/a> v2592/a>2a href="+code=err" class="sref">err2/a>: v2602/a>        2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=tcpu" class="sref">tcpu2/a>) { v2612/a>                if (2a href="+code=tcpu" class="sref">tcpu2/a> ==v2a href="+code=cpu" class="sref">cpu2/a>) v2622/a>                        break; v2632/a>                2a href="+code=__pcpu_unmap_pages" class="sref">__pcpu_unmap_pages2/a>(2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=tcpu" class="sref">tcpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>), v2642/a>                                   2a href="+code=page_end" class="sref">page_end2/a> - 2a href="+code=page_start" class="sref">page_start2/a>); v2652/a>        } v2662/a>        return 2a href="+code=err" class="sref">err2/a>; v2672/a>} v2682/a> v2692/a>2spa	 class="comment">/**2/spa	  v27.6.a>2spa	 class="comment"> * pcpu_post_map_flush - flush cache after mapping2/spa	  v2712/a>2spa	 class="comment"> * @chunk: pcpu_chunk the regions to be flushed belong to2/spa	  v2722/a>2spa	 class="comment"> * @page_start:vpage index of the firstvpage to be flushed2/spa	  v2732/a>2spa	 class="comment"> * @page_end:vpage index of the lastvpage to be flushed +v12/spa	  v2742/a>2spa	 class="comment"> *2/spa	  v2752/a>2spa	 class="comment"> * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush2/spa	  v2762/a>2spa	 class="comment"> * cache.2/spa	  v2772/a>2spa	 class="comment"> *2/spa	  v2782/a>2spa	 class="comment"> * As with pcpu_pre_unmap_flush(), TLB flushing also2is done at once2/spa	  v2792/a>2spa	 class="comment"> * for the whole regione2/spa	  v28.6.a>2spa	 class="comment"> */2/spa	  v2812/a>static void 2a href="+code=pcpu_post_map_flush" class="sref">pcpu_post_map_flush2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, v2822/a>                                intv2a href="+code=page_start" class="sref">page_start2/a>, intv2a href="+code=page_end" class="sref">page_end2/a>) v2832/a>{ v2842/a>        2a href="+code=flush_cache_vmap" class="sref">flush_cache_vmap2/a>( v2852/a>                2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pcpu_low_unit_cpu" class="sref">pcpu_low_unit_cpu2/a>, 2a href="+code=page_start" class="sref">page_start2/a>), v2862/a>                2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pcpu_high_unit_cpu" class="sref">pcpu_high_unit_cpu2/a>, 2a href="+code=page_end" class="sref">page_end2/a>)); v2872/a>} v2882/a> v2892/a>2spa	 class="comment">/**2/spa	  v2902/a>2spa	 class="comment"> * pcpu_populate_chunk - populate and map an area2of a pcpu_chunk2/spa	  v2912/a>2spa	 class="comment"> * @chunk: chunk of interest2/spa	  v2922/a>2spa	 class="comment"> * @off: offset to the area2to populate2/spa	  v2932/a>2spa	 class="comment"> * @size: size of the area2to populate in bytes2/spa	  v2942/a>2spa	 class="comment"> *2/spa	  v2952/a>2spa	 class="comment"> * For each cpu, populate and map pages [@page_start,@page_end) into2/spa	  v2962/a>2spa	 class="comment"> * @chunke  The area2is cleared on returne2/spa	  v2972/a>2spa	 class="comment"> *2/spa	  v2982/a>2spa	 class="comment"> * CONTEXT:2/spa	  v2992/a>2spa	 class="comment"> * pcpu_alloc_mutex, does GFP_KERNEL alloca122.e2/spa	  v3002/a>2spa	 class="comment"> */2/spa	  v3012/a>static intv2a href="+code=pcpu_populate_chunk" class="sref">pcpu_populate_chunk2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, intv2a href="+code=off" class="sref">off2/a>, intv2a href="+code=size" class="sref">size2/a>) v3022/a>{ v3032/a>        intv2a href="+code=page_start" class="sref">page_start2/a> =v2a href="+code=PFN_DOWN" class="sref">PFN_DOWN2/a>(2a href="+code=off" class="sref">off2/a>); v3042/a>        intv2a href="+code=page_end" class="sref">page_end2/a> =v2a href="+code=PFN_UP" class="sref">PFN_UP2/a>(2a href="+code=off" class="sref">off2/a> +v2a href="+code=size" class="sref">size2/a>); v3052/a>        intv2a href="+code=free_end" class="sref">free_end2/a> =v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=unmap_end" class="sref">unmap_end2/a> =v2a href="+code=page_start" class="sref">page_start2/a>; v3062/a>        structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>; v3072/a>        unsigned long *2a href="+code=populated" class="sref">populated2/a>; v3082/a>        unsigned intv2a href="+code=cpu" class="sref">cpu2/a>; v3092/a>        intv2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=rc" class="sref">rc2/a>; v3102/a> v3112/a>        2spa	 class="comment">/* quick path, check whether all pages are already there */2/spa	  v3122/a>        2a href="+code=rs" class="sref">rs2/a> =v2a href="+code=page_start" class="sref">page_start2/a>; v3132/a>        2a href="+code=pcpu_next_pop" class="sref">pcpu_next_pop2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v&2a href="+code=rs" class="sref">rs2/a>,v&2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_end" class="sref">page_end2/a>); v3142/a>        if (2a href="+code=rs" class="sref">rs2/a> ==v2a href="+code=page_start" class="sref">page_start2/a>v&&v2a href="+code=re" class="sref">re2/a> ==v2a href="+code=page_end" class="sref">page_end2/a>) v3152/a>                goto 2a href="+code=clear" class="sref">clear2/a>; v3162/a> v3172/a>        2spa	 class="comment">/* need to alloca1e and map pages, this chunk can't be immutable */2/spa	  v3182/a>        2a href="+code=WARN_ON" class="sref">WARN_ON2/a>(2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=immutable" class="sref">immutable2/a>); v3192/a> v3202/a>        2a href="+code=pages" class="sref">pages2/a> =v2a href="+code=pcpu_get_pages_and_bitmap" class="sref">pcpu_get_pages_and_bitmap2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v&2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=true" class="sref">true2/a>); v3212/a>        if (!2a href="+code=pages" class="sref">pages2/a>) v3222/a>                return -2a href="+code=ENOMEM" class="sref">ENOMEM2/a>; v3232/a> v3242/a>        2spa	 class="comment">/* alloc and map */2/spa	  v3252/a>        2a href="+code=pcpu_for_each_unpop_region" class="sref">pcpu_for_each_unpop_region2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>) { v3262/a>                2a href="+code=rc" class="sref">rc2/a> =v2a href="+code=pcpu_alloc_pages" class="sref">pcpu_alloc_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>); v3272/a>                if (2a href="+code=rc" class="sref">rc2/a>) v3282/a>                        goto 2a href="+code=err_free" class="sref">err_free2/a>; v3292/a>                2a href="+code=free_end" class="sref">free_end2/a> =v2a href="+code=re" class="sref">re2/a>; v3302/a>        } v3312/a> v3322/a>        2a href="+code=pcpu_for_each_unpop_region" class="sref">pcpu_for_each_unpop_region2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>) { v3332/a>                2a href="+code=rc" class="sref">rc2/a> =v2a href="+code=pcpu_map_pages" class="sref">pcpu_map_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>); v3342/a>                if (2a href="+code=rc" class="sref">rc2/a>) v3352/a>                        goto 2a href="+code=err_unmap" class="sref">err_unmap2/a>; v3362/a>                2a href="+code=unmap_end" class="sref">unmap_end2/a> =v2a href="+code=re" class="sref">re2/a>; v3372/a>        } v3382/a>        2a href="+code=pcpu_post_map_flush" class="sref">pcpu_post_map_flush2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>); v3392/a> v3402/a>        2spa	 class="comment">/* commit new bitmap */2/spa	  v3412/a>        2a href="+code=bitmap_copy" class="sref">bitmap_copy2/a>(2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=pcpu_unit_pages" class="sref">pcpu_unit_pages2/a>); v3422/a>2a href="+code=clear" class="sref">clear2/a>: v3432/a>        2a href="+code=for_each_possible_cpu" class="sref">for_each_possible_cpu2/a>(2a href="+code=cpu" class="sref">cpu2/a>) v3442/a>                2a href="+code=memset" class="sref">memset2/a>((void *)2a href="+code=pcpu_chunk_addr" class="sref">pcpu_chunk_addr2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=cpu" class="sref">cpu2/a>, 0) +v2a href="+code=off" class="sref">off2/a>, 0,v2a href="+code=size" class="sref">size2/a>); v3452/a>        return 0; v3462/a> v3472/a>2a href="+code=err_unmap" class="sref">err_unmap2/a>: v3482/a>        2a href="+code=pcpu_pre_unmap_flush" class="sref">pcpu_pre_unmap_flush2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=unmap_end" class="sref">unmap_end2/a>); v3492/a>        2a href="+code=pcpu_for_each_unpop_region" class="sref">pcpu_for_each_unpop_region2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=unmap_end" class="sref">unmap_end2/a>) v3502/a>                2a href="+code=pcpu_unmap_pages" class="sref">pcpu_unmap_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>); v3512/a>        2a href="+code=pcpu_post_unmap_tlb_flush" class="sref">pcpu_post_unmap_tlb_flush2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=unmap_end" class="sref">unmap_end2/a>); v3522/a>2a href="+code=err_free" class="sref">err_free2/a>: v3532/a>        2a href="+code=pcpu_for_each_unpop_region" class="sref">pcpu_for_each_unpop_region2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=free_end" class="sref">free_end2/a>) v3542/a>                2a href="+code=pcpu_free_pages" class="sref">pcpu_free_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>); v3552/a>        return 2a href="+code=rc" class="sref">rc2/a>; v3562/a>} v3572/a> v3582/a>2spa	 class="comment">/**2/spa	  v3592/a>2spa	 class="comment"> * pcpu_depopulate_chunk - depopulate and unmap an area2of a pcpu_chunk2/spa	  v3602/a>2spa	 class="comment"> * @chunk: chunk to depopulate2/spa	  v3612/a>2spa	 class="comment"> * @off: offset to the area2to depopulate2/spa	  v3622/a>2spa	 class="comment"> * @size: size of the area2to depopulate in bytes2/spa	  v3632/a>2spa	 class="comment"> *2/spa	  v3642/a>2spa	 class="comment"> * For each cpu, depopulate and unmap pages [@page_start,@page_end)2/spa	  v3652/a>2spa	 class="comment"> * from @chunke  If @flush is true, vcache is flushed before unmapping2/spa	  v3662/a>2spa	 class="comment"> * and tlb aftere2/spa	  v3672/a>2spa	 class="comment"> *2/spa	  v3682/a>2spa	 class="comment"> * CONTEXT:2/spa	  v3692/a>2spa	 class="comment"> * pcpu_alloc_mutexe2/spa	  v37.6.a>2spa	 class="comment"> */2/spa	  v3712/a>static void 2a href="+code=pcpu_depopulate_chunk" class="sref">pcpu_depopulate_chunk2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>, intv2a href="+code=off" class="sref">off2/a>, intv2a href="+code=size" class="sref">size2/a>) v3722/a>{ v3732/a>        intv2a href="+code=page_start" class="sref">page_start2/a> =v2a href="+code=PFN_DOWN" class="sref">PFN_DOWN2/a>(2a href="+code=off" class="sref">off2/a>); v3742/a>        intv2a href="+code=page_end" class="sref">page_end2/a> =v2a href="+code=PFN_UP" class="sref">PFN_UP2/a>(2a href="+code=off" class="sref">off2/a> +v2a href="+code=size" class="sref">size2/a>); v3752/a>        structv2a href="+code=page" class="sref">page2/a> **2a href="+code=pages" class="sref">pages2/a>; v3762/a>        unsigned long *2a href="+code=populated" class="sref">populated2/a>; v3772/a>        intv2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>; v3782/a> v3792/a>        2spa	 class="comment">/* quick path, check whether it's empty already */2/spa	  v3802/a>        2a href="+code=rs" class="sref">rs2/a> =v2a href="+code=page_start" class="sref">page_start2/a>; v3812/a>        2a href="+code=pcpu_next_unpop" class="sref">pcpu_next_unpop2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v&2a href="+code=rs" class="sref">rs2/a>,v&2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_end" class="sref">page_end2/a>); v3822/a>        if (2a href="+code=rs" class="sref">rs2/a> ==v2a href="+code=page_start" class="sref">page_start2/a>v&&v2a href="+code=re" class="sref">re2/a> ==v2a href="+code=page_end" class="sref">page_end2/a>) v3832/a>                return; v3842/a> v3852/a>        2spa	 class="comment">/* immutable chunks can't be depopulated */2/spa	  v3862/a>        2a href="+code=WARN_ON" class="sref">WARN_ON2/a>(2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=immutable" class="sref">immutable2/a>); v3872/a> v3882/a>        2spa	 class="comment">/*2/spa	  v3892/a>2spa	 class="comment">         * If control reaches here, there must have been at leastvone2/spa	  v3902/a>2spa	 class="comment">         * successful populat"
	 attempt so the temp pages array must2/spa	  v3912/a>2spa	 class="comment">         * be available nowe2/spa	  v3922/a>2spa	 class="comment">         */2/spa	  v3932/a>        2a href="+code=pages" class="sref">pages2/a> =v2a href="+code=pcpu_get_pages_and_bitmap" class="sref">pcpu_get_pages_and_bitmap2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v&2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=false" class="sref">false2/a>); v3942/a>        2a href="+code=BUG_ON" class="sref">BUG_ON2/a>(!2a href="+code=pages" class="sref">pages2/a>); v3952/a> v3962/a>        2spa	 class="comment">/* unmap and free */2/spa	  v3972/a>        2a href="+code=pcpu_pre_unmap_flush" class="sref">pcpu_pre_unmap_flush2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>); v3982/a> v3992/a>        2a href="+code=pcpu_for_each_pop_region" class="sref">pcpu_for_each_pop_region2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>) v4002/a>                2a href="+code=pcpu_unmap_pages" class="sref">pcpu_unmap_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>); v4012/a> v4022/a>        2spa	 class="comment">/* no need to flush tlb, vmalloc will handle it lazily */2/spa	  v4032/a> v4042/a>        2a href="+code=pcpu_for_each_pop_region" class="sref">pcpu_for_each_pop_region2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>,v2a href="+code=page_start" class="sref">page_start2/a>, 2a href="+code=page_end" class="sref">page_end2/a>) v4052/a>                2a href="+code=pcpu_free_pages" class="sref">pcpu_free_pages2/a>(2a href="+code=chunk" class="sref">chunk2/a>,v2a href="+code=pages" class="sref">pages2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=rs" class="sref">rs2/a>,v2a href="+code=re" class="sref">re2/a>); v4062/a> v4072/a>        2spa	 class="comment">/* commit new bitmap */2/spa	  v4082/a>        2a href="+code=bitmap_copy" class="sref">bitmap_copy2/a>(2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=populated" class="sref">populated2/a>, 2a href="+code=pcpu_unit_pages" class="sref">pcpu_unit_pages2/a>); v4092/a>} v4102/a> v4112/a>static structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=pcpu_create_chunk" class="sref">pcpu_create_chunk2/a>(void) v4122/a>{ v4132/a>        structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>; v4142/a>        structv2a href="+code=vm_struct" class="sref">vm_struct2/a> **2a href="+code=vms" class="sref">vms2/a>; v4152/a> v4162/a>        2a href="+code=chunk" class="sref">chunk2/a> =v2a href="+code=pcpu_alloc_chunk" class="sref">pcpu_alloc_chunk2/a>(); v4172/a>        if (!2a href="+code=chunk" class="sref">chunk2/a>) v4182/a>                return 2a href="+code=NULL" class="sref">NULL2/a>; v4192/a> v4202/a>        2a href="+code=vms" class="sref">vms2/a> =v2a href="+code=pcpu_get_vm_areas" class="sref">pcpu_get_vm_areas2/a>(2a href="+code=pcpu_group_offsets" class="sref">pcpu_group_offsets2/a>, 2a href="+code=pcpu_group_sizes" class="sref">pcpu_group_sizes2/a>, v4212/a>                                2a href="+code=pcpu_nr_groups" class="sref">pcpu_nr_groups2/a>, 2a href="+code=pcpu_atom_size" class="sref">pcpu_atom_size2/a>); v4222/a>        if (!2a href="+code=vms" class="sref">vms2/a>) { v4232/a>                2a href="+code=pcpu_free_chunk" class="sref">pcpu_free_chunk2/a>(2a href="+code=chunk" class="sref">chunk2/a>); v4242/a>                return 2a href="+code=NULL" class="sref">NULL2/a>; v4252/a>        } v4262/a> v4272/a>        2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=data" class="sref">data2/a> =v2a href="+code=vms" class="sref">vms2/a>; v4282/a>        2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=base_addr" class="sref">base_addr2/a> =v2a href="+code=vms" class="sref">vms2/a>[0]->2a href="+code=addr" class="sref">addr2/a> - 2a href="+code=pcpu_group_offsets" class="sref">pcpu_group_offsets2/a>[0]; v4292/a>        return 2a href="+code=chunk" class="sref">chunk2/a>; v4302/a>} v4312/a> v4322/a>static void 2a href="+code=pcpu_destroy_chunk" class="sref">pcpu_destroy_chunk2/a>(structv2a href="+code=pcpu_chunk" class="sref">pcpu_chunk2/a> *2a href="+code=chunk" class="sref">chunk2/a>) v4332/a>{ v4342/a>        if (2a href="+code=chunk" class="sref">chunk2/a> &&v2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=data" class="sref">data2/a>) v4352/a>                2a href="+code=pcpu_free_vm_areas" class="sref">pcpu_free_vm_areas2/a>(2a href="+code=chunk" class="sref">chunk2/a>->2a href="+code=data" class="sref">data2/a>, 2a href="+code=pcpu_nr_groups" class="sref">pcpu_nr_groups2/a>); v4362/a>        2a href="+code=pcpu_free_chunk" class="sref">pcpu_free_chunk2/a>(2a href="+code=chunk" class="sref">chunk2/a>); v4372/a>} v4382/a> v4392/a>static structv2a href="+code=page" class="sref">page2/a> *2a href="+code=pcpu_addr_to_page" class="sref">pcpu_addr_to_page2/a>(void *2a href="+code=addr" class="sref">addr2/a>) v4402/a>{ v4412/a>        return 2a href="+code=vmalloc_to_page" class="sref">vmalloc_to_page2/a>(2a href="+code=addr" class="sref">addr2/a>); v4422/a>} v4432/a> v4442/a>static intv2a href="+code=__init" class="sref">__init2/a> 2a href="+code=pcpu_verify_alloc_info" class="sref">pcpu_verify_alloc_info2/a>(const structv2a href="+code=pcpu_alloc_info" class="sref">pcpu_alloc_info2/a> *2a href="+code=ai" class="sref">ai2/a>) v4452/a>{ v4462/a>        2spa	 class="comment">/* no extra restrict"
	 */2/spa	  v4472/a>        return 0; v4482/a>} v4492/a>
lxr.linux.no kindly hosted by Redpill Linpro AS2/a>, provider of Linux consulting and operat" s services since 1995.