/*
 *  linux/mm/kmemcheck.c
 */
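
/*
 * Shadow-memory bookkeeping for kmemcheck: every tracked data page gets a
 * shadow page recording, byte for byte, whether the data has been
 * initialized, and the data pages themselves are kept non-present so that
 * each access traps into kmemcheck. The helpers below are called from the
 * page and slab allocators to set up and tear down that shadow state.
 */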
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
        struct page *shadow;
        int pages;
        int i;

        pages = 1 << order;

        /*
         * With kmemcheck enabled, we need to allocate a memory area for the
         * shadow bits as well.
         */
        shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
        if (!shadow) {
                if (printk_ratelimit())
                        printk(KERN_ERR "kmemcheck: failed to allocate "
                                "shadow bitmap\n");
                return;
        }

        for (i = 0; i < pages; ++i)
                page[i].shadow = page_address(&shadow[i]);

        /*
         * Mark it as non-present for the MMU so that our accesses to
         * this memory will trigger a page fault and let us analyze
         * the memory accesses.
         */
        kmemcheck_hide_pages(page, pages);
}
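
/*
 * Undo kmemcheck_alloc_shadow(): make the data pages present again, clear
 * the page[i].shadow pointers and free the shadow pages. Returns early for
 * pages that were never given a shadow, so callers need not check
 * kmemcheck_page_is_tracked() themselves.
 */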
void kmemcheck_free_shadow(struct page *page, int order)
{
        struct page *shadow;
        int pages;
        int i;

        if (!kmemcheck_page_is_tracked(page))
                return;

        pages = 1 << order;

        kmemcheck_show_pages(page, pages);

        shadow = virt_to_page(page[0].shadow);

        for (i = 0; i < pages; ++i)
                page[i].shadow = NULL;

        __free_pages(shadow, order);
}
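
/*
 * Set the shadow state of a freshly allocated slab object: objects from
 * tracked, constructor-less caches start out "uninitialized" so that a
 * read before the first write is reported; zeroed (__GFP_ZERO) and
 * notrack allocations are left alone or marked "initialized" instead.
 * Rough sketch of a slab allocator's post-allocation hook calling this
 * (not the exact upstream call site; "size" is the object size):
 *
 *      kmemcheck_slab_alloc(s, gfpflags, object, size);
 */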
> 58
/a>d> 59
/a>void>
a href="+code=kmemcheck_slab_alloc" class="sref">kmemcheck_slab_alloc
/a>(struct>
a href="+code=kmem_cache" class="sref">kmem_cache
/a> *
a href="+code=s" class="sref">s
/a>, 
a href="+code=gfp_t" class="sref">gfp_t
/a> 
a href="+code=gfpflags" class="sref">gfpflags
/a>, void>*
a href="+code=object" class="sref">object
/a>,
> 60.1a>                          
a href="+code=size_t" class="sref">size_t
/a> 
a href="+code=size" class="sref">size
/a>)d> 61
/a>{d> 62.1a>        
spa2 class="comment">/*
/spa2in> 63
/a>
spa2 class="comment">         * Has already been memset(), which initializes the shadow for us
/spa2in> 64
/a>
spa2 class="comment">         * as well.
/spa2in> 65
/a>
spa2 class="comment">         */
/spa2in> 66.1a>        if (
a href="+code=gfpflags" class="sref">gfpflags
/a> & 
a href="+code=__GFP_ZERO" class="sref">__GFP_ZERO
/a>)d> 67.1a>                return;
> 68
/a>d> 69.1a>        
spa2 class="comment">/* No need to initialize the shadow of a non-tracked slab. */
/spa2in> 78.1a>        if (
a href="+code=s" class="sref">s
/a>->
a href="+code=flags" class="sref">flags
/a> & 
a href="+code=SLAB_NOTRACK" class="sref">SLAB_NOTRACK
/a>)d> 71.1a>                return;
> 72
/a>d> 73.1a>        if (!
a href="+code=kmemcheck_enabled" class="sref">kmemcheck_enabled
/a> || 
a href="+code=gfpflags" class="sref">gfpflags
/a> & 
a href="+code=__GFP_NOTRACK" class="sref">__GFP_NOTRACK
/a>) {d> 74.1a>                
spa2 class="comment">/*
/spa2in> 75
/a>
spa2 class="comment">                 * Allow notracked objects to be allocated from
/spa2in> 76
/a>
spa2 class="comment">                 * tracked caches. Note however that these objects
/spa2in> 77
/a>
spa2 class="comment">                 * will still get page faults on access, they just
/spa2in> 78
/a>
spa2 class="comment">                 * won't ever be flagged as uninitialized. If page
/spa2in> 79
/a>
spa2 class="comment">                 * faults are not acceptable, the slab cache itself
/spa2in> 80
/a>
spa2 class="comment">                 * should be marked NOTRACK.
/spa2in> 81
/a>
spa2 class="comment">                 */
/spa2in> 82.1a>                
a href="+code=kmemcheck_mark_initialized" class="sref">kmemcheck_mark_initialized
/a>(
a href="+code=object" class="sref">object
/a>, 
a href="+code=size" class="sref">size
/a>);
> 83.1a>        } else if (!
a href="+code=s" class="sref">s
/a>->
a href="+code=ctor" class="sref">ctor
/a>) {d> 84.1a>                
spa2 class="comment">/*
/spa2in> 85
/a>
spa2 class="comment">                 * New objects should be marked uninitialized before
/spa2in> 86
/a>
spa2 class="comment">                 * they're returned to the called.
/spa2in> 87
/a>
spa2 class="comment">                 */
/spa2in> 88.1a>                
a href="+code=kmemcheck_mark_uninitialized" class="sref">kmemcheck_mark_uninitialized
/a>(
a href="+code=object" class="sref">object
/a>, 
a href="+code=size" class="sref">size
/a>);
> 89.1a>        }
> 90
/a>}
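
/*
 * Mark a slab object's shadow as "freed" so that later reads of the stale
 * object are flagged. Skipped for caches with a constructor or
 * SLAB_DESTROY_BY_RCU to avoid false positives (see the TODO below).
 */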
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
        /* TODO: RCU freeing is unsupported for now; hide false positives. */
        if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
                kmemcheck_mark_freed(object, size);
}
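
/*
 * Hook for the page allocator: give freshly allocated (non-highmem,
 * tracked) pages a shadow and set its initial state, "initialized" for
 * __GFP_ZERO allocations and "uninitialized" otherwise. Rough sketch of
 * how the page allocator might invoke it when preparing a new page (not
 * the exact upstream call site):
 *
 *      if (kmemcheck_enabled)
 *              kmemcheck_pagealloc_alloc(page, order, gfp_flags);
 */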
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
                               gfp_t gfpflags)
{
        int pages;

        if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
                return;

        pages = 1 << order;

        /*
         * NOTE: We choose to track GFP_ZERO pages too; in fact, they
         * can become uninitialized by copying uninitialized memory
         * into them.
         */

        /* XXX: Can use zone->node for node? */
        kmemcheck_alloc_shadow(page, order, gfpflags, -1);

        if (gfpflags & __GFP_ZERO)
                kmemcheck_mark_initialized_pages(page, pages);
        else
                kmemcheck_mark_uninitialized_pages(page, pages);
}