linux/fs/block_dev.c
<<
v414./spa 14./form 14.a v414 href="../linux+v3.9.7/fs/block_dev.c"> v414.img src="../.static/gfx/right.png" alt=">>"> ./spa .spa class="lxr_search"> v4 ="+search" method="post" onsubmit="return do_search(this);"> v414.input typ hidden" nam navtarget" ion> "> v414.input typ text" nam search" id search"> v414.buttopttyp submit">Search v414Prefs 14./a> ./spa v414 4./div v414 4.form ac > ="ajax+*" method="post" onsubmit="return false;"> .input typ hidden" nam ajax_lookup" id ajax_lookup" ion> "> v414 4./form v414 4.div class="headingbottom">
.div id file_contents"
/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */
  4 47./a> 4 48./a>#include <linux/init.h./a>> 4 49./a>#include <linux/mm.h./a>> 4 .18"a>#include <linux/fcntl.h./a>> 4 118"a>#include <linux/slab.h./a>> 4 128"a>#include <linux/kmod.h./a>> 4 13./a>#include <linux/major.h./a>> 4 14./a>#include <linux/device_cgroup.h./a>> 4 15./a>#include <linux/highmem.h./a>> 4 16./a>#include <linux/blkdev.h./a>> 4 17./a>#include <linux/module.h./a>> 4 18./a>#include <linux/blkpg.h./a>> 4 19./a>#include <linux/magic.h./a>> 4 218"a>#include <linux/buffer_head.h./a>> 4 218"a>#include <linux/swap.h./a>> 4 228"a>#include <linux/pagevec.h./a>> 4 23./a>#include <linux/writeback.h./a>> 4 24./a>#include <linux/mpage.h./a>> 4 25./a>#include <linux/mount.h./a>> 4 26./a>#include <linux/uio.h./a>> 4 27./a>#include <linux/nam
i.h./a>> 4 28./a>#include <linux/log2.h./a>> 4 29./a>#include <linux/cleancache.h./a>> 4 318"a>#include <asm/uaccess.h./a>> 4 318"a>#include "internal.h./a>" 4 32./a> 4 33./a>struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a> { 4 34./a>        struct4.a href="+code=block_device" class="sref">block_device./a> .a href="+code=bdev" class="sref">bdev./a>; 4 35./a>        struct4.a href="+code=inode" class="sref">inode./a> .a href="+code=vfs_inode" class="sref">vfs_inode./a>; 4 36./a>}; 4 37./a> 4 38./a>static const struct4.a href="+code=address_space_opera  >
s" class="sref">address_space_opera  >
s./a> .a href="+code=def_blk_aops" class="sref">def_blk_aops./a>; 4 39./a> 4 40./a>static .a href="+code=inline" class="sref">inline./a> struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a> *.a href="+code=BDEV_I" class="sref">BDEV_I./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 4 418"a>{ 4 42./a>        return .a href="+code=container_of" class="sref">container_of./a>(.a href="+code=inode" class="sref">inode./a>, struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a>, .a href="+code=vfs_inode" class="sref">vfs_inode./a>); 4 43./a>} 4 44./a> 4 45./a>.a href="+code=inline" class="sref">inline./a> struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=I_BDEV" class="sref">I_BDEV./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 4 468"a>{ 4 47./a>        return &.a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=inode" class="sref">inode./a>)->.a href="+code=bdev" class="sref">bdev./a>; 4 48./a>} 4 49./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=I_BDEV" class="sref">I_BDEV./a>); 4 50./a> 4 51./a>.spa
 class="comment">/*./spa
  4 52./a>.spa
 class="comment"> * Move the inode from its current bdi to a new bdi. If the inode is dirty we./spa
  4 53./a>.spa
 class="comment"> * need to move it onto the dirty list of @dst so that the inode is always on./spa
  4 54./a>.spa
 class="comment"> * the right list../spa
  4 55./a>.spa
 class="comment"> */./spa
  4 56./a>static void4.a href="+code=bdev_inode_switch_bdi" class="sref">bdev_inode_switch_bdi./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>, 4 57./a>                        struct4.a href="+code=backing_dev_info" class="sref">backing_dev_info./a> *.a href="+code=dst" class="sref">dst./a>) 4 588"a>{ 4 59./a>        struct4.a href="+code=backing_dev_info" class="sref">backing_dev_info./a> *.a href="+code=old" class="sref">old./a> =4.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>..a href="+code=backing_dev_info" class="sref">backing_dev_info./a>; 4 60./a> 4 61./a>        if (.a href="+code=unlikely" class="sref">unlikely./a>(.a href="+code=dst" class="sref">dst./a> ==4.a href="+code=old" class="sref">old./a>))               .spa
 class="comment">/* deadlock avoidance */./spa
  4 62./a>                return; 4 63./a>        .a href="+code=bdi_lock_two" class="sref">bdi_lock_two./a>(&.a href="+code=old" class="sref">old./a>->.a href="+code=wb" class="sref">wb./a>, &.a href="+code=dst" class="sref">dst./a>->.a href="+code=wb" class="sref">wb./a>); 4 64./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_lock" class="sref">i_lock./a>); 4 65./a>        .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>..a href="+code=backing_dev_info" class="sref">backing_dev_info./a> =4.a href="+code=dst" class="sref">dst./a>; 4 66./a>        if (.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_state" class="sref">i_state./a> &4.a href="+code=I_DIRTY" class="sref">I_DIRTY./a>) 4 67./a>                .a href="+code=list_move" class="sref">list_move./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_wb_list" class="sref">i_wb_list./a>, &.a href="+code=dst" class="sref">dst./a>->.a href="+code=wb" class="sref">wb./a>..a href="+code=b_dirty" class="sref">b_dirty./a>); 4 68./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_lock" class="sref">i_lock./a>); 4 69./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=old" class="sref">old./a>->.a href="+code=wb" class="sref">wb./a>..a href="+code=list_lock" class="sref">list_lock./a>); 4 70./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=dst" class="sref">dst./a>->.a href="+code=wb" class="sref">wb./a>..a href="+code=list_lock" class="sref">list_lock./a>); 4 71./a>} 4 72./a> 4 73./a>.spa
 class="comment">/* Kill _all_ buffers and pagecache , dirty or not.. */./spa
  4 74./a>void4.a href="+code=kill_bdev" class="sref">kill_bdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4 758"a>{ 4 76./a>        struct4.a href="+code=address_space" class="sref">address_space./a> *.a href="+code=mapping" class="sref">mapping./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>; 4 77./a> 4 78./a>        if (.a href="+code=mapping" class="sref">mapping./a>->.a href="+code=nrpages" class="sref">nrpages./a> ==40) 4 79./a>                return; 4 80./a> 4 81./a>        .a href="+code=invalidate_bh_lrus" class="sref">invalidate_bh_lrus./a>(); 4 82./a>        .a href="+code=truncate_inode_pages" class="sref">truncate_inode_pages./a>(.a href="+code=mapping" class="sref">mapping./a>,40); 4 83./a>}        4 84./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=kill_bdev" class="sref">kill_bdev./a>); 4 85./a> 4 86./a>.spa
 class="comment">/* Invalidate clean unused buffers and pagecache. */./spa
  4 87./a>void4.a href="+code=invalidate_bdev" class="sref">invalidate_bdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4 888"a>{ 4 89./a>        struct4.a href="+code=address_space" class="sref">address_space./a> *.a href="+code=mapping" class="sref">mapping./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>; 4 90./a> 4 91./a>        if (.a href="+code=mapping" class="sref">mapping./a>->.a href="+code=nrpages" class="sref">nrpages./a> ==40) 4 92./a>                return; 4 93./a> 4 94./a>        .a href="+code=invalidate_bh_lrus" class="sref">invalidate_bh_lrus./a>(); 4 95./a>        .a href="+code=lru_add_drain_all" class="sref">lru_add_drain_all./a>();    .spa
 class="comment">/* make sure all lru add caches are flushed */./spa
  4 96./a>        .a href="+code=invalidate_mapping_pages" class="sref">invalidate_mapping_pages./a>(.a href="+code=mapping" class="sref">mapping./a>,40, -1); 4 97./a>        .spa
 class="comment">/* 99% of the time, we don't need to flush the cleancache optthe bdev../spa
  4 98./a>.spa
 class="comment">         * But, for the strange corners, lets be cautious./spa
  4 99./a>.spa
 class="comment">         */./spa
  4100./a>        .a href="+code=cleancache_invalidate_inode" class="sref">cleancache_invalidate_inode./a>(.a href="+code=mapping" class="sref">mapping./a>); 4101./a>} 4102./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=invalidate_bdev" class="sref">invalidate_bdev./a>); 4103./a> 4104./a>int4.a href="+code=set_blocksize" class="sref">set_blocksize./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, int4.a href="+code=size" class="sref">size./a>) 41058"a>{ 4106./a>        .spa
 class="comment">/* Size must be a power of two, and between 512 and PAGE_SIZE */./spa
  4107./a>        if (.a href="+code=size" class="sref">size./a> > .a href="+code=PAGE_SIZE" class="sref">PAGE_SIZE./a> ||4.a href="+code=size" class="sref">size./a> < 512 ||4!.a href="+code=is_power_of_2" class="sref">is_power_of_2./a>(.a href="+code=size" class="sref">size./a>)) 4108./a>                return -.a href="+code=EINVAL" class="sref">EINVAL./a>; 4109./a> 4110./a>        .spa
 class="comment">/* Size cannot be smaller thaptthe size supported by the device */./spa
  4111./a>        if (.a href="+code=size" class="sref">size./a> < .a href="+code=bdev_logical_block_size" class="sref">bdev_logical_block_size./a>(.a href="+code=bdev" class="sref">bdev./a>)) 4112./a>                return -.a href="+code=EINVAL" class="sref">EINVAL./a>; 4113./a> 4114./a>        .spa
 class="comment">/* Don't change the size if it is sam
 as current */./spa
  4115./a>        if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_block_size" class="sref">bd_block_size./a> !=4.a href="+code=size" class="sref">size./a>) { 4116./a>                .a href="+code=sync_blockdev" class="sref">sync_blockdev./a>(.a href="+code=bdev" class="sref">bdev./a>); 4117./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_block_size" class="sref">bd_block_size./a> =4.a href="+code=size" class="sref">size./a>; 4118./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_blkbits" class="sref">i_blkbits./a> =4.a href="+code=blksize_bits" class="sref">blksize_bits./a>(.a href="+code=size" class="sref">size./a>); 4119./a>                .a href="+code=kill_bdev" class="sref">kill_bdev./a>(.a href="+code=bdev" class="sref">bdev./a>); 4120./a>        } 4121./a>        return 0; 41228"a>} 4123./a> 4124./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=set_blocksize" class="sref">set_blocksize./a>); 4125./a> 4126./a>int4.a href="+code=sb_set_blocksize" class="sref">sb_set_blocksize./a>(struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a>, int4.a href="+code=size" class="sref">size./a>) 4127./a>{ 4128./a>        if (.a href="+code=set_blocksize" class="sref">set_blocksize./a>(.a href="+code=sb" class="sref">sb./a>->.a href="+code=s_bdev" class="sref">s_bdev./a>, .a href="+code=size" class="sref">size./a>)) 4129./a>                return 0; 4130./a>        .spa
 class="comment">/* If we get here, we know size is power of two./spa
  4131./a>.spa
 class="comment">         * and it's ion>
 is between 512 and PAGE_SIZE */./spa
  4132./a>        .a href="+code=sb" class="sref">sb./a>->.a href="+code=s_blocksize" class="sref">s_blocksize./a> =4.a href="+code=size" class="sref">size./a>; 4133./a>        .a href="+code=sb" class="sref">sb./a>->.a href="+code=s_blocksize_bits" class="sref">s_blocksize_bits./a> =4.a href="+code=blksize_bits" class="sref">blksize_bits./a>(.a href="+code=size" class="sref">size./a>); 4134./a>        return .a href="+code=sb" class="sref">sb./a>->.a href="+code=s_blocksize" class="sref">s_blocksize./a>; 4135./a>} 4136./a> 4137./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=sb_set_blocksize" class="sref">sb_set_blocksize./a>); 4138./a> 4139./a>int4.a href="+code=sb_min_blocksize" class="sref">sb_min_blocksize./a>(struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a>, int4.a href="+code=size" class="sref">size./a>) 4140./a>{ 4141./a>        int4.a href="+code=minsize" class="sref">minsize./a> =4.a href="+code=bdev_logical_block_size" class="sref">bdev_logical_block_size./a>(.a href="+code=sb" class="sref">sb./a>->.a href="+code=s_bdev" class="sref">s_bdev./a>); 4142./a>        if (.a href="+code=size" class="sref">size./a> < .a href="+code=minsize" class="sref">minsize./a>) 4143./a>                .a href="+code=size" class="sref">size./a> =4.a href="+code=minsize" class="sref">minsize./a>; 4144./a>        return .a href="+code=sb_set_blocksize" class="sref">sb_set_blocksize./a>(.a href="+code=sb" class="sref">sb./a>, .a href="+code=size" class="sref">size./a>); 4145./a>} 4146./a> 4147./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=sb_min_blocksize" class="sref">sb_min_blocksize./a>); 4148./a> 4149./a>static int 4150./a>.a href="+code=blkdev_get_block" class="sref">blkdev_get_block./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>,4.a href="+code=sector_t" class="sref">sector_t./a> .a 
href="+code=iblock" class="sref">iblock./a>, 4151./a>                struct4.a href="+code=buffer_head" class="sref">buffer_head./a> *.a href="+code=bh" class="sref">bh./a>, int4.a href="+code=create" class="sref">create./a>) 4152./a>{ 4153./a>        .a href="+code=bh" class="sref">bh./a>->.a href="+code=b_bdev" class="sref">b_bdev./a> =4.a href="+code=I_BDEV" class="sref">I_BDEV./a>(.a href="+code=inode" class="sref">inode./a>); 4154./a>        .a href="+code=bh" class="sref">bh./a>->.a href="+code=b_blocknr" class="sref">b_blocknr./a> =4.a href="+code=iblock" class="sref">iblock./a>; 4155./a>        .a href="+code=set_buffer_mapped" class="sref">set_buffer_mapped./a>(.a href="+code=bh" class="sref">bh./a>); 4156./a>        return 0; 4157./a>} 4158./a> 4159./a>static .a href="+code=ssize_t" class="sref">ssize_t./a> 4160./a>.a href="+code=blkdev_direct_IO" class="sref">blkdev_direct_IO./a>(int4.a href="+code=rw" class="sref">rw./a>, struct4.a href="+code=kiocb" class="sref">kiocb./a> *.a href="+code=iocb" class="sref">iocb./a>, const struct4.a href="+code=iovec" class="sref">iovec./a> *.a href="+code=iov" class="sref">iov./a>, 4161./a>                        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=offset" class="sref">offset./a>, unsigned long .a href="+code=nr_segs" class="sref">nr_segs./a>) 4162./a>{ 4163./a>        struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a> =4.a href="+code=iocb" class="sref">iocb./a>->.a href="+code=ki_filp" class="sref">ki_filp./a>; 4164./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a> =4.a href="+code=file" class="sref">file./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>->.a href="+code=host" class="sref">host./a>; 4165./a> 4166./a>        return .a href="+code=__blockdev_direct_IO" class="sref">__blockdev_direct_IO./a>(.a href="+code=rw" class="sref">rw./a>, .a href="+code=iocb" 
class="sref">iocb./a>, .a href="+code=inode" class="sref">inode./a>,4.a href="+code=I_BDEV" class="sref">I_BDEV./a>(.a href="+code=inode" class="sref">inode./a>), .a href="+code=iov" class="sref">iov./a>, .a href="+code=offset" class="sref">offset./a>, 4167./a>                                    .a href="+code=nr_segs" class="sref">nr_segs./a>, .a href="+code=blkdev_get_block" class="sref">blkdev_get_block./a>, .a href="+code=NULL" class="sref">NULL./a>, .a href="+code=NULL" class="sref">NULL./a>, 0); 4168./a>} 4169./a> 4170./a>int4.a href="+code=__sync_blockdev" class="sref">__sync_blockdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, int4.a href="+code=wait" class="sref">wait./a>) 41718"a>{ 4172./a>        if (!.a href="+code=bdev" class="sref">bdev./a>) 4173./a>                return 0; 4174./a>        if (!.a href="+code=wait" class="sref">wait./a>) 4175./a>                return .a href="+code=filemap_flush" class="sref">filemap_flush./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>); 4176./a>        return .a href="+code=filemap_write_and_wait" class="sref">filemap_write_and_wait./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>); 4177./a>} 4178./a> 4179./a>.spa
 class="comment">/*./spa
  4180./a>.spa
 class="comment"> * Write out and wait upon all the dirty data associated with a block./spa
  4181./a>.spa
 class="comment"> * device via its mapping.  Does not take the superblock lock../spa
  4182./a>.spa
 class="comment"> */./spa
  4183./a>int4.a href="+code=sync_blockdev" class="sref">sync_blockdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4184./a>{ 4185./a>        return .a href="+code=__sync_blockdev" class="sref">__sync_blockdev./a>(.a href="+code=bdev" class="sref">bdev./a>, 1); 4186./a>} 4187./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=sync_blockdev" class="sref">sync_blockdev./a>); 4188./a> 4189./a>.spa
 class="comment">/*./spa
  4190./a>.spa
 class="comment"> * Write out and wait upon all dirty data associated with this./spa
  4191./a>.spa
 class="comment"> * device.   Filesystem data as well as the underlying block./spa
  4192./a>.spa
 class="comment"> * device.  Takes the superblock lock../spa
  4193./a>.spa
 class="comment"> */./spa
  4194./a>int4.a href="+code=fsync_bdev" class="sref">fsync_bdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 41958"a>{ 4196./a>        struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a> =4.a href="+code=get_super" class="sref">get_super./a>(.a href="+code=bdev" class="sref">bdev./a>); 4197./a>        if (.a href="+code=sb" class="sref">sb./a>) { 4198./a>                int4.a href="+code=res" class="sref">res./a> =4.a href="+code=sync_filesystem" class="sref">sync_filesystem./a>(.a href="+code=sb" class="sref">sb./a>); 4199./a>                .a href="+code=drop_super" class="sref">drop_super./a>(.a href="+code=sb" class="sref">sb./a>); 4200./a>                return .a href="+code=res" class="sref">res./a>; 4201./a>        } 4202./a>        return .a href="+code=sync_blockdev" class="sref">sync_blockdev./a>(.a href="+code=bdev" class="sref">bdev./a>); 4203./a>} 4204./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=fsync_bdev" class="sref">fsync_bdev./a>); 4205./a> 4206./a>.spa
 class="comment">/**./spa
  4207./a>.spa
 class="comment"> * freeze_bdev  --  lock a filesystem and force it into a consistent state./spa
  4208./a>.spa
 class="comment"> * @bdev:       blockdevice to lock./spa
  4209./a>.spa
 class="comment"> *./spa
  4210./a>.spa
 class="comment"> * If a superblock is found optthis device, we take the s_umount semaphore./spa
  4211./a>.spa
 class="comment"> * optit to make sure nobody unmounts until the snapshot creatioptis done../spa
  4212./a>.spa
 class="comment"> * The reference counter (bd_fsfreeze_count) guarantees that only the last./spa
  4213./a>.spa
 class="comment"> * unfreeze process can unfreeze the frozen filesystem actually when multiple./spa
  4214./a>.spa
 class="comment"> * freeze requests arrive simultaneously. It counts up in freeze_bdev() and./spa
  4215./a>.spa
 class="comment"> * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze./spa
  4216./a>.spa
 class="comment"> * actually../spa
  4217./a>.spa
 class="comment"> */./spa
  4218./a>struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=freeze_bdev" class="sref">freeze_bdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4219./a>{ 4220./a>        struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a>; 4221./a>        int4.a href="+code=error" class="sref">error./a> =40; 4222./a> 4223./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4224./a>        if (++.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_count" class="sref">bd_fsfreeze_count./a> > 1) { 4225./a>                .spa
 class="comment">/*./spa
  4226./a>.spa
 class="comment">                 * We don't even need to grab a reference - the first call./spa
  4227./a>.spa
 class="comment">                 * to freeze_bdev grab an active reference and oply the last./spa
  4228./a>.spa
 class="comment">                 * thaw_bdev drops it../spa
  4229./a>.spa
 class="comment">                 */./spa
  4230./a>                .a href="+code=sb" class="sref">sb./a> =4.a href="+code=get_super" class="sref">get_super./a>(.a href="+code=bdev" class="sref">bdev./a>); 4231./a>                .a href="+code=drop_super" class="sref">drop_super./a>(.a href="+code=sb" class="sref">sb./a>); 4232./a>                .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4233./a>                return .a href="+code=sb" class="sref">sb./a>; 4234./a>        } 4235./a> 4236./a>        .a href="+code=sb" class="sref">sb./a> =4.a href="+code=get_active_super" class="sref">get_active_super./a>(.a href="+code=bdev" class="sref">bdev./a>); 4237./a>        if (!.a href="+code=sb" class="sref">sb./a>) 4238./a>                goto .a href="+code=out" class="sref">out./a>; 4239./a>        .a href="+code=error" class="sref">error./a> =4.a href="+code=freeze_super" class="sref">freeze_super./a>(.a href="+code=sb" class="sref">sb./a>); 4240./a>        if (.a href="+code=error" class="sref">error./a>) { 4241./a>                .a href="+code=deactivate_super" class="sref">deactivate_super./a>(.a href="+code=sb" class="sref">sb./a>); 4242./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_count" class="sref">bd_fsfreeze_count./a>--; 4243./a>                .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4244./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(.a href="+code=error" class="sref">error./a>); 4245./a>        } 4246./a>        .a href="+code=deactivate_super" class="sref">deactivate_super./a>(.a href="+code=sb" class="sref">sb./a>); 4247./a> .a href="+code=out" class="sref">out./a>: 4248./a>        .a href="+code=sync_blockdev" class="sref">sync_blockdev./a>(.a 
href="+code=bdev" class="sref">bdev./a>); 4249./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4250./a>        return .a href="+code=sb" class="sref">sb./a>;      .spa
 class="comment">/* thaw_bdev releases s->s_umount */./spa
  4251./a>} 4252./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=freeze_bdev" class="sref">freeze_bdev./a>); 4253./a> 4254./a>.spa
 class="comment">/**./spa
  4255./a>.spa
 class="comment"> * thaw_bdev  -- unlock filesystem./spa
  4256./a>.spa
 class="comment"> * @bdev:       blockdevice to unlock./spa
  4257./a>.spa
 class="comment"> * @sb:         associated superblock./spa
  4258./a>.spa
 class="comment"> *./spa
  4259./a>.spa
 class="comment"> * Unlocks the filesystem and marks it writeable again after freeze_bdev()../spa
  4260./a>.spa
 class="comment"> */./spa
  4261./a>int4.a href="+code=thaw_bdev" class="sref">thaw_bdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a>) 4262./a>{ 4263./a>        int4.a href="+code=error" class="sref">error./a> =4-.a href="+code=EINVAL" class="sref">EINVAL./a>; 4264./a> 4265./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4266./a>        if (!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_count" class="sref">bd_fsfreeze_count./a>) 4267./a>                goto .a href="+code=out" class="sref">out./a>; 4268./a> 4269./a>        .a href="+code=error" class="sref">error./a> =40; 4270./a>        if (--.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_count" class="sref">bd_fsfreeze_count./a> > 0) 4271./a>                goto .a href="+code=out" class="sref">out./a>; 4272./a> 4273./a>        if (!.a href="+code=sb" class="sref">sb./a>) 4274./a>                goto .a href="+code=out" class="sref">out./a>; 4275./a> 4276./a>        .a href="+code=error" class="sref">error./a> =4.a href="+code=thaw_super" class="sref">thaw_super./a>(.a href="+code=sb" class="sref">sb./a>); 4277./a>        if (.a href="+code=error" class="sref">error./a>) { 4278./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_count" class="sref">bd_fsfreeze_count./a>++; 4279./a>                .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4280./a>                return .a href="+code=error" class="sref">error./a>; 4281./a>        } 4282./a>.a href="+code=out" class="sref">out./a>: 4283./a>        .a 
href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4284./a>        return 0; 4285./a>} 4286./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=thaw_bdev" class="sref">thaw_bdev./a>); 4287./a> 4288./a>static int .a href="+code=blkdev_writepage" class="sref">blkdev_writepage./a>(struct4.a href="+code=page" class="sref">page./a> *.a href="+code=page" class="sref">page./a>, struct4.a href="+code=writeback_control" class="sref">writeback_control./a> *.a href="+code=wbc" class="sref">wbc./a>) 4289./a>{ 4290./a>        return .a href="+code=block_write_full_page" class="sref">block_write_full_page./a>(.a href="+code=page" class="sref">page./a>, .a href="+code=blkdev_get_block" class="sref">blkdev_get_block./a>, .a href="+code=wbc" class="sref">wbc./a>); 4291./a>} 4292./a> 4293./a>static int .a href="+code=blkdev_readpage" class="sref">blkdev_readpage./a>(struct4.a href="+code=file" class="sref">file./a> *4.a href="+code=file" class="sref">file./a>, struct4.a href="+code=page" class="sref">page./a> *4.a href="+code=page" class="sref">page./a>) 4294./a>{ 4295./a>        return .a href="+code=block_read_full_page" class="sref">block_read_full_page./a>(.a href="+code=page" class="sref">page./a>, .a href="+code=blkdev_get_block" class="sref">blkdev_get_block./a>); 4296./a>} 4297./a> 4298./a>static int .a href="+code=blkdev_write_begin" class="sref">blkdev_write_begin./a>(struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a>, struct4.a href="+code=address_space" class="sref">address_space./a> *.a href="+code=mapping" class="sref">mapping./a>, 4299./a>                        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=pos" class="sref">pos./a>, unsigned .a href="+code=len" class="sref">len./a>, unsigned .a href="+code=flags" class="sref">flags./a>, 4300./a>                    
    struct4.a href="+code=page" class="sref">page./a> **.a href="+code=pagep" class="sref">pagep./a>, void **.a href="+code=fsdata" class="sref">fsdata./a>) 43018"a>{ 4302./a>        return .a href="+code=block_write_begin" class="sref">block_write_begin./a>(.a href="+code=mapping" class="sref">mapping./a>, .a href="+code=pos" class="sref">pos./a>, .a href="+code=len" class="sref">len./a>, .a href="+code=flags" class="sref">flags./a>,4.a href="+code=pagep" class="sref">pagep./a>, 4303./a>                                 .a href="+code=blkdev_get_block" class="sref">blkdev_get_block./a>); 4304./a>} 4305./a> 4306./a>static int .a href="+code=blkdev_write_end" class="sref">blkdev_write_end./a>(struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a>, struct4.a href="+code=address_space" class="sref">address_space./a> *.a href="+code=mapping" class="sref">mapping./a>, 4307./a>                        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=pos" class="sref">pos./a>, unsigned .a href="+code=len" class="sref">len./a>, unsigned .a href="+code=copied" class="sref">copied./a>, 4308./a>                        struct4.a href="+code=page" class="sref">page./a> *.a href="+code=page" class="sref">page./a>, void *.a href="+code=fsdata" class="sref">fsdata./a>) 4309./a>{ 4310./a>        int4.a href="+code=ret" class="sref">ret./a>; 4311./a>        .a href="+code=ret" class="sref">ret./a> =4.a href="+code=block_write_end" class="sref">block_write_end./a>(.a href="+code=file" class="sref">file./a>, .a href="+code=mapping" class="sref">mapping./a>, .a href="+code=pos" class="sref">pos./a>, .a href="+code=len" class="sref">len./a>, .a href="+code=copied" class="sref">copied./a>,4.a href="+code=page" class="sref">page./a>, .a href="+code=fsdata" class="sref">fsdata./a>); 4312./a> 4313./a>        .a href="+code=unlock_page" class="sref">unlock_page./a>(.a href="+code=page" class="sref">page./a>); 4314./a>        .a 
href="+code=page_cache_release" class="sref">page_cache_release./a>(.a href="+code=page" class="sref">page./a>); 4315./a> 4316./a>        return .a href="+code=ret" class="sref">ret./a>; 4317./a>} 4318./a> 4319./a>.spa
 class="comment">/*./spa
  4320./a>.spa
 class="comment"> * private llseek:./spa
  4321./a>.spa
 class="comment"> * for a block special file file_inode(file)->i_size is zero./spa
  4322./a>.spa
 class="comment"> * so we compute the size by hand (just as in block_read/write above)./spa
  4323./a>.spa
 class="comment"> */./spa
  4324./a>static .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=block_llseek" class="sref">block_llseek./a>(struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a>, .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=offset" class="sref">offset./a>, int4.a href="+code=whence" class="sref">whence./a>) 43258"a>{ 4326./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=bd_inode" class="sref">bd_inode./a> =4.a href="+code=file" class="sref">file./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>->.a href="+code=host" class="sref">host./a>; 4327./a>        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=size" class="sref">size./a>; 4328./a>        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=retval" class="sref">retval./a>; 4329./a> 4330./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mutex" class="sref">i_mutex./a>); 4331./a>        .a href="+code=size" class="sref">size./a> =4.a href="+code=i_size_read" class="sref">i_size_read./a>(.a href="+code=bd_inode" class="sref">bd_inode./a>); 4332./a> 4333./a>        .a href="+code=retval" class="sref">retval./a> =4-.a href="+code=EINVAL" class="sref">EINVAL./a>; 4334./a>        switch (.a href="+code=whence" class="sref">whence./a>) { 4335./a>                case .a href="+code=SEEK_END" class="sref">SEEK_END./a>: 4336./a>                        .a href="+code=offset" class="sref">offset./a> +=4.a href="+code=size" class="sref">size./a>; 4337./a>                        break; 4338./a>                case .a href="+code=SEEK_CUR" class="sref">SEEK_CUR./a>: 4339./a>                        .a href="+code=offset" class="sref">offset./a> +=4.a href="+code=file" class="sref">file./a>->.a href="+code=f_pos" class="sref">f_pos./a>; 4340./a>                case .a href="+code=SEEK_SET" class="sref">SEEK_SET./a>: 4341./a>   
                     break; 4342./a>                default: 4343./a>                        goto .a href="+code=out" class="sref">out./a>; 4344./a>        } 4345./a>        if (.a href="+code=offset" class="sref">offset./a> >= 0 && .a href="+code=offset" class="sref">offset./a> <= .a href="+code=size" class="sref">size./a>) { 4346./a>                if (.a href="+code=offset" class="sref">offset./a> !=4.a href="+code=file" class="sref">file./a>->.a href="+code=f_pos" class="sref">f_pos./a>) { 4347./a>                        .a href="+code=file" class="sref">file./a>->.a href="+code=f_pos" class="sref">f_pos./a> =4.a href="+code=offset" class="sref">offset./a>; 4348./a>                } 4349./a>                .a href="+code=retval" class="sref">retval./a> =4.a href="+code=offset" class="sref">offset./a>; 4350./a>        } 4351./a>.a href="+code=out" class="sref">out./a>: 4352./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mutex" class="sref">i_mutex./a>); 4353./a>        return .a href="+code=retval" class="sref">retval./a>; 4354./a>} 4355./a>         4356./a>int .a href="+code=blkdev_fsync" class="sref">blkdev_fsync./a>(struct4.a href="+code=file" class="sref">file./a> *.a href="+code=filp" class="sref">filp./a>, .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=start" class="sref">start./a>, .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=end" class="sref">end./a>, int4.a href="+code=datasync" class="sref">datasync./a>) 4357./a>{ 4358./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=bd_inode" class="sref">bd_inode./a> =4.a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>->.a href="+code=host" class="sref">host./a>; 4359./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=I_BDEV" 
class="sref">I_BDEV./a>(.a href="+code=bd_inode" class="sref">bd_inode./a>); 4360./a>        int4.a href="+code=error" class="sref">error./a>; 4361./a>         4362./a>        .a href="+code=error" class="sref">error./a> =4.a href="+code=filemap_write_and_wait_range" class="sref">filemap_write_and_wait_range./a>(.a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>, .a href="+code=start" class="sref">start./a>, .a href="+code=end" class="sref">end./a>); 4363./a>        if (.a href="+code=error" class="sref">error./a>) 4364./a>                return .a href="+code=error" class="sref">error./a>; 4365./a> 4366./a>        .spa
 class="comment">/*./spa
  4367./a>.spa
 class="comment">         * There is no need to serialise calls to blkdev_issue_flush with./spa
  4368./a>.spa
 class="comment">         * i_mutex and doing so causes performance issues with concurrent./spa
  4369./a>.spa
 class="comment">         * O_SYNC writers to a block device../spa
  4370./a>.spa
 class="comment">         */./spa
  4371./a>        .a href="+code=error" class="sref">error./a> =4.a href="+code=blkdev_issue_flush" class="sref">blkdev_issue_flush./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=GFP_KERNEL" class="sref">GFP_KERNEL./a>, .a href="+code=NULL" class="sref">NULL./a>); 4372./a>        if (.a href="+code=error" class="sref">error./a> ==4-.a href="+code=EOPNOTSUPP" class="sref">EOPNOTSUPP./a>) 4373./a>                .a href="+code=error" class="sref">error./a> =40; 4374./a> 4375./a>        return .a href="+code=error" class="sref">error./a>; 4376./a>} 4377./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=blkdev_fsync" class="sref">blkdev_fsync./a>); 4378./a> 4379./a>.spa
 class="comment">/*./spa
  4380./a>.spa
 class="comment"> * pseudo-fs./spa
  4381./a>.spa
 class="comment"> */./spa
  4382./a> 4383./a>static  .a href="+code=__cacheline_aligned_in_smp" class="sref">__cacheline_aligned_in_smp./a> .a href="+code=DEFINE_SPINLOCK" class="sref">DEFINE_SPINLOCK./a>(.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4384./a>static struct4.a href="+code=kmem_cache" class="sref">kmem_cache./a> * .a href="+code=bdev_cachep" class="sref">bdev_cachep./a> .a href="+code=__read_mostly" class="sref">__read_mostly./a>; 4385./a> 4386./a>static struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=bdev_alloc_inode" class="sref">bdev_alloc_inode./a>(struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a>) 4387./a>{ 4388./a>        struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a> *.a href="+code=ei" class="sref">ei./a> =4.a href="+code=kmem_cache_alloc" class="sref">kmem_cache_alloc./a>(.a href="+code=bdev_cachep" class="sref">bdev_cachep./a>, .a href="+code=GFP_KERNEL" class="sref">GFP_KERNEL./a>); 4389./a>        if (!.a href="+code=ei" class="sref">ei./a>) 4390./a>                return .a href="+code=NULL" class="sref">NULL./a>; 4391./a>        return &.a href="+code=ei" class="sref">ei./a>->.a href="+code=vfs_inode" class="sref">vfs_inode./a>; 4392./a>} 4393./a> 4394./a>static void .a href="+code=bdev_i_callback" class="sref">bdev_i_callback./a>(struct4.a href="+code=rcu_head" class="sref">rcu_head./a> *.a href="+code=head" class="sref">head./a>) 43958"a>{ 4396./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a> =4.a href="+code=container_of" class="sref">container_of./a>(.a href="+code=head" class="sref">head./a>, struct4.a href="+code=inode" class="sref">inode./a>, .a href="+code=i_rcu" class="sref">i_rcu./a>); 4397./a>        struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a> *.a href="+code=bdi" class="sref">bdi./a> =4.a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=inode" class="sref">inode./a>); 
4398./a> 4399./a>        .a href="+code=kmem_cache_free" class="sref">kmem_cache_free./a>(.a href="+code=bdev_cachep" class="sref">bdev_cachep./a>, .a href="+code=bdi" class="sref">bdi./a>); 4400./a>} 44018"a> 4402./a>static void .a href="+code=bdev_destroy_inode" class="sref">bdev_destroy_inode./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 4403./a>{ 4404./a>        .a href="+code=call_rcu" class="sref">call_rcu./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_rcu" class="sref">i_rcu./a>, .a href="+code=bdev_i_callback" class="sref">bdev_i_callback./a>); 4405./a>} 4406./a> 4407./a>static void .a href="+code=init_once" class="sref">init_once./a>(void *.a href="+code=foo" class="sref">foo./a>) 4408./a>{ 4409./a>        struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a> *.a href="+code=ei" class="sref">ei./a> =4(struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a> *)4.a href="+code=foo" class="sref">foo./a>; 4410./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a> =4&.a href="+code=ei" class="sref">ei./a>->.a href="+code=bdev" class="sref">bdev./a>; 44118"a> 4412./a>        .a href="+code=memset" class="sref">memset./a>(.a href="+code=bdev" class="sref">bdev./a>, 0, sizeof(*.a href="+code=bdev" class="sref">bdev./a>)); 4413./a>        .a href="+code=mutex_init" class="sref">mutex_init./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 4414./a>        .a href="+code=INIT_LIST_HEAD" class="sref">INIT_LIST_HEAD./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inodes" class="sref">bd_inodes./a>); 4415./a>        .a href="+code=INIT_LIST_HEAD" class="sref">INIT_LIST_HEAD./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_list" class="sref">bd_list./a>); 4416./a>#ifdef .a href="+code=CONFIG_SYSFS" class="sref">CONFIG_SYSFS8"a> 
4417./a>        .a href="+code=INIT_LIST_HEAD" class="sref">INIT_LIST_HEAD./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder_disks" class="sref">bd_holder_disks./a>); 4418./a>#endif 4419./a>        .a href="+code=inode_init_once" class="sref">inode_init_once./a>(&.a href="+code=ei" class="sref">ei./a>->.a href="+code=vfs_inode" class="sref">vfs_inode./a>); 4420./a>        .spa
 class="comment">/* Initialize mutex for freeze. */./spa
  4421./a>        .a href="+code=mutex_init" class="sref">mutex_init./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_fsfreeze_mutex" class="sref">bd_fsfreeze_mutex./a>); 4422./a>} 4423./a> 4424./a>static .a href="+code=inline" class="sref">inline./a> void .a href="+code=__bd_forget" class="sref">__bd_forget./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 44258"a>{ 4426./a>        .a href="+code=list_del_init" class="sref">list_del_init./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_devices" class="sref">i_devices./a>); 4427./a>        .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a> =4.a href="+code=NULL" class="sref">NULL./a>; 4428./a>        .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a> =4&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>; 4429./a>} 4430./a> 4431./a>static void .a href="+code=bdev_evict_inode" class="sref">bdev_evict_inode./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 4432./a>{ 4433./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a> =4&.a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=inode" class="sref">inode./a>)->.a href="+code=bdev" class="sref">bdev./a>; 4434./a>        struct4.a href="+code=list_head" class="sref">list_head./a> *.a href="+code=p" class="sref">p./a>; 4435./a>        .a href="+code=truncate_inode_pages" class="sref">truncate_inode_pages./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>, 0); 4436./a>        .a href="+code=invalidate_inode_buffers" class="sref">invalidate_inode_buffers./a>(.a href="+code=inode" class="sref">inode./a>); .spa
 class="comment">/* is it needed here? */./spa
  4437./a>        .a href="+code=clear_inode" class="sref">clear_inode./a>(.a href="+code=inode" class="sref">inode./a>); 4438./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4439./a>        while ( (.a href="+code=p" class="sref">p./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inodes" class="sref">bd_inodes./a>..a href="+code=next" class="sref">next./a>) !=4&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inodes" class="sref">bd_inodes./a> ) { 4440./a>                .a href="+code=__bd_forget" class="sref">__bd_forget./a>(.a href="+code=list_entry" class="sref">list_entry./a>(.a href="+code=p" class="sref">p./a>, struct4.a href="+code=inode" class="sref">inode./a>, .a href="+code=i_devices" class="sref">i_devices./a>)); 4441./a>        } 4442./a>        .a href="+code=list_del_init" class="sref">list_del_init./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_list" class="sref">bd_list./a>); 4443./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4444./a>} 4445./a> 4446./a>static const struct4.a href="+code=super_operations" class="sref">super_operations./a> .a href="+code=bdev_sops" class="sref">bdev_sops./a> =4{ 4447./a>        ..a href="+code=statfs" class="sref">statfs./a> =4.a href="+code=simple_statfs" class="sref">simple_statfs./a>, 4448./a>        ..a href="+code=alloc_inode" class="sref">alloc_inode./a> =4.a href="+code=bdev_alloc_inode" class="sref">bdev_alloc_inode./a>, 4449./a>        ..a href="+code=destroy_inode" class="sref">destroy_inode./a> =4.a href="+code=bdev_destroy_inode" class="sref">bdev_destroy_inode./a>, 4450./a>        ..a href="+code=drop_inode" class="sref">drop_inode./a> =4.a href="+code=generic_delete_inode" class="sref">generic_delete_inode./a>, 4451./a>        ..a href="+code=evict_inode" class="sref">evict_inode./a> =4.a 
href="+code=bdev_evict_inode" class="sref">bdev_evict_inode./a>, 4452./a>}; 4453./a> 4454./a>static struct4.a href="+code=dentry" class="sref">dentry./a> *.a href="+code=bd_mount" class="sref">bd_mount./a>(struct4.a href="+code=file_system_type" class="sref">file_system_type./a> *.a href="+code=fs_type" class="sref">fs_type./a>, 4455./a>        int4.a href="+code=flags" class="sref">flags./a>,4const char *.a href="+code=dev_nam
" class="sref">dev_nam
./a>,4void *.a href="+code=data" class="sref">data./a>) 4456./a>{ 4457./a>        return .a href="+code=mount_pseudo" class="sref">mount_pseudo./a>(.a href="+code=fs_type" class="sref">fs_type./a>, .spa
 class="string">"bdev:"./spa
 ,4&.a href="+code=bdev_sops" class="sref">bdev_sops./a>, .a href="+code=NULL" class="sref">NULL./a>, .a href="+code=BDEVFS_MAGIC" class="sref">BDEVFS_MAGIC./a>); 4458./a>} 4459./a> 4460./a>static struct4.a href="+code=file_system_type" class="sref">file_system_type./a> .a href="+code=bd_type" class="sref">bd_type./a> =4{ 4461./a>        ..a href="+code=nam
" class="sref">nam
./a>           = .spa
 class="string">"bdev"./spa
 , 4462./a>        ..a href="+code=mount" class="sref">mount./a>          = .a href="+code=bd_mount" class="sref">bd_mount./a>, 4463./a>        ..a href="+code=kill_sb" class="sref">kill_sb./a>        =4.a href="+code=kill_anon_super" class="sref">kill_anon_super./a>, 4464./a>}; 4465./a> 4466./a>static struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=blockdev_superblock" class="sref">blockdev_superblock./a> .a href="+code=__read_mostly" class="sref">__read_mostly./a>; 4467./a> 4468./a>void .a href="+code=__init" class="sref">__init./a> .a href="+code=bdev_cache_init" class="sref">bdev_cache_init./a>(void) 4469./a>{ 4470./a>        int4.a href="+code=err" class="sref">err./a>; 4471./a>        static struct4.a href="+code=vfsmount" class="sref">vfsmount./a> *.a href="+code=bd_mnt" class="sref">bd_mnt./a>; 4472./a> 4473./a>        .a href="+code=bdev_cachep" class="sref">bdev_cachep./a> =4.a href="+code=kmem_cache_creat
" class="sref">kmem_cache_creat
./a>(.spa
 class="string">"bdev_cache"./spa
 ,4sizeof(struct4.a href="+code=bdev_inode" class="sref">bdev_inode./a>), 4474./a>                        0, (.a href="+code=SLAB_HWCACHE_ALIGN" class="sref">SLAB_HWCACHE_ALIGN./a>|.a href="+code=SLAB_RECLAIM_ACCOUNT" class="sref">SLAB_RECLAIM_ACCOUNT./a>| 4475./a>                                .a href="+code=SLAB_MEM_SPREAD" class="sref">SLAB_MEM_SPREAD./a>|.a href="+code=SLAB_PANIC" class="sref">SLAB_PANIC./a>), 4476./a>                        .a href="+code=init_once" class="sref">init_once./a>); 4477./a>        .a href="+code=err" class="sref">err./a> =4.a href="+code=register_filesystem" class="sref">register_filesystem./a>(&.a href="+code=bd_type" class="sref">bd_type./a>); 4478./a>        if (.a href="+code=err" class="sref">err./a>) 4479./a>                .a href="+code=pa
ic" class="sref">pa
ic./a>(.spa
 class="string">"Cannot register bdev pseudo-fs"./spa
 ); 4480./a>        .a href="+code=bd_mnt" class="sref">bd_mnt./a> =4.a href="+code=kern_mount" class="sref">kern_mount./a>(&.a href="+code=bd_type" class="sref">bd_type./a>); 4481./a>        if (.a href="+code=IS_ERR" class="sref">IS_ERR./a>(.a href="+code=bd_mnt" class="sref">bd_mnt./a>)) 4482./a>                .a href="+code=pa
ic" class="sref">pa
ic./a>(.spa
 class="string">"Cannot creat
 bdev pseudo-fs"./spa
 ); 4483./a>        .a href="+code=blockdev_superblock" class="sref">blockdev_superblock./a> = .a href="+code=bd_mnt" class="sref">bd_mnt./a>->.a href="+code=mnt_sb" class="sref">mnt_sb./a>;   .spa
 class="comment">/* For writeback */./spa
  4484./a>} 4485./a> 4486./a>.spa
 class="comment">/*./spa
  4487./a>.spa
 class="comment"> * Most likely _very_ bad one - but then it's hardly critical for small./spa
  4488./a>.spa
 class="comment"> * /dev and ca
 be fixed when somebody will need really large one../spa
  4489./a>.spa
 class="comment"> * Keep in mind that it will be fed through icache hash function too../spa
  4490./a>.spa
 class="comment"> */./spa
  4491./a>static .a href="+code=inline" class="sref">inline./a> unsigned long .a href="+code=hash" class="sref">hash./a>(.a href="+code=dev_t" class="sref">dev_t./a> .a href="+code=dev" class="sref">dev./a>) 4492./a>{ 4493./a>        return .a href="+code=MAJOR" class="sref">MAJOR./a>(.a href="+code=dev" class="sref">dev./a>)+.a href="+code=MINOR" class="sref">MINOR./a>(.a href="+code=dev" class="sref">dev./a>); 4494./a>} 4495./a> 4496./a>static int .a href="+code=bdev_test" class="sref">bdev_test./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>,4void *.a href="+code=data" class="sref">data./a>) 4497./a>{ 4498./a>        return .a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=inode" class="sref">inode./a>)->.a href="+code=bdev" class="sref">bdev./a>..a href="+code=bd_dev" class="sref">bd_dev./a> == *(.a href="+code=dev_t" class="sref">dev_t./a> *).a href="+code=data" class="sref">data./a>; 4499./a>} 4500./a> 4501./a>static int .a href="+code=bdev_set" class="sref">bdev_set./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>,4void *.a href="+code=data" class="sref">data./a>) 4502./a>{ 4503./a>        .a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=inode" class="sref">inode./a>)->.a href="+code=bdev" class="sref">bdev./a>..a href="+code=bd_dev" class="sref">bd_dev./a> = *(.a href="+code=dev_t" class="sref">dev_t./a> *).a href="+code=data" class="sref">data./a>; 4504./a>        return 0; 4505./a>} 4506./a> 4507./a>static .a href="+code=LIST_HEAD" class="sref">LIST_HEAD./a>(.a href="+code=all_bdevs" class="sref">all_bdevs./a>); 4508./a> 4509./a>struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdget" class="sref">bdget./a>(.a href="+code=dev_t" class="sref">dev_t./a> .a href="+code=dev" class="sref">dev./a>) 4510./a>{ 4511./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a 
href="+code=bdev" class="sref">bdev./a>; 4512./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>; 4513./a> 4514./a>        .a href="+code=inode" class="sref">inode./a> =4.a href="+code=iget5_locked" class="sref">iget5_locked./a>(.a href="+code=blockdev_superblock" class="sref">blockdev_superblock./a>, .a href="+code=hash" class="sref">hash./a>(.a href="+code=dev" class="sref">dev./a>), 4515./a>                        .a href="+code=bdev_test" class="sref">bdev_test./a>, .a href="+code=bdev_set" class="sref">bdev_set./a>,4&.a href="+code=dev" class="sref">dev./a>); 4516./a> 4517./a>        if (!.a href="+code=inode" class="sref">inode./a>) 4518./a>                return .a href="+code=NULL" class="sref">NULL./a>; 4519./a> 4520./a>        .a href="+code=bdev" class="sref">bdev./a> =4&.a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=inode" class="sref">inode./a>)->.a href="+code=bdev" class="sref">bdev./a>; 45218"a> 4522./a>        if (.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_state" class="sref">i_state./a> & .a href="+code=I_NEW" class="sref">I_NEW./a>)4{ 4523./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> =4.a href="+code=NULL" class="sref">NULL./a>; 4524./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_super" class="sref">bd_super./a> =4.a href="+code=NULL" class="sref">NULL./a>; 4525./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a> =4.a href="+code=inode" class="sref">inode./a>; 4526./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_block_size" class="sref">bd_block_size./a> =4(1 << .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_blkbits" class="sref">i_blkbits./a>); 4527./a>                .a href="+code=bdev" class="sref">bdev./a>->.a 
href="+code=bd_part_count" class="sref">bd_part_count./a> =40; 4528./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_invalidated" class="sref">bd_invalidated./a> =40; 4529./a>                .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_mode" class="sref">i_mode./a> =4.a href="+code=S_IFBLK" class="sref">S_IFBLK./a>; 4530./a>                .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_rdev" class="sref">i_rdev./a> =4.a href="+code=dev" class="sref">dev./a>; 4531./a>                .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a> =4.a href="+code=bdev" class="sref">bdev./a>; 4532./a>                .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>..a href="+code=a_ops" class="sref">a_ops./a> =4&.a href="+code=def_blk_aops" class="sref">def_blk_aops./a>; 4533./a>                .a href="+code=mapping_set_gfp_mask" class="sref">mapping_set_gfp_mask./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>, .a href="+code=GFP_USER" class="sref">GFP_USER./a>); 4534./a>                .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_data" class="sref">i_data./a>..a href="+code=backing_dev_info" class="sref">backing_dev_info./a> =4&.a href="+code=default_backing_dev_info" class="sref">default_backing_dev_info./a>; 4535./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4536./a>                .a href="+code=list_add" class="sref">list_add./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_list" class="sref">bd_list./a>,4&.a href="+code=all_bdevs" class="sref">all_bdevs./a>); 4537./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4538./a>                .a href="+code=unlock_new_inode" 
class="sref">unlock_new_inode./a>(.a href="+code=inode" class="sref">inode./a>); 4539./a>        } 4540./a>        return .a href="+code=bdev" class="sref">bdev./a>; 4541./a>} 4542./a> 4543./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=bdget" class="sref">bdget./a>); 4544./a> 4545./a>.spa
 class="comment">/**./spa
  4546./a>.spa
 class="comment"> * bdgrab -- Grab a reference to an already referenced block device./spa
  4547./a>.spa
 class="comment"> * @bdev:       Block device to grab a reference to../spa
  4548./a>.spa
 class="comment"> */./spa
  4549./a>struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdgrab" class="sref">bdgrab./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4550./a>{ 4551./a>        .a href="+code=ihold" class="sref">ihold./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>); 4552./a>        return .a href="+code=bdev" class="sref">bdev./a>; 4553./a>} 4554./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=bdgrab" class="sref">bdgrab./a>); 4555./a> 4556./a>long .a href="+code=nr_blockdev_pages" class="sref">nr_blockdev_pages./a>(void) 4557./a>{ 4558./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>; 4559./a>        long .a href="+code=ret" class="sref">ret./a> =40; 4560./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4561./a>        .a href="+code=list_for_each_entry" class="sref">list_for_each_entry./a>(.a href="+code=bdev" class="sref">bdev./a>,4&.a href="+code=all_bdevs" class="sref">all_bdevs./a>, .a href="+code=bd_list" class="sref">bd_list./a>)4{ 4562./a>                .a href="+code=ret" class="sref">ret./a> +=4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>->.a href="+code=nrpages" class="sref">nrpages./a>; 4563./a>        } 4564./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4565./a>        return .a href="+code=ret" class="sref">ret./a>; 4566./a>} 4567./a> 4568./a>void .a href="+code=bdput" class="sref">bdput./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4569./a>{ 4570./a>        .a href="+code=iput" 
class="sref">iput./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>); 4571./a>} 4572./a> 4573./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=bdput" class="sref">bdput./a>); 4574./a>  4575./a>static struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bd_acquire" class="sref">bd_acquire./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 4576./a>{ 4577./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>; 4578./a> 4579./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4580./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a>; 4581./a>        if (.a href="+code=bdev" class="sref">bdev./a>)4{ 4582./a>                .a href="+code=ihold" class="sref">ihold./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>); 4583./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4584./a>                return .a href="+code=bdev" class="sref">bdev./a>; 4585./a>        } 4586./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4587./a> 4588./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=bdget" class="sref">bdget./a>(.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_rdev" class="sref">i_rdev./a>); 4589./a>        if (.a href="+code=bdev" class="sref">bdev./a>)4{ 4590./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4591./a>                if (!.a href="+code=inode" 
class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a>)4{ 4592./a>                        .spa
 class="comment">/*./spa
  4593./a>.spa
 class="comment">                         * We take an additional reference to bd_inode,./spa
  4594./a>.spa
 class="comment">                         * and it's released in clear_inode() of inode../spa
  4595./a>.spa
 class="comment">                         * So, we ca
 access it via ->i_mapping always./spa
  4596./a>.spa
 class="comment">                         * without igrab()../spa
  4597./a>.spa
 class="comment">                         */./spa
  4598./a>                        .a href="+code=ihold" class="sref">ihold./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>); 4599./a>                        .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a> =4.a href="+code=bdev" class="sref">bdev./a>; 4600./a>                        .a href="+code=inode" class="sref">inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>; 4601./a>                        .a href="+code=list_add" class="sref">list_add./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_devices" class="sref">i_devices./a>,4&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inodes" class="sref">bd_inodes./a>); 4602./a>                } 4603./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4604./a>        } 4605./a>        return .a href="+code=bdev" class="sref">bdev./a>; 4606./a>} 4607./a> 4608./a>static .a href="+code=inline" class="sref">inline./a> int .a href="+code=sb_is_blkdev_sb" class="sref">sb_is_blkdev_sb./a>(struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a>) 4609./a>{ 4610./a>        return .a href="+code=sb" class="sref">sb./a> == .a href="+code=blockdev_superblock" class="sref">blockdev_superblock./a>; 4611./a>} 4612./a> 4613./a>.spa
 class="comment">/* Call when you free inode */./spa
  4614./a> 4615./a>void .a href="+code=bd_forget" class="sref">bd_forget./a>(struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>) 4616./a>{ 4617./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=NULL" class="sref">NULL./a>; 4618./a> 4619./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4620./a>        if (.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a>)4{ 4621./a>                if (!.a href="+code=sb_is_blkdev_sb" class="sref">sb_is_blkdev_sb./a>(.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_sb" class="sref">i_sb./a>)) 4622./a>                        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_bdev" class="sref">i_bdev./a>; 4623./a>                .a href="+code=__bd_forget" class="sref">__bd_forget./a>(.a href="+code=inode" class="sref">inode./a>); 4624./a>        } 4625./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4626./a> 4627./a>        if (.a href="+code=bdev" class="sref">bdev./a>) 4628./a>                .a href="+code=iput" class="sref">iput./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>); 4629./a>} 4630./a> 4631./a>.spa
 class="comment">/**./spa
  4632./a>.spa
 class="comment"> * bd_may_claim - test whether a block device ca
 be claimed./spa
  4633./a>.spa
 class="comment"> * @bdev: block device of interest./spa
  4634./a>.spa
 class="comment"> * @whole: whole block device containing @bdev, may equal @bdev./spa
  4635./a>.spa
 class="comment"> * @holder: holder trying to claim @bdev./spa
  4636./a>.spa
 class="comment"> *./spa
  4637./a>.spa
 class="comment"> * Test whether @bdev ca
 be claimed by @holder../spa
  4638./a>.spa
 class="comment"> *./spa
  4639./a>.spa
 class="comment"> * CONTEXT:./spa
  4640./a>.spa
 class="comment"> * spin_lock(&bdev_lock)../spa
  4641./a>.spa
 class="comment"> *./spa
  4642./a>.spa
 class="comment"> * RETURNS:./spa
  4643./a>.spa
 class="comment"> * %true if @bdev ca
 be claimed, %false otherwise../spa
  4644./a>.spa
 class="comment"> */./spa
  4645./a>static .a href="+code=bool" class="sref">bool./a> .a href="+code=bd_may_claim" class="sref">bd_may_claim./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=whole" class="sref">whole./a>, 4646./a>                        4void *.a href="+code=holder" class="sref">holder./a>) 4647./a>{ 4648./a>        if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> == .a href="+code=holder" class="sref">holder./a>) 4649./a>                return .a href="+code=true" class="sref">true./a>;     .spa
 class="comment">/* already a holder */./spa
  4650./a>        else if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> !=4.a href="+code=NULL" class="sref">NULL./a>) 4651./a>                return .a href="+code=false" class="sref">false./a>;    .spa
 class="comment">/* held by someone else */./spa
  4652./a>        else if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> ==4.a href="+code=bdev" class="sref">bdev./a>) 4653./a>                return .a href="+code=true" class="sref">true./a>;     .spa
 class="comment">/* is a whole device which isn't held */./spa
  4654./a> 4655./a>        else if (.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> == .a href="+code=bd_may_claim" class="sref">bd_may_claim./a>) 4656./a>                return .a href="+code=true" class="sref">true./a>;     .spa
 class="comment">/* is a partition of a device that is being partitioned */./spa
  4657./a>        else if (.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> !=4.a href="+code=NULL" class="sref">NULL./a>) 4658./a>                return .a href="+code=false" class="sref">false./a>;    .spa
 class="comment">/* is a partition of a held device */./spa
  4659./a>        else 4660./a>                return .a href="+code=true" class="sref">true./a>;     .spa
 class="comment">/* is a partition of an un-held device */./spa
  4661./a>} 4662./a> 4663./a>.spa
 class="comment">/**./spa
  4664./a>.spa
 class="comment"> * bd_prepare_to_claim - prepare to claim a block device./spa
  4665./a>.spa
 class="comment"> * @bdev: block device of interest./spa
  4666./a>.spa
 class="comment"> * @whole: the whole device containing @bdev, may equal @bdev./spa
  4667./a>.spa
 class="comment"> * @holder: holder trying to claim @bdev./spa
  4668./a>.spa
 class="comment"> *./spa
  4669./a>.spa
 class="comment"> * Prepare to claim @bdev.  This function fails if @bdev is already./spa
  4670./a>.spa
 class="comment"> * claimed by another holder and waits if another claiming is in./spa
  4671./a>.spa
 class="comment"> * progress.  This function doesn't actually claim.  On successful./spa
  4672./a>.spa
 class="comment"> * return, the caller has ownership of bd_claiming and bd_holder[s]../spa
  4673./a>.spa
 class="comment"> *./spa
  4674./a>.spa
 class="comment"> * CONTEXT:./spa
  4675./a>.spa
 class="comment"> * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab./spa
  4676./a>.spa
 class="comment"> * it multiple times../spa
  4677./a>.spa
 class="comment"> *./spa
  4678./a>.spa
 class="comment"> * RETURNS:./spa
  4679./a>.spa
 class="comment"> * 0 if @bdev ca
 be claimed, -EBUSY otherwise../spa
  4680./a>.spa
 class="comment"> */./spa
  4681./a>static int .a href="+code=bd_prepare_to_claim" class="sref">bd_prepare_to_claim./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, 4682./a>                               struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=whole" class="sref">whole./a>,4void *.a href="+code=holder" class="sref">holder./a>) 4683./a>{ 4684./a>.a href="+code=retry" class="sref">retry./a>: 4685./a>        .spa
 class="comment">/* if someone else claimed, fail */./spa
  4686./a>        if (!.a href="+code=bd_may_claim" class="sref">bd_may_claim./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=whole" class="sref">whole./a>,4.a href="+code=holder" class="sref">holder./a>)) 4687./a>                return -.a href="+code=EBUSY" class="sref">EBUSY./a>; 4688./a> 4689./a>        .spa
 class="comment">/* if claiming is already in progress, wait for it to finish */./spa
  4690./a>        if (.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_claiming" class="sref">bd_claiming./a>)4{ 4691./a>                .a href="+code=wait_queue_head_t" class="sref">wait_queue_head_t./a> *.a href="+code=wq" class="sref">wq./a> =4.a href="+code=bit_waitqueue" class="sref">bit_waitqueue./a>(&.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_claiming" class="sref">bd_claiming./a>, 0); 4692./a>                .a href="+code=DEFINE_WAIT" class="sref">DEFINE_WAIT./a>(.a href="+code=wait" class="sref">wait./a>); 4693./a> 4694./a>                .a href="+code=prepare_to_wait" class="sref">prepare_to_wait./a>(.a href="+code=wq" class="sref">wq./a>,4&.a href="+code=wait" class="sref">wait./a>,4.a href="+code=TASK_UNINTERRUPTIBLE" class="sref">TASK_UNINTERRUPTIBLE./a>); 4695./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4696./a>                .a href="+code=schedule" class="sref">schedule./a>(); 4697./a>                .a href="+code=finish_wait" class="sref">finish_wait./a>(.a href="+code=wq" class="sref">wq./a>,4&.a href="+code=wait" class="sref">wait./a>); 4698./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4699./a>                goto .a href="+code=retry" class="sref">retry./a>; 4700./a>        } 47018"a> 4702./a>        .spa
 class="comment">/* yay, all mine */./spa
  4703./a>        return 0; 4704./a>} 4705./a> 4706./a>.spa
 class="comment">/**./spa
  4707./a>.spa
 class="comment"> * bd_start_claiming - start claiming a block device./spa
  4708./a>.spa
 class="comment"> * @bdev: block device of interest./spa
  4709./a>.spa
 class="comment"> * @holder: holder trying to claim @bdev./spa
  4710./a>.spa
 class="comment"> *./spa
  4711./a>.spa
 class="comment"> * @bdev is about to be opened exclusively.  Check @bdev ca
 be opened./spa
  4712./a>.spa
 class="comment"> * exclusively and mark that a
 exclusive open is in progress.  Each./spa
  4713./a>.spa
 class="comment"> * successful call to this function must be matched with a call to./spa
  4714./a>.spa
 class="comment"> * either bd_finish_claiming() or bd_abort_claiming() (which do not./spa
  4715./a>.spa
 class="comment"> * fail)../spa
  4716./a>.spa
 class="comment"> *./spa
  4717./a>.spa
 class="comment"> * This function is used to gai
 exclusive access to the block device./spa
  4718./a>.spa
 class="comment"> * without actually causing other exclusive open attempts to fail. It./spa
  4719./a>.spa
 class="comment"> * should be used when the open sequence itself requires exclusive./spa
  4720./a>.spa
 class="comment"> * access but may subsequently fail../spa
  4721./a>.spa
 class="comment"> *./spa
  4722./a>.spa
 class="comment"> * CONTEXT:./spa
  4723./a>.spa
 class="comment"> * Might sleep../spa
  4724./a>.spa
 class="comment"> *./spa
  4725./a>.spa
 class="comment"> * RETURNS:./spa
  4726./a>.spa
 class="comment"> * Pointer to the block device containing @bdev on success, ERR_PTR()./spa
  4727./a>.spa
 class="comment"> * value on failure../spa
  4728./a>.spa
 class="comment"> */./spa
  4729./a>static struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bd_start_claiming" class="sref">bd_start_claiming./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, 4730./a>                                              void *.a href="+code=holder" class="sref">holder./a>) 4731./a>{ 4732./a>        struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>; 4733./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=whole" class="sref">whole./a>; 4734./a>        int .a href="+code=partno" class="sref">partno./a>,4.a href="+code=err" class="sref">err./a>; 4735./a> 4736./a>        .a href="+code=might_sleep" class="sref">might_sleep./a>(); 4737./a> 4738./a>        .spa
 class="comment">/*./spa
  4739./a>.spa
 class="comment">         * @bdev might not have been initialized properly yet, look up./spa
  4740./a>.spa
 class="comment">         * and grab the outer block device the hard way../spa
  4741./a>.spa
 class="comment">         */./spa
  4742./a>        .a href="+code=disk" class="sref">disk./a> =4.a href="+code=get_gendisk" class="sref">get_gendisk./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_dev" class="sref">bd_dev./a>, &.a href="+code=partno" class="sref">partno./a>); 4743./a>        if (!.a href="+code=disk" class="sref">disk./a>) 4744./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(-.a href="+code=ENXIO" class="sref">ENXIO./a>); 4745./a> 4746./a>        .spa
 class="comment">/*./spa
  4747./a>.spa
 class="comment">         * Normally, @bdev should equal what's returned from bdget_disk()./spa
  4748./a>.spa
 class="comment">         * if partno is 0; however, some drivers (floppy) use multiple./spa
  4749./a>.spa
 class="comment">         * bdev's for the sam
 physical device and @bdev may be one of the./spa
  4750./a>.spa
 class="comment">         * aliases.  Keep @bdev if partno is 0.  This means claimer./spa
  4751./a>.spa
 class="comment">         * tracking is broken for those devices but it has always been that./spa
  4752./a>.spa
 class="comment">         * way../spa
  4753./a>.spa
 class="comment">         */./spa
  4754./a>        if (.a href="+code=partno" class="sref">partno./a>) 4755./a>                .a href="+code=whole" class="sref">whole./a> =4.a href="+code=bdget_disk" class="sref">bdget_disk./a>(.a href="+code=disk" class="sref">disk./a>, 0); 4756./a>        else 4757./a>                .a href="+code=whole" class="sref">whole./a> =4.a href="+code=bdgrab" class="sref">bdgrab./a>(.a href="+code=bdev" class="sref">bdev./a>); 4758./a> 4759./a>        .a href="+code=module_put" class="sref">module_put./a>(.a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=owner" class="sref">owner./a>); 4760./a>        .a href="+code=put_disk" class="sref">put_disk./a>(.a href="+code=disk" class="sref">disk./a>); 4761./a>        if (!.a href="+code=whole" class="sref">whole./a>) 4762./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(-.a href="+code=ENOMEM" class="sref">ENOMEM./a>); 4763./a> 4764./a>        .spa
 class="comment">/* prepare to claim, if successful, mark claiming in progress */./spa
  4765./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4766./a> 4767./a>        .a href="+code=err" class="sref">err./a> =4.a href="+code=bd_prepare_to_claim" class="sref">bd_prepare_to_claim./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=whole" class="sref">whole./a>,4.a href="+code=holder" class="sref">holder./a>); 4768./a>        if (.a href="+code=err" class="sref">err./a> == 0)4{ 4769./a>                .a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_claiming" class="sref">bd_claiming./a> =4.a href="+code=holder" class="sref">holder./a>; 4770./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4771./a>                return .a href="+code=whole" class="sref">whole./a>; 4772./a>        } else { 4773./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 4774./a>                .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=whole" class="sref">whole./a>); 4775./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(.a href="+code=err" class="sref">err./a>); 4776./a>        } 4777./a>} 4778./a> 4779./a>#ifdef .a href="+code=CONFIG_SYSFS" class="sref">CONFIG_SYSFS./a> 4780./a>struct4.a href="+code=bd_holder_disk" class="sref">bd_holder_disk./a> { 4781./a>        struct4.a href="+code=list_head" class="sref">list_head./a>        .a href="+code=list" class="sref">list./a>; 4782./a>        struct4.a href="+code=gendisk" class="sref">gendisk./a>          *.a href="+code=disk" class="sref">disk./a>; 4783./a>        int                     .a href="+code=refcnt" class="sref">refcnt./a>; 4784./a>}; 4785./a> 4786./a>static struct4.a href="+code=bd_holder_disk" class="sref">bd_holder_disk./a> *.a href="+code=bd_find_holder_disk" class="sref">bd_find_holder_disk./a>(struct4.a 
href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, 4787./a>                                                  struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>) 4788./a>{ 4789./a>        struct4.a href="+code=bd_holder_disk" class="sref">bd_holder_disk./a> *.a href="+code=holder" class="sref">holder./a>; 4790./a> 4791./a>        .a href="+code=list_for_each_entry" class="sref">list_for_each_entry./a>(.a href="+code=holder" class="sref">holder./a>,4&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder_disks" class="sref">bd_holder_disks./a>,4.a href="+code=list" class="sref">list./a>) 4792./a>                if (.a href="+code=holder" class="sref">holder./a>->.a href="+code=disk" class="sref">disk./a> ==4.a href="+code=disk" class="sref">disk./a>) 4793./a>                        return .a href="+code=holder" class="sref">holder./a>; 4794./a>        return .a href="+code=NULL" class="sref">NULL./a>; 4795./a>} 4796./a> 4797./a>static int .a href="+code=add_symlink" class="sref">add_symlink./a>(struct4.a href="+code=kobject" class="sref">kobject./a> *.a href="+code=from" class="sref">from./a>,4struct4.a href="+code=kobject" class="sref">kobject./a> *.a href="+code=to" class="sref">to./a>) 4798./a>{ 4799./a>        return .a href="+code=sysfs_create_link" class="sref">sysfs_create_link./a>(.a href="+code=from" class="sref">from./a>,4.a href="+code=to" class="sref">to./a>,4.a href="+code=kobject_nam
" class="sref">kobject_nam
./a>(.a href="+code=to" class="sref">to./a>)); 4800./a>} 48018"a> 4802./a>static void .a href="+code=del_symlink" class="sref">del_symlink./a>(struct4.a href="+code=kobject" class="sref">kobject./a> *.a href="+code=from" class="sref">from./a>,4struct4.a href="+code=kobject" class="sref">kobject./a> *.a href="+code=to" class="sref">to./a>) 4803./a>{ 4804./a>        .a href="+code=sysfs_remove_link" class="sref">sysfs_remove_link./a>(.a href="+code=from" class="sref">from./a>,4.a href="+code=kobject_nam
" class="sref">kobject_nam
./a>(.a href="+code=to" class="sref">to./a>)); 4805./a>} 4806./a> 4807./a>.spa
 class="comment">/**./spa
  4808./a>.spa
 class="comment"> * bd_link_disk_holder - create symlinks between holding disk and slave bdev./spa
  4809./a>.spa
 class="comment"> * @bdev: the claimed slave bdev./spa
  4810./a>.spa
 class="comment"> * @disk: the holding disk./spa
  4811./a>.spa
 class="comment"> *./spa
  4812./a>.spa
 class="comment"> * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT../spa
  4813./a>.spa
 class="comment"> *./spa
  4814./a>.spa
 class="comment"> * This functions creates the following sysfs symlinks../spa
  4815./a>.spa
 class="comment"> *./spa
  4816./a>.spa
 class="comment"> * - from "slaves" directory of the holder @disk to the claimed @bdev./spa
  4817./a>.spa
 class="comment"> * - from "holders" directory of the @bdev to the holder @disk./spa
  4818./a>.spa
 class="comment"> *./spa
  4819./a>.spa
 class="comment"> * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is./spa
  4820./a>.spa
 class="comment"> * passed to bd_link_disk_holder(), then:./spa
  4821./a>.spa
 class="comment"> *./spa
  4822./a>.spa
 class="comment"> *   /sys/block/dm-0/slaves/sda --> /sys/block/sda./spa
  4823./a>.spa
 class="comment"> *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0./spa
  4824./a>.spa
 class="comment"> *./spa
  4825./a>.spa
 class="comment"> * The caller must have claimed @bdev before calling this function and./spa
  4826./a>.spa
 class="comment"> * ensure that both @bdev and @disk are valid during the creation and./spa
  4827./a>.spa
 class="comment"> * lifetime of these symlinks../spa
  4828./a>.spa
 class="comment"> *./spa
  4829./a>.spa
 class="comment"> * CONTEXT:./spa
  4830./a>.spa
 class="comment"> * Might sleep../spa
  4831./a>.spa
 class="comment"> *./spa
  4832./a>.spa
 class="comment"> * RETURNS:./spa
  4833./a>.spa
 class="comment"> * 0 on success, -errno on failure../spa
  4834./a>.spa
 class="comment"> */./spa
  4835./a>int .a href="+code=bd_link_disk_holder" class="sref">bd_link_disk_holder./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>) 4836./a>{ 4837./a>        struct4.a href="+code=bd_holder_disk" class="sref">bd_holder_disk./a> *.a href="+code=holder" class="sref">holder./a>; 4838./a>        int .a href="+code=ret" class="sref">ret./a> = 0; 4839./a> 4840./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 48418"a> 4842./a>        .a href="+code=WARN_ON_ONCE" class="sref">WARN_ON_ONCE./a>(!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder" class="sref">bd_holder./a>); 4843./a> 4844./a>        .spa
 class="comment">/* FIXME: remove the following once add_disk() handles errors */./spa
  4845./a>        if (.a href="+code=WARN_ON" class="sref">WARN_ON./a>(!.a href="+code=disk" class="sref">disk./a>->.a href="+code=slave_dir" class="sref">slave_dir./a> || !.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=holder_dir" class="sref">holder_dir./a>)) 4846./a>                goto .a href="+code=out_unlock" class="sref">out_unlock./a>; 4847./a> 4848./a>        .a href="+code=holder" class="sref">holder./a> =4.a href="+code=bd_find_holder_disk" class="sref">bd_find_holder_disk./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=disk" class="sref">disk./a>); 4849./a>        if (.a href="+code=holder" class="sref">holder./a>)4{ 4850./a>                .a href="+code=holder" class="sref">holder./a>->.a href="+code=refcnt" class="sref">refcnt./a>++; 4851./a>                goto .a href="+code=out_unlock" class="sref">out_unlock./a>; 4852./a>        } 4853./a> 4854./a>        .a href="+code=holder" class="sref">holder./a> =4.a href="+code=kzalloc" class="sref">kzalloc./a>(sizeof(*.a href="+code=holder" class="sref">holder./a>), .a href="+code=GFP_KERNEL" class="sref">GFP_KERNEL./a>); 4855./a>        if (!.a href="+code=holder" class="sref">holder./a>)4{ 4856./a>                .a href="+code=ret" class="sref">ret./a> = -.a href="+code=ENOMEM" class="sref">ENOMEM./a>; 4857./a>                goto .a href="+code=out_unlock" class="sref">out_unlock./a>; 4858./a>        } 4859./a> 4860./a>        .a href="+code=INIT_LIST_HEAD" class="sref">INIT_LIST_HEAD./a>(&.a href="+code=holder" class="sref">holder./a>->.a href="+code=list" class="sref">list./a>); 4861./a>        .a href="+code=holder" class="sref">holder./a>->.a href="+code=disk" class="sref">disk./a> = .a href="+code=disk" class="sref">disk./a>; 4862./a>        .a href="+code=holder" class="sref">holder./a>->.a href="+code=refcnt" class="sref">refcnt./a> = 1; 4863./a> 4864./a>        .a href="+code=ret" class="sref">ret./a> = .a 
href="+code=add_symlink" class="sref">add_symlink./a>(.a href="+code=disk" class="sref">disk./a>->.a href="+code=slave_dir" class="sref">slave_dir./a>, &.a href="+code=part_to_dev" class="sref">part_to_dev./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>)->.a href="+code=kobj" class="sref">kobj./a>); 4865./a>        if (.a href="+code=ret" class="sref">ret./a>) 4866./a>                goto .a href="+code=out_free" class="sref">out_free./a>; 4867./a> 4868./a>        .a href="+code=ret" class="sref">ret./a> = .a href="+code=add_symlink" class="sref">add_symlink./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=holder_dir" class="sref">holder_dir./a>, &.a href="+code=disk_to_dev" class="sref">disk_to_dev./a>(.a href="+code=disk" class="sref">disk./a>)->.a href="+code=kobj" class="sref">kobj./a>); 4869./a>        if (.a href="+code=ret" class="sref">ret./a>) 4870./a>                goto .a href="+code=out_del" class="sref">out_del./a>; 4871./a>        .spa
 class="comment">/*./spa
  4872./a>.spa
 class="comment">         * bdev could be deleted beneath us which would implicitly destroy./spa
  4873./a>.spa
 class="comment">         * the holder directory.  Hold on to it../spa
  4874./a>.spa
 class="comment">         */./spa
  4875./a>        .a href="+code=kobject_get" class="sref">kobject_get./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=holder_dir" class="sref">holder_dir./a>); 4876./a> 4877./a>        .a href="+code=list_add" class="sref">list_add./a>(&.a href="+code=holder" class="sref">holder./a>->.a href="+code=list" class="sref">list./a>,4&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder_disks" class="sref">bd_holder_disks./a>); 4878./a>        goto .a href="+code=out_unlock" class="sref">out_unlock./a>; 4879./a> 4880./a>.a href="+code=out_del" class="sref">out_del./a>: 4881./a>        .a href="+code=del_symlink" class="sref">del_symlink./a>(.a href="+code=disk" class="sref">disk./a>->.a href="+code=slave_dir" class="sref">slave_dir./a>, &.a href="+code=part_to_dev" class="sref">part_to_dev./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>)->.a href="+code=kobj" class="sref">kobj./a>); 4882./a>.a href="+code=out_free" class="sref">out_free./a>: 4883./a>        .a href="+code=kfree" class="sref">kfree./a>(.a href="+code=holder" class="sref">holder./a>); 4884./a>.a href="+code=out_unlock" class="sref">out_unlock./a>: 4885./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 4886./a>        return .a href="+code=ret" class="sref">ret./a>; 4887./a>} 4888./a>.a href="+code=EXPORT_SYMBOL_GPL" class="sref">EXPORT_SYMBOL_GPL./a>(.a href="+code=bd_link_disk_holder" class="sref">bd_link_disk_holder./a>); 4889./a> 4890./a>.spa
 class="comment">/**./spa
  4891./a>.spa
 class="comment"> * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()./spa
  4892./a>.spa
 class="comment"> * @bdev: the calimed slave bdev./spa
  4893./a>.spa
 class="comment"> * @disk: the holding disk./spa
  4894./a>.spa
 class="comment"> *./spa
  4895./a>.spa
 class="comment"> * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT../spa
  4896./a>.spa
 class="comment"> *./spa
  4897./a>.spa
 class="comment"> * CONTEXT:./spa
  4898./a>.spa
 class="comment"> * Might sleep../spa
  4899./a>.spa
 class="comment"> */./spa
  4900./a>void .a href="+code=bd_unlink_disk_holder" class="sref">bd_unlink_disk_holder./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>) 4901./a>{ 4902./a>        struct4.a href="+code=bd_holder_disk" class="sref">bd_holder_disk./a> *.a href="+code=holder" class="sref">holder./a>; 4903./a> 4904./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 4905./a> 4906./a>        .a href="+code=holder" class="sref">holder./a> =4.a href="+code=bd_find_holder_disk" class="sref">bd_find_holder_disk./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=disk" class="sref">disk./a>); 4907./a> 4908./a>        if (!.a href="+code=WARN_ON_ONCE" class="sref">WARN_ON_ONCE./a>(.a href="+code=holder" class="sref">holder./a> ==4.a href="+code=NULL" class="sref">NULL./a>) && !--.a href="+code=holder" class="sref">holder./a>->.a href="+code=refcnt" class="sref">refcnt./a>)4{ 4909./a>                .a href="+code=del_symlink" class="sref">del_symlink./a>(.a href="+code=disk" class="sref">disk./a>->.a href="+code=slave_dir" class="sref">slave_dir./a>, &.a href="+code=part_to_dev" class="sref">part_to_dev./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>)->.a href="+code=kobj" class="sref">kobj./a>); 4910./a>                .a href="+code=del_symlink" class="sref">del_symlink./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=holder_dir" class="sref">holder_dir./a>, 4911./a>                            &.a href="+code=disk_to_dev" class="sref">disk_to_dev./a>(.a href="+code=disk" class="sref">disk./a>)->.a href="+code=kobj" class="sref">kobj./a>); 4912./a>                .a href="+code=kobject_put" 
class="sref">kobject_put./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=holder_dir" class="sref">holder_dir./a>); 4913./a>                .a href="+code=list_del_init" class="sref">list_del_init./a>(&.a href="+code=holder" class="sref">holder./a>->.a href="+code=list" class="sref">list./a>); 4914./a>                .a href="+code=kfree" class="sref">kfree./a>(.a href="+code=holder" class="sref">holder./a>); 4915./a>        } 4916./a> 4917./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 4918./a>} 4919./a>.a href="+code=EXPORT_SYMBOL_GPL" class="sref">EXPORT_SYMBOL_GPL./a>(.a href="+code=bd_unlink_disk_holder" class="sref">bd_unlink_disk_holder./a>); 4920./a>#endif 49218"a> 4922./a>.spa
 class="comment">/**./spa
  4923./a>.spa
 class="comment"> * flush_disk - invalidates all buffer-cache entries on a disk./spa
  4924./a>.spa
 class="comment"> *./spa
  4925./a>.spa
 class="comment"> * @bdev:      struct4block device to be flushed./spa
  4926./a>.spa
 class="comment"> * @kill_dirty: flag to guide handling of dirty inodes./spa
  4927./a>.spa
 class="comment"> *./spa
  4928./a>.spa
 class="comment"> * Invalidates all buffer-cache entries on a disk. It should be called./spa
  4929./a>.spa
 class="comment"> * when a disk has been changed -- either by a media change or online./spa
  4930./a>.spa
 class="comment"> * resize../spa
  4931./a>.spa
 class="comment"> */./spa
  4932./a>static void .a href="+code=flush_disk" class="sref">flush_disk./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=bool" class="sref">bool./a> .a href="+code=kill_dirty" class="sref">kill_dirty./a>) 4933./a>{ 4934./a>        if (.a href="+code=__invalidate_device" class="sref">__invalidate_device./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=kill_dirty" class="sref">kill_dirty./a>))4{ 4935./a>                char .a href="+code=nam
" class="sref">nam
./a>[.a href="+code=BDEVNAME_SIZE" class="sref">BDEVNAME_SIZE./a>] =4.spa
 class="string">""./spa
 ; 4936./a> 4937./a>                if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>) 4938./a>                        .a href="+code=disk_nam
" class="sref">disk_nam
./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>, 0, .a href="+code=nam
" class="sref">nam
./a>); 4939./a>                .a href="+code=printk" class="sref">printk./a>(.a href="+code=KERN_WARNING" class="sref">KERN_WARNING./a> .spa
 class="string">"VFS: busy inodes on changed media or "./spa
  4940./a>                       .spa
 class="string">"resized disk %s\n"./spa
 , .a href="+code=nam
" class="sref">nam
./a>); 4941./a>        } 4942./a> 4943./a>        if (!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>) 4944./a>                return; 4945./a>        if (.a href="+code=disk_part_scan_enabled" class="sref">disk_part_scan_enabled./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>)) 4946./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_invalidated" class="sref">bd_invalidated./a> =41; 4947./a>} 4948./a> 4949./a>.spa
 class="comment">/**./spa
  4950./a>.spa
 class="comment"> * check_disk_size_change - checks for disk size change and adjusts bdev size../spa
  4951./a>.spa
 class="comment"> * @disk: struct4gendisk to check./spa
  4952./a>.spa
 class="comment"> * @bdev: struct4bdev to adjust../spa
  4953./a>.spa
 class="comment"> *./spa
  4954./a>.spa
 class="comment"> * This routine checks to see if the bdev size does not match the disk size./spa
  4955./a>.spa
 class="comment"> * and adjusts it if it differs../spa
  4956./a>.spa
 class="comment"> */./spa
  4957./a>void .a href="+code=check_disk_size_change" class="sref">check_disk_size_change./a>(struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>, struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 4958./a>{ 4959./a>        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=disk_size" class="sref">disk_size./a>, .a href="+code=bdev_size" class="sref">bdev_size./a>; 4960./a> 4961./a>        .a href="+code=disk_size" class="sref">disk_size./a> =4(.a href="+code=loff_t" class="sref">loff_t./a>).a href="+code=get_capacity" class="sref">get_capacity./a>(.a href="+code=disk" class="sref">disk./a>) << 9; 4962./a>        .a href="+code=bdev_size" class="sref">bdev_size./a> =4.a href="+code=i_size_read" class="sref">i_size_read./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>); 4963./a>        if (.a href="+code=disk_size" class="sref">disk_size./a> !=4.a href="+code=bdev_size" class="sref">bdev_size./a>)4{ 4964./a>                char .a href="+code=nam
" class="sref">nam
./a>[.a href="+code=BDEVNAME_SIZE" class="sref">BDEVNAME_SIZE./a>]; 4965./a> 4966./a>                .a href="+code=disk_nam
" class="sref">disk_nam
./a>(.a href="+code=disk" class="sref">disk./a>, 0, .a href="+code=nam
" class="sref">nam
./a>); 4967./a>                .a href="+code=printk" class="sref">printk./a>(.a href="+code=KERN_INFO" class="sref">KERN_INFO./a> 4968./a>                       .spa
 class="string">"%s: detected capacity change from %lld to %lld\n"./spa
 , 4969./a>                       .a href="+code=nam
" class="sref">nam
./a>, .a href="+code=bdev_size" class="sref">bdev_size./a>, .a href="+code=disk_size" class="sref">disk_size./a>); 4970./a>                .a href="+code=i_size_write" class="sref">i_size_write./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>, .a href="+code=disk_size" class="sref">disk_size./a>); 4971./a>                .a href="+code=flush_disk" class="sref">flush_disk./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=false" class="sref">false./a>); 4972./a>        } 4973./a>} 4974./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=check_disk_size_change" class="sref">check_disk_size_change./a>); 4975./a> 4976./a>.spa
 class="comment">/**./spa
  4977./a>.spa
 class="comment"> * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back./spa
  4978./a>.spa
 class="comment"> * @disk: struct4gendisk to be revalidated./spa
  4979./a>.spa
 class="comment"> *./spa
  4980./a>.spa
 class="comment"> * This routine is a wrapper for lower-level driver's revalidate_disk./spa
  4981./a>.spa
 class="comment"> * call-backs.  It is used to do common pre and post operations needed./spa
  4982./a>.spa
 class="comment"> * for all revalidate_disk operations../spa
  4983./a>.spa
 class="comment"> */./spa
  4984./a>int .a href="+code=revalidate_disk" class="sref">revalidate_disk./a>(struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>) 4985./a>{ 4986./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>; 4987./a>        int .a href="+code=ret" class="sref">ret./a> = 0; 4988./a> 4989./a>        if (.a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=revalidate_disk" class="sref">revalidate_disk./a>) 4990./a>                .a href="+code=ret" class="sref">ret./a> = .a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=revalidate_disk" class="sref">revalidate_disk./a>(.a href="+code=disk" class="sref">disk./a>); 49918"a> 4992./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=bdget_disk" class="sref">bdget_disk./a>(.a href="+code=disk" class="sref">disk./a>, 0); 4993./a>        if (!.a href="+code=bdev" class="sref">bdev./a>) 4994./a>                return .a href="+code=ret" class="sref">ret./a>; 4995./a> 4996./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 4997./a>        .a href="+code=check_disk_size_change" class="sref">check_disk_size_change./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=bdev" class="sref">bdev./a>); 4998./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_invalidated" class="sref">bd_invalidated./a> =40; 4999./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1000./a>        .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=bdev" class="sref">bdev./a>); 
1001./a>        return .a href="+code=ret" class="sref">ret./a>; 1002./a>} 1003./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=revalidate_disk" class="sref">revalidate_disk./a>); 1004./a> 1005./a>.spa
 class="comment">/*./spa
  1006./a>.spa
 class="comment"> * This routine checks whether a removable media has been changed,./spa
  1007./a>.spa
 class="comment"> * and invalidates all buffer-cache-entries in that case. This./spa
  1008./a>.spa
 class="comment"> * is a relatively slow routine, so we have to try to minimize using./spa
  1009./a>.spa
 class="comment"> * it. Thus it is called only upon a 'mount' or 'open'. This./spa
  1010./a>.spa
 class="comment"> * is the best way of combining speed and utility, I think../spa
  1011./a>.spa
 class="comment"> * People changing diskettes in the middle of a
 operation deserve./spa
  1012./a>.spa
 class="comment"> * to lose :-)./spa
  1013./a>.spa
 class="comment"> */./spa
  1014./a>int .a href="+code=check_disk_change" class="sref">check_disk_change./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>) 1015./a>{ 1016./a>        struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>; 1017./a>        const struct4.a href="+code=block_device_operations" class="sref">block_device_operations./a> *.a href="+code=bdops" class="sref">bdops./a> = .a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>; 1018./a>        unsigned int .a href="+code=events" class="sref">events./a>; 1019./a> 1020./a>        .a href="+code=events" class="sref">events./a> = .a href="+code=disk_clear_events" class="sref">disk_clear_events./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=DISK_EVENT_MEDIA_CHANGE" class="sref">DISK_EVENT_MEDIA_CHANGE./a> | 1021./a>                                   .a href="+code=DISK_EVENT_EJECT_REQUEST" class="sref">DISK_EVENT_EJECT_REQUEST./a>); 1022./a>        if (!(.a href="+code=events" class="sref">events./a> & .a href="+code=DISK_EVENT_MEDIA_CHANGE" class="sref">DISK_EVENT_MEDIA_CHANGE./a>)) 1023./a>                return 0; 1024./a> 1025./a>        .a href="+code=flush_disk" class="sref">flush_disk./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=true" class="sref">true./a>); 1026./a>        if (.a href="+code=bdops" class="sref">bdops./a>->.a href="+code=revalidate_disk" class="sref">revalidate_disk./a>) 1027./a>                .a href="+code=bdops" class="sref">bdops./a>->.a href="+code=revalidate_disk" class="sref">revalidate_disk./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>); 1028./a>        return 1; 1029./a>} 1030./a> 1031./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a 
href="+code=check_disk_change" class="sref">check_disk_change./a>); 1032./a> 1033./a>void .a href="+code=bd_set_size" class="sref">bd_set_size./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=size" class="sref">size./a>) 1034./a>{ 1035./a>        unsigned .a href="+code=bsize" class="sref">bsize./a> =4.a href="+code=bdev_logical_block_size" class="sref">bdev_logical_block_size./a>(.a href="+code=bdev" class="sref">bdev./a>); 1036./a> 1037./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mutex" class="sref">i_mutex./a>); 1038./a>        .a href="+code=i_size_write" class="sref">i_size_write./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>, .a href="+code=size" class="sref">size./a>); 1039./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mutex" class="sref">i_mutex./a>); 1040./a>        while (.a href="+code=bsize" class="sref">bsize./a> < .a href="+code=PAGE_CACHE_SIZE" class="sref">PAGE_CACHE_SIZE./a>)4{ 1041./a>                if (.a href="+code=size" class="sref">size./a> & .a href="+code=bsize" class="sref">bsize./a>) 1042./a>                        break; 1043./a>                .a href="+code=bsize" class="sref">bsize./a> <<=41; 1044./a>        } 1045./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_block_size" class="sref">bd_block_size./a> =4.a href="+code=bsize" class="sref">bsize./a>; 1046./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_blkbits" class="sref">i_blkbits./a> =4.a href="+code=blksize_bits" 
class="sref">blksize_bits./a>(.a href="+code=bsize" class="sref">bsize./a>); 1047./a>} 1048./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=bd_set_size" class="sref">bd_set_size./a>); 1049./a> 1050./a>static int .a href="+code=__blkdev_put" class="sref">__blkdev_put./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>, int .a href="+code=for_part" class="sref">for_part./a>); 10518"a> 1052./a>.spa
 class="comment">/*./spa
  1053./a>.spa
 class="comment"> * bd_mutex locking:./spa
  1054./a>.spa
 class="comment"> *./spa
  1055./a>.spa
 class="comment"> *  mutex_lock(part->bd_mutex)./spa
  1056./a>.spa
 class="comment"> *    mutex_lock_nested(whole->bd_mutex, 1)./spa
  1057./a>.spa
 class="comment"> */./spa
  1058./a> 1059./a>static int .a href="+code=__blkdev_get" class="sref">__blkdev_get./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>, int .a href="+code=for_part" class="sref">for_part./a>) 1060./a>{ 1061./a>        struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a>; 1062./a>        struct4.a href="+code=module" class="sref">module./a> *.a href="+code=owner" class="sref">owner./a>; 1063./a>        int .a href="+code=ret" class="sref">ret./a>; 1064./a>        int .a href="+code=partno" class="sref">partno./a>; 1065./a>        int .a href="+code=perm" class="sref">perm./a> =40; 1066./a> 1067./a>        if (.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_READ" class="sref">FMODE_READ./a>) 1068./a>                .a href="+code=perm" class="sref">perm./a> |=4.a href="+code=MAY_READ" class="sref">MAY_READ./a>; 1069./a>        if (.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_WRITE" class="sref">FMODE_WRITE./a>) 1070./a>                .a href="+code=perm" class="sref">perm./a> |=4.a href="+code=MAY_WRITE" class="sref">MAY_WRITE./a>; 1071./a>        .spa
 class="comment">/*./spa
  1072./a>.spa
 class="comment">         * hooks: /n/, see "layering violations"../spa
  1073./a>.spa
 class="comment">         */./spa
  1074./a>        if (!.a href="+code=for_part" class="sref">for_part./a>)4{ 1075./a>                .a href="+code=ret" class="sref">ret./a> = .a href="+code=devcgroup_inode_permission" class="sref">devcgroup_inode_permission./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>, .a href="+code=perm" class="sref">perm./a>); 1076./a>                if (.a href="+code=ret" class="sref">ret./a> !=40)4{ 1077./a>                        .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=bdev" class="sref">bdev./a>); 1078./a>                        return .a href="+code=ret" class="sref">ret./a>; 1079./a>                } 1080./a>        } 10818"a> 1082./a> .a href="+code=restart" class="sref">restart./a>: 1083./a> 1084./a>        .a href="+code=ret" class="sref">ret./a> = -.a href="+code=ENXIO" class="sref">ENXIO./a>; 1085./a>        .a href="+code=disk" class="sref">disk./a> =4.a href="+code=get_gendisk" class="sref">get_gendisk./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_dev" class="sref">bd_dev./a>, &.a href="+code=partno" class="sref">partno./a>); 1086./a>        if (!.a href="+code=disk" class="sref">disk./a>) 1087./a>                goto4.a href="+code=out" class="sref">out./a>; 1088./a>        .a href="+code=owner" class="sref">owner./a> = .a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=owner" class="sref">owner./a>; 1089./a> 1090./a>        .a href="+code=disk_block_events" class="sref">disk_block_events./a>(.a href="+code=disk" class="sref">disk./a>); 1091./a>        .a href="+code=mutex_lock_nested" class="sref">mutex_lock_nested./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>, .a href="+code=for_part" class="sref">for_part./a>); 1092./a>        if (!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_openers" class="sref">bd_openers./a>)4{ 1093./a>                .a 
href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a> = .a href="+code=disk" class="sref">disk./a>; 1094./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_queue" class="sref">bd_queue./a> = .a href="+code=disk" class="sref">disk./a>->.a href="+code=queue" class="sref">queue./a>; 1095./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> =4.a href="+code=bdev" class="sref">bdev./a>; 1096./a>                if (!.a href="+code=partno" class="sref">partno./a>)4{ 1097./a>                        struct4.a href="+code=backing_dev_info" class="sref">backing_dev_info./a> *.a href="+code=bdi" class="sref">bdi./a>; 1098./a> 1099./a>                        .a href="+code=ret" class="sref">ret./a> = -.a href="+code=ENXIO" class="sref">ENXIO./a>; 1100./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a> = .a href="+code=disk_get_part" class="sref">disk_get_part./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=partno" class="sref">partno./a>); 1101./a>                        if (!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>) 1102./a>                                goto4.a href="+code=out_clear" class="sref">out_clear./a>; 1103./a> 1104./a>                        .a href="+code=ret" class="sref">ret./a> = 0; 1105./a>                        if (.a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=open" class="sref">open./a>)4{ 1106./a>                                .a href="+code=ret" class="sref">ret./a> = .a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=open" class="sref">open./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>); 1107./a>                                if (.a 
href="+code=ret" class="sref">ret./a> == -.a href="+code=ERESTARTSYS" class="sref">ERESTARTSYS./a>)4{ 1108./a>                                        .spa
 class="comment">/* Lost a race with 'disk' being./spa
  1109./a>.spa
 class="comment">                                         * deleted, try again../spa
  1110./a>.spa
 class="comment">                                         * See md.c./spa
  1111./a>.spa
 class="comment">                                         */./spa
  1112./a>                                        .a href="+code=disk_put_part" class="sref">disk_put_part./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>); 1113./a>                                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a> = .a href="+code=NULL" class="sref">NULL./a>; 1114./a>                                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a> = .a href="+code=NULL" class="sref">NULL./a>; 1115./a>                                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_queue" class="sref">bd_queue./a> = .a href="+code=NULL" class="sref">NULL./a>; 1116./a>                                        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1117./a>                                        .a href="+code=disk_unblock_events" class="sref">disk_unblock_events./a>(.a href="+code=disk" class="sref">disk./a>); 1118./a>                                        .a href="+code=put_disk" class="sref">put_disk./a>(.a href="+code=disk" class="sref">disk./a>); 1119./a>                                        .a href="+code=module_put" class="sref">module_put./a>(.a href="+code=owner" class="sref">owner./a>); 1120./a>                                        goto4.a href="+code=restart" class="sref">restart./a>; 1121./a>                                } 1122./a>                        } 1123./a> 1124./a>                        if (!.a href="+code=ret" class="sref">ret./a>)4{ 1125./a>                                .a href="+code=bd_set_size" class="sref">bd_set_size./a>(.a href="+code=bdev" class="sref">bdev./a>,(.a href="+code=loff_t" class="sref">loff_t./a>).a href="+code=get_capacity" class="sref">get_capacity./a>(.a href="+code=disk" class="sref">disk./a>)<<9); 
1126./a>                                .a href="+code=bdi" class="sref">bdi./a> =4.a href="+code=blk_get_backing_dev_info" class="sref">blk_get_backing_dev_info./a>(.a href="+code=bdev" class="sref">bdev./a>); 1127./a>                                if (.a href="+code=bdi" class="sref">bdi./a> == .a href="+code=NULL" class="sref">NULL./a>) 1128./a>                                        .a href="+code=bdi" class="sref">bdi./a> =4&.a href="+code=default_backing_dev_info" class="sref">default_backing_dev_info./a>; 1129./a>                                .a href="+code=bdev_inode_switch_bdi" class="sref">bdev_inode_switch_bdi./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>, .a href="+code=bdi" class="sref">bdi./a>); 1130./a>                        } 11318"a> 1132./a>                        .spa
 class="comment">/*./spa
  1133./a>.spa
 class="comment">                         * If the device is invalidated, resca
 partition./spa
  1134./a>.spa
 class="comment">                         * if open succeeded or failed with -ENOMEDIUM../spa
  1135./a>.spa
 class="comment">                         * The latter is necessary to prevent ghost./spa
  1136./a>.spa
 class="comment">                         * partitions on a removed medium../spa
  1137./a>.spa
 class="comment">                         */./spa
  1138./a>                        if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_invalidated" class="sref">bd_invalidated./a>)4{ 1139./a>                                if (!.a href="+code=ret" class="sref">ret./a>) 1140./a>                                        .a href="+code=resca
_partitions" class="sref">resca
_partitions./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=bdev" class="sref">bdev./a>); 1141./a>                                else if (.a href="+code=ret" class="sref">ret./a> == -.a href="+code=ENOMEDIUM" class="sref">ENOMEDIUM./a>) 1142./a>                                        .a href="+code=invalidate_partitions" class="sref">invalidate_partitions./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=bdev" class="sref">bdev./a>); 1143./a>                        } 1144./a>                        if (.a href="+code=ret" class="sref">ret./a>) 1145./a>                                goto4.a href="+code=out_clear" class="sref">out_clear./a>; 1146./a>                } else { 1147./a>                        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=whole" class="sref">whole./a>; 1148./a>                        .a href="+code=whole" class="sref">whole./a> =4.a href="+code=bdget_disk" class="sref">bdget_disk./a>(.a href="+code=disk" class="sref">disk./a>, 0); 1149./a>                        .a href="+code=ret" class="sref">ret./a> = -.a href="+code=ENOMEM" class="sref">ENOMEM./a>; 1150./a>                        if (!.a href="+code=whole" class="sref">whole./a>) 1151./a>                                goto4.a href="+code=out_clear" class="sref">out_clear./a>; 1152./a>                        .a href="+code=BUG_ON" class="sref">BUG_ON./a>(.a href="+code=for_part" class="sref">for_part./a>); 1153./a>                        .a href="+code=ret" class="sref">ret./a> = .a href="+code=__blkdev_get" class="sref">__blkdev_get./a>(.a href="+code=whole" class="sref">whole./a>, .a href="+code=mode" class="sref">mode./a>, 1); 1154./a>                        if (.a href="+code=ret" class="sref">ret./a>) 1155./a>                                goto4.a href="+code=out_clear" class="sref">out_clear./a>; 1156./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" 
class="sref">bd_contains./a> =4.a href="+code=whole" class="sref">whole./a>; 1157./a>                        .a href="+code=bdev_inode_switch_bdi" class="sref">bdev_inode_switch_bdi./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>, 1158./a>                                .a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_data" class="sref">i_data./a>..a href="+code=backing_dev_info" class="sref">backing_dev_info./a>); 1159./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a> = .a href="+code=disk_get_part" class="sref">disk_get_part./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=partno" class="sref">partno./a>); 1160./a>                        if (!(.a href="+code=disk" class="sref">disk./a>->.a href="+code=flags" class="sref">flags./a> & .a href="+code=GENHD_FL_UP" class="sref">GENHD_FL_UP./a>)4|| 1161./a>                            !.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>4|| !.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=nr_sects" class="sref">nr_sects./a>)4{ 1162./a>                                .a href="+code=ret" class="sref">ret./a> = -.a href="+code=ENXIO" class="sref">ENXIO./a>; 1163./a>                                goto4.a href="+code=out_clear" class="sref">out_clear./a>; 1164./a>                        } 1165./a>                        .a href="+code=bd_set_size" class="sref">bd_set_size./a>(.a href="+code=bdev" class="sref">bdev./a>, (.a href="+code=loff_t" class="sref">loff_t./a>).a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>->.a href="+code=nr_sects" class="sref">nr_sects./a> << 9); 1166./a>                } 1167./a>        } else { 1168./a>                if (.a href="+code=bdev" 
class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> ==4.a href="+code=bdev" class="sref">bdev./a>)4{ 1169./a>                        .a href="+code=ret" class="sref">ret./a> = 0; 1170./a>                        if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=open" class="sref">open./a>) 1171./a>                                .a href="+code=ret" class="sref">ret./a> = .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=open" class="sref">open./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>); 1172./a>                        .spa
 class="comment">/* the sam
 as first opener case, read comment there */./spa
  1173./a>                        if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_invalidated" class="sref">bd_invalidated./a>)4{ 1174./a>                                if (!.a href="+code=ret" class="sref">ret./a>) 1175./a>                                        .a href="+code=resca
_partitions" class="sref">resca
_partitions./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>, .a href="+code=bdev" class="sref">bdev./a>); 1176./a>                                else if (.a href="+code=ret" class="sref">ret./a> == -.a href="+code=ENOMEDIUM" class="sref">ENOMEDIUM./a>) 1177./a>                                        .a href="+code=invalidate_partitions" class="sref">invalidate_partitions./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>, .a href="+code=bdev" class="sref">bdev./a>); 1178./a>                        } 1179./a>                        if (.a href="+code=ret" class="sref">ret./a>) 1180./a>                                goto4.a href="+code=out_unlock_bdev" class="sref">out_unlock_bdev./a>; 1181./a>                } 1182./a>                .spa
 class="comment">/* only one opener holds refs to the module and disk */./spa
  1183./a>                .a href="+code=put_disk" class="sref">put_disk./a>(.a href="+code=disk" class="sref">disk./a>); 1184./a>                .a href="+code=module_put" class="sref">module_put./a>(.a href="+code=owner" class="sref">owner./a>); 1185./a>        } 1186./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_openers" class="sref">bd_openers./a>++; 1187./a>        if (.a href="+code=for_part" class="sref">for_part./a>) 1188./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part_count" class="sref">bd_part_count./a>++; 1189./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1190./a>        .a href="+code=disk_unblock_events" class="sref">disk_unblock_events./a>(.a href="+code=disk" class="sref">disk./a>); 1191./a>        return 0; 1192./a> 1193./a> .a href="+code=out_clear" class="sref">out_clear./a>: 1194./a>        .a href="+code=disk_put_part" class="sref">disk_put_part./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>); 1195./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a> = .a href="+code=NULL" class="sref">NULL./a>; 1196./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a> = .a href="+code=NULL" class="sref">NULL./a>; 1197./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_queue" class="sref">bd_queue./a> = .a href="+code=NULL" class="sref">NULL./a>; 1198./a>        .a href="+code=bdev_inode_switch_bdi" class="sref">bdev_inode_switch_bdi./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>,4&.a href="+code=default_backing_dev_info" class="sref">default_backing_dev_info./a>); 1199./a>        if (.a href="+code=bdev" class="sref">bdev./a> !=4.a href="+code=bdev" 
class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>) 1200./a>                .a href="+code=__blkdev_put" class="sref">__blkdev_put./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>, .a href="+code=mode" class="sref">mode./a>, 1); 1201./a>        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1202./a> .a href="+code=out_unlock_bdev" class="sref">out_unlock_bdev./a>: 1203./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1204./a>        .a href="+code=disk_unblock_events" class="sref">disk_unblock_events./a>(.a href="+code=disk" class="sref">disk./a>); 1205./a>        .a href="+code=put_disk" class="sref">put_disk./a>(.a href="+code=disk" class="sref">disk./a>); 1206./a>        .a href="+code=module_put" class="sref">module_put./a>(.a href="+code=owner" class="sref">owner./a>); 1207./a> .a href="+code=out" class="sref">out./a>: 1208./a>        .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=bdev" class="sref">bdev./a>); 1209./a> 1210./a>        return .a href="+code=ret" class="sref">ret./a>; 1211./a>} 1212./a> 1213./a>.spa
 class="comment">/**./spa
  1214./a>.spa
 class="comment"> * blkdev_get - open a block device./spa
  1215./a>.spa
 class="comment"> * @bdev: block_device to open./spa
  1216./a>.spa
 class="comment"> * @mode: FMODE_* mask./spa
  1217./a>.spa
 class="comment"> * @holder: exclusive holder identifier./spa
  1218./a>.spa
 class="comment"> *./spa
  1219./a>.spa
 class="comment"> * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is./spa
  1220./a>.spa
 class="comment"> * open with exclusive access.  Specifying %FMODE_EXCL with %NULL./spa
  1221./a>.spa
 class="comment"> * @holder is invalid.  Exclusive opens may nest for the sam
 @holder../spa
  1222./a>.spa
 class="comment"> *./spa
  1223./a>.spa
 class="comment"> * On success, the reference count of @bdev is unchanged.  On failure,./spa
  1224./a>.spa
 class="comment"> * @bdev is put../spa
  1225./a>.spa
 class="comment"> *./spa
  1226./a>.spa
 class="comment"> * CONTEXT:./spa
  1227./a>.spa
 class="comment"> * Might sleep../spa
  1228./a>.spa
 class="comment"> *./spa
  1229./a>.spa
 class="comment"> * RETURNS:./spa
  1230./a>.spa
 class="comment"> * 0 on success, -errno on failure../spa
  1231./a>.spa
 class="comment"> */./spa
  1232./a>int .a href="+code=blkdev_get" class="sref">blkdev_get./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>, void *.a href="+code=holder" class="sref">holder./a>) 1233./a>{ 1234./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=whole" class="sref">whole./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1235./a>        int .a href="+code=res" class="sref">res./a>; 1236./a> 1237./a>        .a href="+code=WARN_ON_ONCE" class="sref">WARN_ON_ONCE./a>((.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_EXCL" class="sref">FMODE_EXCL./a>) && !.a href="+code=holder" class="sref">holder./a>); 1238./a> 1239./a>        if ((.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_EXCL" class="sref">FMODE_EXCL./a>) && .a href="+code=holder" class="sref">holder./a>)4{ 1240./a>                .a href="+code=whole" class="sref">whole./a> =4.a href="+code=bd_start_claiming" class="sref">bd_start_claiming./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=holder" class="sref">holder./a>); 1241./a>                if (.a href="+code=IS_ERR" class="sref">IS_ERR./a>(.a href="+code=whole" class="sref">whole./a>))4{ 1242./a>                        .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=bdev" class="sref">bdev./a>); 1243./a>                        return .a href="+code=PTR_ERR" class="sref">PTR_ERR./a>(.a href="+code=whole" class="sref">whole./a>); 1244./a>                } 1245./a>        } 1246./a> 1247./a>        .a href="+code=res" class="sref">res./a> = .a href="+code=__blkdev_get" class="sref">__blkdev_get./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>, 0); 1248./a> 1249./a>        if (.a href="+code=whole" class="sref">whole./a>)4{ 1250./a>                struct4.a 
href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a> =4.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>; 12518"a> 1252./a>                .spa
 class="comment">/* finish claiming */./spa
  1253./a>                .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1254./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 1255./a> 1256./a>                if (!.a href="+code=res" class="sref">res./a>)4{ 1257./a>                        .a href="+code=BUG_ON" class="sref">BUG_ON./a>(!.a href="+code=bd_may_claim" class="sref">bd_may_claim./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=whole" class="sref">whole./a>, .a href="+code=holder" class="sref">holder./a>)); 1258./a>                        .spa
 class="comment">/*./spa
  1259./a>.spa
 class="comment">                         * Note that for a whole device bd_holders./spa
  1260./a>.spa
 class="comment">                         * will be incremented twice, and bd_holder./spa
  1261./a>.spa
 class="comment">                         * will be set to bd_may_claim before being./spa
  1262./a>.spa
 class="comment">                         * set to holder./spa
  1263./a>.spa
 class="comment">                         */./spa
  1264./a>                        .a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_holders" class="sref">bd_holders./a>++; 1265./a>                        .a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> =4.a href="+code=bd_may_claim" class="sref">bd_may_claim./a>; 1266./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holders" class="sref">bd_holders./a>++; 1267./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> =4.a href="+code=holder" class="sref">holder./a>; 1268./a>                } 1269./a> 1270./a>                .spa
 class="comment">/* tell others that we're done */./spa
  1271./a>                .a href="+code=BUG_ON" class="sref">BUG_ON./a>(.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_claiming" class="sref">bd_claiming./a> !=4.a href="+code=holder" class="sref">holder./a>); 1272./a>                .a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_claiming" class="sref">bd_claiming./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1273./a>                .a href="+code=wake_up_bit" class="sref">wake_up_bit./a>(&.a href="+code=whole" class="sref">whole./a>->.a href="+code=bd_claiming" class="sref">bd_claiming./a>, 0); 1274./a> 1275./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 1276./a> 1277./a>                .spa
 class="comment">/*./spa
  1278./a>.spa
 class="comment">                 * Block event polling for write claims if requested.  Any./spa
  1279./a>.spa
 class="comment">                 * write holder makes the write_holder state stick until./spa
  1280./a>.spa
 class="comment">                 * all are released.  This is good enough and tracking./spa
  1281./a>.spa
 class="comment">                 * individual writeable reference is too fragile given the./spa
  1282./a>.spa
 class="comment">                 * way @mode is used in blkdev_get/put()../spa
  1283./a>.spa
 class="comment">                 */./spa
  1284./a>                if (!.a href="+code=res" class="sref">res./a> && (.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_WRITE" class="sref">FMODE_WRITE./a>) && !.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_write_holder" class="sref">bd_write_holder./a> && 1285./a>                    (.a href="+code=disk" class="sref">disk./a>->.a href="+code=flags" class="sref">flags./a> & .a href="+code=GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE" class="sref">GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE./a>))4{ 1286./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_write_holder" class="sref">bd_write_holder./a> =4.a href="+code=true" class="sref">true./a>; 1287./a>                        .a href="+code=disk_block_events" class="sref">disk_block_events./a>(.a href="+code=disk" class="sref">disk./a>); 1288./a>                } 1289./a> 1290./a>                .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1291./a>                .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=whole" class="sref">whole./a>); 1292./a>        } 1293./a> 1294./a>        return .a href="+code=res" class="sref">res./a>; 1295./a>} 1296./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=blkdev_get" class="sref">blkdev_get./a>); 1297./a> 1298./a>.spa
 class="comment">/**./spa
  1299./a>.spa
 class="comment"> * blkdev_get_by_path - open a block device by nam
./spa
  1300./a>.spa
 class="comment"> * @path: path to the block device to open./spa
  1301./a>.spa
 class="comment"> * @mode: FMODE_* mask./spa
  1302./a>.spa
 class="comment"> * @holder: exclusive holder identifier./spa
  1303./a>.spa
 class="comment"> *./spa
  1304./a>.spa
 class="comment"> * Open the blockdevice described by the device file at @path.  @mode./spa
  1305./a>.spa
 class="comment"> * and @holder are identical to blkdev_get()../spa
  1306./a>.spa
 class="comment"> *./spa
  1307./a>.spa
 class="comment"> * On success, the returned block_device has reference count of one../spa
  1308./a>.spa
 class="comment"> *./spa
  1309./a>.spa
 class="comment"> * CONTEXT:./spa
  1310./a>.spa
 class="comment"> * Might sleep../spa
  1311./a>.spa
 class="comment"> *./spa
  1312./a>.spa
 class="comment"> * RETURNS:./spa
  1313./a>.spa
 class="comment"> * Pointer to block_device on success, ERR_PTR(-errno) on failure../spa
  1314./a>.spa
 class="comment"> */./spa
  1315./a>struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=blkdev_get_by_path" class="sref">blkdev_get_by_path./a>(const char *.a href="+code=path" class="sref">path./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>, 1316./a>                                        void *.a href="+code=holder" class="sref">holder./a>) 1317./a>{ 1318./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>; 1319./a>        int .a href="+code=err" class="sref">err./a>; 1320./a> 1321./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=lookup_bdev" class="sref">lookup_bdev./a>(.a href="+code=path" class="sref">path./a>); 1322./a>        if (.a href="+code=IS_ERR" class="sref">IS_ERR./a>(.a href="+code=bdev" class="sref">bdev./a>)) 1323./a>                return .a href="+code=bdev" class="sref">bdev./a>; 1324./a> 1325./a>        .a href="+code=err" class="sref">err./a> =4.a href="+code=blkdev_get" class="sref">blkdev_get./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>, .a href="+code=holder" class="sref">holder./a>); 1326./a>        if (.a href="+code=err" class="sref">err./a>) 1327./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(.a href="+code=err" class="sref">err./a>); 1328./a> 1329./a>        if ((.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_WRITE" class="sref">FMODE_WRITE./a>) && .a href="+code=bdev_read_only" class="sref">bdev_read_only./a>(.a href="+code=bdev" class="sref">bdev./a>))4{ 1330./a>                .a href="+code=blkdev_put" class="sref">blkdev_put./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>); 1331./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(-.a href="+code=EACCES" class="sref">EACCES./a>); 1332./a>        } 1333./a> 1334./a>        return .a 
href="+code=bdev" class="sref">bdev./a>; 1335./a>} 1336./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=blkdev_get_by_path" class="sref">blkdev_get_by_path./a>); 1337./a> 1338./a>.spa
 class="comment">/**./spa
  1339./a>.spa
 class="comment"> * blkdev_get_by_dev - open a block device by device number./spa
  1340./a>.spa
 class="comment"> * @dev: device number of block device to open./spa
  1341./a>.spa
 class="comment"> * @mode: FMODE_* mask./spa
  1342./a>.spa
 class="comment"> * @holder: exclusive holder identifier./spa
  1343./a>.spa
 class="comment"> *./spa
  1344./a>.spa
 class="comment"> * Open the blockdevice described by device number @dev.  @mode and./spa
  1345./a>.spa
 class="comment"> * @holder are identical to blkdev_get()../spa
  1346./a>.spa
 class="comment"> *./spa
  1347./a>.spa
 class="comment"> * Use it ONLY if you really do not have anything better - i.e. when./spa
  1348./a>.spa
 class="comment"> * you are behind a truly sucky interface and all you are given is a./spa
  1349./a>.spa
 class="comment"> * device number.  _Never_ to be used for internal purposes.  If you./spa
  1350./a>.spa
 class="comment"> * ever need it - reconsider your API../spa
  1351./a>.spa
 class="comment"> *./spa
  1352./a>.spa
 class="comment"> * On success, the returned block_device has reference count of one../spa
  1353./a>.spa
 class="comment"> *./spa
  1354./a>.spa
 class="comment"> * CONTEXT:./spa
  1355./a>.spa
 class="comment"> * Might sleep../spa
  1356./a>.spa
 class="comment"> *./spa
  1357./a>.spa
 class="comment"> * RETURNS:./spa
  1358./a>.spa
 class="comment"> * Pointer to block_device on success, ERR_PTR(-errno) on failure../spa
  1359./a>.spa
 class="comment"> */./spa
  1360./a>struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=blkdev_get_by_dev" class="sref">blkdev_get_by_dev./a>(.a href="+code=dev_t" class="sref">dev_t./a> .a href="+code=dev" class="sref">dev./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>, void *.a href="+code=holder" class="sref">holder./a>) 1361./a>{ 1362./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>; 1363./a>        int .a href="+code=err" class="sref">err./a>; 1364./a> 1365./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=bdget" class="sref">bdget./a>(.a href="+code=dev" class="sref">dev./a>); 1366./a>        if (!.a href="+code=bdev" class="sref">bdev./a>) 1367./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(-.a href="+code=ENOMEM" class="sref">ENOMEM./a>); 1368./a> 1369./a>        .a href="+code=err" class="sref">err./a> =4.a href="+code=blkdev_get" class="sref">blkdev_get./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>, .a href="+code=holder" class="sref">holder./a>); 1370./a>        if (.a href="+code=err" class="sref">err./a>) 1371./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(.a href="+code=err" class="sref">err./a>); 1372./a> 1373./a>        return .a href="+code=bdev" class="sref">bdev./a>; 1374./a>} 1375./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=blkdev_get_by_dev" class="sref">blkdev_get_by_dev./a>); 1376./a> 1377./a>static int .a href="+code=blkdev_open" class="sref">blkdev_open./a>(struct4.a href="+code=inode" class="sref">inode./a> * .a href="+code=inode" class="sref">inode./a>, struct4.a href="+code=file" class="sref">file./a> * .a href="+code=filp" class="sref">filp./a>) 1378./a>{ 1379./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" 
class="sref">bdev./a>; 1380./a> 1381./a>        .spa
 class="comment">/*./spa
  1382./a>.spa
 class="comment">         * Preserve backwards compatibility and allow large file access./spa
  1383./a>.spa
 class="comment">         * even if userspace doesn't ask for it explicitly. Some mkfs./spa
  1384./a>.spa
 class="comment">         * binary needs it. We might want to drop this workaround./spa
  1385./a>.spa
 class="comment">         * during a
 unstable branch../spa
  1386./a>.spa
 class="comment">         */./spa
  1387./a>        .a href="+code=filp" class="sref">filp./a>->.a href="+code=f_flags" class="sref">f_flags./a> |=4.a href="+code=O_LARGEFILE" class="sref">O_LARGEFILE./a>; 1388./a> 1389./a>        if (.a href="+code=filp" class="sref">filp./a>->.a href="+code=f_flags" class="sref">f_flags./a> & .a href="+code=O_NDELAY" class="sref">O_NDELAY./a>) 1390./a>                .a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mode" class="sref">f_mode./a> |=4.a href="+code=FMODE_NDELAY" class="sref">FMODE_NDELAY./a>; 1391./a>        if (.a href="+code=filp" class="sref">filp./a>->.a href="+code=f_flags" class="sref">f_flags./a> & .a href="+code=O_EXCL" class="sref">O_EXCL./a>) 1392./a>                .a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mode" class="sref">f_mode./a> |=4.a href="+code=FMODE_EXCL" class="sref">FMODE_EXCL./a>; 1393./a>        if ((.a href="+code=filp" class="sref">filp./a>->.a href="+code=f_flags" class="sref">f_flags./a> & .a href="+code=O_ACCMODE" class="sref">O_ACCMODE./a>) == 3) 1394./a>                .a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mode" class="sref">f_mode./a> |=4.a href="+code=FMODE_WRITE_IOCTL" class="sref">FMODE_WRITE_IOCTL./a>; 1395./a> 1396./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=bd_acquire" class="sref">bd_acquire./a>(.a href="+code=inode" class="sref">inode./a>); 1397./a>        if (.a href="+code=bdev" class="sref">bdev./a> ==4.a href="+code=NULL" class="sref">NULL./a>) 1398./a>                return -.a href="+code=ENOMEM" class="sref">ENOMEM./a>; 1399./a> 1400./a>        .a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mapping" class="sref">f_mapping./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>->.a href="+code=i_mapping" class="sref">i_mapping./a>; 14018"a> 1402./a>        return .a href="+code=blkdev_get" class="sref">blkdev_get./a>(.a href="+code=bdev" class="sref">bdev./a>, .a 
href="+code=filp" class="sref">filp./a>->.a href="+code=f_mode" class="sref">f_mode./a>, .a href="+code=filp" class="sref">filp./a>); 1403./a>} 1404./a> 1405./a>static int .a href="+code=__blkdev_put" class="sref">__blkdev_put./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>, int .a href="+code=for_part" class="sref">for_part./a>) 1406./a>{ 1407./a>        int .a href="+code=ret" class="sref">ret./a> =40; 1408./a>        struct4.a href="+code=gendisk" class="sref">gendisk./a> *.a href="+code=disk" class="sref">disk./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>; 1409./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=victim" class="sref">victim./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1410./a> 1411./a>        .a href="+code=mutex_lock_nested" class="sref">mutex_lock_nested./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>, .a href="+code=for_part" class="sref">for_part./a>); 1412./a>        if (.a href="+code=for_part" class="sref">for_part./a>) 1413./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part_count" class="sref">bd_part_count./a>--; 1414./a> 1415./a>        if (!--.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_openers" class="sref">bd_openers./a>) { 1416./a>                .a href="+code=WARN_ON_ONCE" class="sref">WARN_ON_ONCE./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holders" class="sref">bd_holders./a>); 1417./a>                .a href="+code=sync_blockdev" class="sref">sync_blockdev./a>(.a href="+code=bdev" class="sref">bdev./a>); 1418./a>                .a href="+code=kill_bdev" class="sref">kill_bdev./a>(.a href="+code=bdev" class="sref">bdev./a>); 1419./a>                
.spa
 class="comment">/* ->release ca
 cause the old bdi to disappear,./spa
  1420./a>.spa
 class="comment">                 * so must switch it out first./spa
  1421./a>.spa
 class="comment">                 */./spa
  1422./a>                .a href="+code=bdev_inode_switch_bdi" class="sref">bdev_inode_switch_bdi./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_inode" class="sref">bd_inode./a>, 1423./a>                                        &.a href="+code=default_backing_dev_info" class="sref">default_backing_dev_info./a>); 1424./a>        } 1425./a>        if (.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> ==4.a href="+code=bdev" class="sref">bdev./a>) { 1426./a>                if (.a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=release" class="sref">release./a>) 1427./a>                        .a href="+code=ret" class="sref">ret./a> =4.a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=release" class="sref">release./a>(.a href="+code=disk" class="sref">disk./a>, .a href="+code=mode" class="sref">mode./a>); 1428./a>        } 1429./a>        if (!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_openers" class="sref">bd_openers./a>) { 1430./a>                struct4.a href="+code=module" class="sref">module./a> *.a href="+code=owner" class="sref">owner./a> =4.a href="+code=disk" class="sref">disk./a>->.a href="+code=fops" class="sref">fops./a>->.a href="+code=owner" class="sref">owner./a>; 14318"a> 1432./a>                .a href="+code=disk_put_part" class="sref">disk_put_part./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a>); 1433./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_part" class="sref">bd_part./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1434./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1435./a>                if (.a href="+code=bdev" class="sref">bdev./a> !=4.a href="+code=bdev" 
class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>) 1436./a>                        .a href="+code=victim" class="sref">victim./a> =4.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>; 1437./a>                .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1438./a> 1439./a>                .a href="+code=put_disk" class="sref">put_disk./a>(.a href="+code=disk" class="sref">disk./a>); 1440./a>                .a href="+code=module_put" class="sref">module_put./a>(.a href="+code=owner" class="sref">owner./a>); 1441./a>        } 1442./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1443./a>        .a href="+code=bdput" class="sref">bdput./a>(.a href="+code=bdev" class="sref">bdev./a>); 1444./a>        if (.a href="+code=victim" class="sref">victim./a>) 1445./a>                .a href="+code=__blkdev_put" class="sref">__blkdev_put./a>(.a href="+code=victim" class="sref">victim./a>, .a href="+code=mode" class="sref">mode./a>, 1); 1446./a>        return .a href="+code=ret" class="sref">ret./a>; 1447./a>} 1448./a> 1449./a>int .a href="+code=blkdev_put" class="sref">blkdev_put./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a>) 1450./a>{ 1451./a>        .a href="+code=mutex_lock" class="sref">mutex_lock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1452./a> 1453./a>        if (.a href="+code=mode" class="sref">mode./a> & .a href="+code=FMODE_EXCL" class="sref">FMODE_EXCL./a>) { 1454./a>                .a href="+code=bool" class="sref">bool./a> .a href="+code=bdev_free" 
class="sref">bdev_free./a>; 1455./a> 1456./a>                .spa
 class="comment">/*./spa
  1457./a>.spa
 class="comment">                 * Release a claim on the device.  The holder fields./spa
  1458./a>.spa
 class="comment">                 * are protected with bdev_lock.  bd_mutex is to./spa
  1459./a>.spa
 class="comment">                 * synchronize disk_holder unlinking../spa
  1460./a>.spa
 class="comment">                 */./spa
  1461./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 1462./a> 1463./a>                .a href="+code=WARN_ON_ONCE" class="sref">WARN_ON_ONCE./a>(--.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holders" class="sref">bd_holders./a> < 0); 1464./a>                .a href="+code=WARN_ON_ONCE" class="sref">WARN_ON_ONCE./a>(--.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>->.a href="+code=bd_holders" class="sref">bd_holders./a> < 0); 1465./a> 1466./a>                .spa
 class="comment">/* bd_contains might point to self, check in a separate step */./spa
  1467./a>                if ((.a href="+code=bdev_free" class="sref">bdev_free./a> = !.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holders" class="sref">bd_holders./a>)) 1468./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1469./a>                if (!.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>->.a href="+code=bd_holders" class="sref">bd_holders./a>) 1470./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_contains" class="sref">bd_contains./a>->.a href="+code=bd_holder" class="sref">bd_holder./a> =4.a href="+code=NULL" class="sref">NULL./a>; 14718"a> 1472./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=bdev_lock" class="sref">bdev_lock./a>); 1473./a> 1474./a>                .spa
 class="comment">/*./spa
  1475./a>.spa
 class="comment">                 * If this was the last claim, remove holder link and./spa
  1476./a>.spa
 class="comment">                 * unblock evpoll if it was a write holder../spa
  1477./a>.spa
 class="comment">                 */./spa
  1478./a>                if (.a href="+code=bdev_free" class="sref">bdev_free./a> && .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_write_holder" class="sref">bd_write_holder./a>) { 1479./a>                        .a href="+code=disk_unblock_events" class="sref">disk_unblock_events./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>); 1480./a>                        .a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_write_holder" class="sref">bd_write_holder./a> =4.a href="+code=false" class="sref">false./a>; 1481./a>                } 1482./a>        } 1483./a> 1484./a>        .spa
 class="comment">/*./spa
  1485./a>.spa
 class="comment">         * Trigger event checking a
d tell drivers to flush MEDIA_CHANGE./spa
  1486./a>.spa
 class="comment">         * event.  This is to ensure detection of media removal comma
ded./spa
  1487./a>.spa
 class="comment">         * from userla
d - e.g. eject(1)../spa
  1488./a>.spa
 class="comment">         */./spa
  1489./a>        .a href="+code=disk_flush_events" class="sref">disk_flush_events./a>(.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_disk" class="sref">bd_disk./a>, .a href="+code=DISK_EVENT_MEDIA_CHANGE" class="sref">DISK_EVENT_MEDIA_CHANGE./a>); 1490./a> 1491./a>        .a href="+code=mutex_unlock" class="sref">mutex_unlock./a>(&.a href="+code=bdev" class="sref">bdev./a>->.a href="+code=bd_mutex" class="sref">bd_mutex./a>); 1492./a> 1493./a>        return .a href="+code=__blkdev_put" class="sref">__blkdev_put./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>, 0); 1494./a>} 1495./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=blkdev_put" class="sref">blkdev_put./a>); 1496./a> 1497./a>static int .a href="+code=blkdev_close" class="sref">blkdev_close./a>(struct4.a href="+code=inode" class="sref">inode./a> * .a href="+code=inode" class="sref">inode./a>, struct4.a href="+code=file" class="sref">file./a> * .a href="+code=filp" class="sref">filp./a>) 1498./a>{ 1499./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=I_BDEV" class="sref">I_BDEV./a>(.a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>->.a href="+code=host" class="sref">host./a>); 1500./a> 1501./a>        return .a href="+code=blkdev_put" class="sref">blkdev_put./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=filp" class="sref">filp./a>->.a href="+code=f_mode" class="sref">f_mode./a>); 1502./a>} 1503./a> 1504./a>static long .a href="+code=block_ioctl" class="sref">block_ioctl./a>(struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a>, unsigned .a href="+code=cmd" class="sref">cmd./a>, unsigned long .a href="+code=arg" class="sref">arg./a>) 1505./a>{ 1506./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a 
href="+code=bdev" class="sref">bdev./a> =4.a href="+code=I_BDEV" class="sref">I_BDEV./a>(.a href="+code=file" class="sref">file./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>->.a href="+code=host" class="sref">host./a>); 1507./a>        .a href="+code=fmode_t" class="sref">fmode_t./a> .a href="+code=mode" class="sref">mode./a> =4.a href="+code=file" class="sref">file./a>->.a href="+code=f_mode" class="sref">f_mode./a>; 1508./a> 1509./a>        .spa
 class="comment">/*./spa
  1510./a>.spa
 class="comment">         * O_NDELAY ca
 be altered using fcntl(.., F_SETFL, ..), so we have./spa
  1511./a>.spa
 class="comment">         * to updated it before every ioctl../spa
  1512./a>.spa
 class="comment">         */./spa
  1513./a>        if (.a href="+code=file" class="sref">file./a>->.a href="+code=f_flags" class="sref">f_flags./a> & .a href="+code=O_NDELAY" class="sref">O_NDELAY./a>) 1514./a>                .a href="+code=mode" class="sref">mode./a> |=4.a href="+code=FMODE_NDELAY" class="sref">FMODE_NDELAY./a>; 1515./a>        else 1516./a>                .a href="+code=mode" class="sref">mode./a> &= ~.a href="+code=FMODE_NDELAY" class="sref">FMODE_NDELAY./a>; 1517./a> 1518./a>        return .a href="+code=blkdev_ioctl" class="sref">blkdev_ioctl./a>(.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=mode" class="sref">mode./a>, .a href="+code=cmd" class="sref">cmd./a>, .a href="+code=arg" class="sref">arg./a>); 1519./a>} 1520./a> 1521./a>.spa
 class="comment">/*./spa
  1522./a>.spa
 class="comment"> * Write data to the block device.  Only inte
ded for the block device itself./spa
  1523./a>.spa
 class="comment"> * a
d the raw driver which basically is a fake block device../spa
  1524./a>.spa
 class="comment"> *./spa
  1525./a>.spa
 class="comment"> * Does not take i_mutex for the write a
d thus is not for general purpose./spa
  1526./a>.spa
 class="comment"> * use../spa
  1527./a>.spa
 class="comment"> */./spa
  1528./a>.a href="+code=ssize_t" class="sref">ssize_t./a> .a href="+code=blkdev_aio_write" class="sref">blkdev_aio_write./a>(struct4.a href="+code=kiocb" class="sref">kiocb./a> *.a href="+code=iocb" class="sref">iocb./a>, const struct4.a href="+code=iovec" class="sref">iovec./a> *.a href="+code=iov" class="sref">iov./a>, 1529./a>                         unsigned long .a href="+code=nr_segs" class="sref">nr_segs./a>, .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=pos" class="sref">pos./a>) 1530./a>{ 1531./a>        struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a> =4.a href="+code=iocb" class="sref">iocb./a>->.a href="+code=ki_filp" class="sref">ki_filp./a>; 1532./a>        struct4.a href="+code=blk_plug" class="sref">blk_plug./a> .a href="+code=plug" class="sref">plug./a>; 1533./a>        .a href="+code=ssize_t" class="sref">ssize_t./a> .a href="+code=ret" class="sref">ret./a>; 1534./a> 1535./a>        .a href="+code=BUG_ON" class="sref">BUG_ON./a>(.a href="+code=iocb" class="sref">iocb./a>->.a href="+code=ki_pos" class="sref">ki_pos./a> !=4.a href="+code=pos" class="sref">pos./a>); 1536./a> 1537./a>        .a href="+code=blk_start_plug" class="sref">blk_start_plug./a>(&.a href="+code=plug" class="sref">plug./a>); 1538./a>        .a href="+code=ret" class="sref">ret./a> =4.a href="+code=__generic_file_aio_write" class="sref">__generic_file_aio_write./a>(.a href="+code=iocb" class="sref">iocb./a>, .a href="+code=iov" class="sref">iov./a>, .a href="+code=nr_segs" class="sref">nr_segs./a>, &.a href="+code=iocb" class="sref">iocb./a>->.a href="+code=ki_pos" class="sref">ki_pos./a>); 1539./a>        if (.a href="+code=ret" class="sref">ret./a> > 0 || .a href="+code=ret" class="sref">ret./a> == -.a href="+code=EIOCBQUEUED" class="sref">EIOCBQUEUED./a>) { 1540./a>                .a href="+code=ssize_t" class="sref">ssize_t./a> .a href="+code=err" class="sref">err./a>; 15418"a> 1542./a>                .a 
href="+code=err" class="sref">err./a> =4.a href="+code=generic_write_sync" class="sref">generic_write_sync./a>(.a href="+code=file" class="sref">file./a>, .a href="+code=pos" class="sref">pos./a>, .a href="+code=ret" class="sref">ret./a>); 1543./a>                if (.a href="+code=err" class="sref">err./a> < 0 && .a href="+code=ret" class="sref">ret./a> > 0) 1544./a>                        .a href="+code=ret" class="sref">ret./a> =4.a href="+code=err" class="sref">err./a>; 1545./a>        } 1546./a>        .a href="+code=blk_finish_plug" class="sref">blk_finish_plug./a>(&.a href="+code=plug" class="sref">plug./a>); 1547./a>        return .a href="+code=ret" class="sref">ret./a>; 1548./a>} 1549./a>.a href="+code=EXPORT_SYMBOL_GPL" class="sref">EXPORT_SYMBOL_GPL./a>(.a href="+code=blkdev_aio_write" class="sref">blkdev_aio_write./a>); 1550./a> 1551./a>static .a href="+code=ssize_t" class="sref">ssize_t./a> .a href="+code=blkdev_aio_read" class="sref">blkdev_aio_read./a>(struct4.a href="+code=kiocb" class="sref">kiocb./a> *.a href="+code=iocb" class="sref">iocb./a>, const struct4.a href="+code=iovec" class="sref">iovec./a> *.a href="+code=iov" class="sref">iov./a>, 1552./a>                         unsigned long .a href="+code=nr_segs" class="sref">nr_segs./a>, .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=pos" class="sref">pos./a>) 1553./a>{ 1554./a>        struct4.a href="+code=file" class="sref">file./a> *.a href="+code=file" class="sref">file./a> =4.a href="+code=iocb" class="sref">iocb./a>->.a href="+code=ki_filp" class="sref">ki_filp./a>; 1555./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=bd_inode" class="sref">bd_inode./a> =4.a href="+code=file" class="sref">file./a>->.a href="+code=f_mapping" class="sref">f_mapping./a>->.a href="+code=host" class="sref">host./a>; 1556./a>        .a href="+code=loff_t" class="sref">loff_t./a> .a href="+code=size" class="sref">size./a> =4.a href="+code=i_size_read" 
class="sref">i_size_read./a>(.a href="+code=bd_inode" class="sref">bd_inode./a>); 1557./a> 1558./a>        if (.a href="+code=pos" class="sref">pos./a> >=4.a href="+code=size" class="sref">size./a>) 1559./a>                return 0; 1560./a> 1561./a>        .a href="+code=size" class="sref">size./a> -=4.a href="+code=pos" class="sref">pos./a>; 1562./a>        if (.a href="+code=size" class="sref">size./a> < .a href="+code=INT_MAX" class="sref">INT_MAX./a>) 1563./a>                .a href="+code=nr_segs" class="sref">nr_segs./a> =4.a href="+code=iov_shorten" class="sref">iov_shorten./a>((struct4.a href="+code=iovec" class="sref">iovec./a> *).a href="+code=iov" class="sref">iov./a>, .a href="+code=nr_segs" class="sref">nr_segs./a>, .a href="+code=size" class="sref">size./a>); 1564./a>        return .a href="+code=generic_file_aio_read" class="sref">generic_file_aio_read./a>(.a href="+code=iocb" class="sref">iocb./a>, .a href="+code=iov" class="sref">iov./a>, .a href="+code=nr_segs" class="sref">nr_segs./a>, .a href="+code=pos" class="sref">pos./a>); 1565./a>} 1566./a> 1567./a>.spa
 class="comment">/*./spa
  1568./a>.spa
 class="comment"> * Try to release a page associated with block device when the system./spa
  1569./a>.spa
 class="comment"> * is under memory pressure../spa
  1570./a>.spa
 class="comment"> */./spa
  1571./a>static int .a href="+code=blkdev_releasepage" class="sref">blkdev_releasepage./a>(struct4.a href="+code=page" class="sref">page./a> *.a href="+code=page" class="sref">page./a>, .a href="+code=gfp_t" class="sref">gfp_t./a> .a href="+code=wait" class="sref">wait./a>) 1572./a>{ 1573./a>        struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=super" class="sref">super./a> =4.a href="+code=BDEV_I" class="sref">BDEV_I./a>(.a href="+code=page" class="sref">page./a>->.a href="+code=mapping" class="sref">mapping./a>->.a href="+code=host" class="sref">host./a>)->.a href="+code=bdev" class="sref">bdev./a>..a href="+code=bd_super" class="sref">bd_super./a>; 1574./a> 1575./a>        if (.a href="+code=super" class="sref">super./a> && .a href="+code=super" class="sref">super./a>->.a href="+code=s_op" class="sref">s_op./a>->.a href="+code=bdev_try_to_free_page" class="sref">bdev_try_to_free_page./a>) 1576./a>                return .a href="+code=super" class="sref">super./a>->.a href="+code=s_op" class="sref">s_op./a>->.a href="+code=bdev_try_to_free_page" class="sref">bdev_try_to_free_page./a>(.a href="+code=super" class="sref">super./a>, .a href="+code=page" class="sref">page./a>, .a href="+code=wait" class="sref">wait./a>); 1577./a> 1578./a>        return .a href="+code=try_to_free_buffers" class="sref">try_to_free_buffers./a>(.a href="+code=page" class="sref">page./a>); 1579./a>} 1580./a> 1581./a>static const struct4.a href="+code=address_space_operations" class="sref">address_space_operations./a> .a href="+code=def_blk_aops" class="sref">def_blk_aops./a> =4{ 1582./a>        ..a href="+code=readpage" class="sref">readpage./a>       =4.a href="+code=blkdev_readpage" class="sref">blkdev_readpage./a>, 1583./a>        ..a href="+code=writepage" class="sref">writepage./a>      =4.a href="+code=blkdev_writepage" class="sref">blkdev_writepage./a>, 1584./a>        ..a href="+code=write_begin" class="sref">write_begin./a>    =4.a 
href="+code=blkdev_write_begin" class="sref">blkdev_write_begin./a>, 1585./a>        ..a href="+code=write_end" class="sref">write_end./a>      =4.a href="+code=blkdev_write_end" class="sref">blkdev_write_end./a>, 1586./a>        ..a href="+code=writepages" class="sref">writepages./a>     =4.a href="+code=generic_writepages" class="sref">generic_writepages./a>, 1587./a>        ..a href="+code=releasepage" class="sref">releasepage./a>    =4.a href="+code=blkdev_releasepage" class="sref">blkdev_releasepage./a>, 1588./a>        ..a href="+code=direct_IO" class="sref">direct_IO./a>      =4.a href="+code=blkdev_direct_IO" class="sref">blkdev_direct_IO./a>, 1589./a>}; 1590./a> 1591./a>const struct4.a href="+code=file_operations" class="sref">file_operations./a> .a href="+code=def_blk_fops" class="sref">def_blk_fops./a> =4{ 1592./a>        ..a href="+code=open" class="sref">open./a>           =4.a href="+code=blkdev_open" class="sref">blkdev_open./a>, 1593./a>        ..a href="+code=release" class="sref">release./a>        =4.a href="+code=blkdev_close" class="sref">blkdev_close./a>, 1594./a>        ..a href="+code=llseek" class="sref">llseek./a>         =4.a href="+code=block_llseek" class="sref">block_llseek./a>, 1595./a>        ..a href="+code=read" class="sref">read./a>           =4.a href="+code=do_sync_read" class="sref">do_sync_read./a>, 1596./a>        ..a href="+code=write" class="sref">write./a>          =4.a href="+code=do_sync_write" class="sref">do_sync_write./a>, 1597./a>        ..a href="+code=aio_read" class="sref">aio_read./a>       =4.a href="+code=blkdev_aio_read" class="sref">blkdev_aio_read./a>, 1598./a>        ..a href="+code=aio_write" class="sref">aio_write./a>      =4.a href="+code=blkdev_aio_write" class="sref">blkdev_aio_write./a>, 1599./a>        ..a href="+code=mmap" class="sref">mmap./a>           =4.a href="+code=generic_file_mmap" class="sref">generic_file_mmap./a>, 1600./a>        ..a href="+code=fsync" class="sref">fsync./a>          =4.a 
href="+code=blkdev_fsync" class="sref">blkdev_fsync./a>, 1601./a>        ..a href="+code=unlocked_ioctl" class="sref">unlocked_ioctl./a> =4.a href="+code=block_ioctl" class="sref">block_ioctl./a>, 1602./a>#ifdef4.a href="+code=CONFIG_COMPAT" class="sref">CONFIG_COMPAT./a> 1603./a>        ..a href="+code=compat_ioctl" class="sref">compat_ioctl./a>   =4.a href="+code=compat_blkdev_ioctl" class="sref">compat_blkdev_ioctl./a>, 1604./a>#endif 1605./a>        ..a href="+code=splice_read" class="sref">splice_read./a>    =4.a href="+code=generic_file_splice_read" class="sref">generic_file_splice_read./a>, 1606./a>        ..a href="+code=splice_write" class="sref">splice_write./a>   =4.a href="+code=generic_file_splice_write" class="sref">generic_file_splice_write./a>, 1607./a>}; 1608./a> 1609./a>int .a href="+code=ioctl_by_bdev" class="sref">ioctl_by_bdev./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, unsigned .a href="+code=cmd" class="sref">cmd./a>, unsigned long .a href="+code=arg" class="sref">arg./a>) 1610./a>{ 1611./a>        int .a href="+code=res" class="sref">res./a>; 1612./a>        .a href="+code=mm_segment_t" class="sref">mm_segment_t./a> .a href="+code=old_fs" class="sref">old_fs./a> =4.a href="+code=get_fs" class="sref">get_fs./a>(); 1613./a>        .a href="+code=set_fs" class="sref">set_fs./a>(.a href="+code=KERNEL_DS" class="sref">KERNEL_DS./a>); 1614./a>        .a href="+code=res" class="sref">res./a> =4.a href="+code=blkdev_ioctl" class="sref">blkdev_ioctl./a>(.a href="+code=bdev" class="sref">bdev./a>, 0, .a href="+code=cmd" class="sref">cmd./a>, .a href="+code=arg" class="sref">arg./a>); 1615./a>        .a href="+code=set_fs" class="sref">set_fs./a>(.a href="+code=old_fs" class="sref">old_fs./a>); 1616./a>        return .a href="+code=res" class="sref">res./a>; 1617./a>} 1618./a> 1619./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=ioctl_by_bdev" 
class="sref">ioctl_by_bdev./a>); 1620./a> 1621./a>.spa
 class="comment">/**./spa
  1622./a>.spa
 class="comment"> * lookup_bdev  - lookup a struct4block_device by nam
./spa
  1623./a>.spa
 class="comment"> * @pathnam
:   special file representing the block device./spa
  1624./a>.spa
 class="comment"> *./spa
  1625./a>.spa
 class="comment"> * Get a reference to the blockdevice at @pathnam
 in the current./spa
  1626./a>.spa
 class="comment"> * nam
space if possible a
d return it.  Return ERR_PTR(error)./spa
  1627./a>.spa
 class="comment"> * otherwise../spa
  1628./a>.spa
 class="comment"> */./spa
  1629./a>struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=lookup_bdev" class="sref">lookup_bdev./a>(const char *.a href="+code=pathnam
" class="sref">pathnam
./a>) 1630./a>{ 1631./a>        struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>; 1632./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>; 1633./a>        struct4.a href="+code=path" class="sref">path./a> .a href="+code=path" class="sref">path./a>; 1634./a>        int .a href="+code=error" class="sref">error./a>; 1635./a> 1636./a>        if (!.a href="+code=pathnam
" class="sref">pathnam
./a> || !*.a href="+code=pathnam
" class="sref">pathnam
./a>) 1637./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(-.a href="+code=EINVAL" class="sref">EINVAL./a>); 1638./a> 1639./a>        .a href="+code=error" class="sref">error./a> =4.a href="+code=kern_path" class="sref">kern_path./a>(.a href="+code=pathnam
" class="sref">pathnam
./a>, .a href="+code=LOOKUP_FOLLOW" class="sref">LOOKUP_FOLLOW./a>, &.a href="+code=path" class="sref">path./a>); 1640./a>        if (.a href="+code=error" class="sref">error./a>) 1641./a>                return .a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(.a href="+code=error" class="sref">error./a>); 1642./a> 1643./a>        .a href="+code=inode" class="sref">inode./a> =4.a href="+code=path" class="sref">path./a>..a href="+code=dentry" class="sref">dentry./a>->.a href="+code=d_inode" class="sref">d_inode./a>; 1644./a>        .a href="+code=error" class="sref">error./a> =4-.a href="+code=ENOTBLK" class="sref">ENOTBLK./a>; 1645./a>        if (!.a href="+code=S_ISBLK" class="sref">S_ISBLK./a>(.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_mode" class="sref">i_mode./a>)) 1646./a>                goto .a href="+code=fail" class="sref">fail./a>; 1647./a>        .a href="+code=error" class="sref">error./a> =4-.a href="+code=EACCES" class="sref">EACCES./a>; 1648./a>        if (.a href="+code=path" class="sref">path./a>..a href="+code=mnt" class="sref">mnt./a>->.a href="+code=mnt_flags" class="sref">mnt_flags./a> & .a href="+code=MNT_NODEV" class="sref">MNT_NODEV./a>) 1649./a>                goto .a href="+code=fail" class="sref">fail./a>; 1650./a>        .a href="+code=error" class="sref">error./a> =4-.a href="+code=ENOMEM" class="sref">ENOMEM./a>; 1651./a>        .a href="+code=bdev" class="sref">bdev./a> =4.a href="+code=bd_acquire" class="sref">bd_acquire./a>(.a href="+code=inode" class="sref">inode./a>); 1652./a>        if (!.a href="+code=bdev" class="sref">bdev./a>) 1653./a>                goto .a href="+code=fail" class="sref">fail./a>; 1654./a>.a href="+code=out" class="sref">out./a>: 1655./a>        .a href="+code=path_put" class="sref">path_put./a>(&.a href="+code=path" class="sref">path./a>); 1656./a>        return .a href="+code=bdev" class="sref">bdev./a>; 1657./a>.a href="+code=fail" class="sref">fail./a>: 1658./a>        .a 
href="+code=bdev" class="sref">bdev./a> =4.a href="+code=ERR_PTR" class="sref">ERR_PTR./a>(.a href="+code=error" class="sref">error./a>); 1659./a>        goto .a href="+code=out" class="sref">out./a>; 1660./a>} 1661./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=lookup_bdev" class="sref">lookup_bdev./a>); 1662./a> 1663./a>int .a href="+code=__invalidate_device" class="sref">__invalidate_device./a>(struct4.a href="+code=block_device" class="sref">block_device./a> *.a href="+code=bdev" class="sref">bdev./a>, .a href="+code=bool" class="sref">bool./a> .a href="+code=kill_dirty" class="sref">kill_dirty./a>) 1664./a>{ 1665./a>        struct4.a href="+code=super_block" class="sref">super_block./a> *.a href="+code=sb" class="sref">sb./a> =4.a href="+code=get_super" class="sref">get_super./a>(.a href="+code=bdev" class="sref">bdev./a>); 1666./a>        int .a href="+code=res" class="sref">res./a> =40; 1667./a> 1668./a>        if (.a href="+code=sb" class="sref">sb./a>) { 1669./a>                .spa
 class="comment">/*./spa
  1670./a>.spa
 class="comment">                 * no need to lock the super, get_super holds the./spa
  1671./a>.spa
 class="comment">                 * read mutex so the filesystem ca
not go away./spa
  1672./a>.spa
 class="comment">                 * under us (->put_super runs with the write lock./spa
  1673./a>.spa
 class="comment">                 * hold)../spa
  1674./a>.spa
 class="comment">                 */./spa
  1675./a>                .a href="+code=shrink_dcache_sb" class="sref">shrink_dcache_sb./a>(.a href="+code=sb" class="sref">sb./a>); 1676./a>                .a href="+code=res" class="sref">res./a> =4.a href="+code=invalidate_inodes" class="sref">invalidate_inodes./a>(.a href="+code=sb" class="sref">sb./a>, .a href="+code=kill_dirty" class="sref">kill_dirty./a>); 1677./a>                .a href="+code=drop_super" class="sref">drop_super./a>(.a href="+code=sb" class="sref">sb./a>); 1678./a>        } 1679./a>        .a href="+code=invalidate_bdev" class="sref">invalidate_bdev./a>(.a href="+code=bdev" class="sref">bdev./a>); 1680./a>        return .a href="+code=res" class="sref">res./a>; 1681./a>} 1682./a>.a href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOL./a>(.a href="+code=__invalidate_device" class="sref">__invalidate_device./a>); 1683./a> 1684./a>void .a href="+code=iterate_bdevs" class="sref">iterate_bdevs./a>(void (*.a href="+code=func" class="sref">func./a>)(struct4.a href="+code=block_device" class="sref">block_device./a> *, void *), void *.a href="+code=arg" class="sref">arg./a>) 1685./a>{ 1686./a>        struct4.a href="+code=inode" class="sref">inode./a> *.a href="+code=inode" class="sref">inode./a>, *.a href="+code=old_inode" class="sref">old_inode./a> =4.a href="+code=NULL" class="sref">NULL./a>; 1687./a> 1688./a>        .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=inode_sb_list_lock" class="sref">inode_sb_list_lock./a>); 1689./a>        .a href="+code=list_for_each_entry" class="sref">list_for_each_entry./a>(.a href="+code=inode" class="sref">inode./a>, &.a href="+code=blockdev_superblock" class="sref">blockdev_superblock./a>->.a href="+code=s_inodes" class="sref">s_inodes./a>, .a href="+code=i_sb_list" class="sref">i_sb_list./a>) { 1690./a>                struct4.a href="+code=address_space" class="sref">address_space./a> *.a href="+code=mapping" class="sref">mapping./a> =4.a href="+code=inode" class="sref">inode./a>->.a 
href="+code=i_mapping" class="sref">i_mapping./a>; 16918"a> 1692./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_lock" class="sref">i_lock./a>); 1693./a>                if (.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_state" class="sref">i_state./a> & (.a href="+code=I_FREEING" class="sref">I_FREEING./a>|.a href="+code=I_WILL_FREE" class="sref">I_WILL_FREE./a>|.a href="+code=I_NEW" class="sref">I_NEW./a>) || 1694./a>                    .a href="+code=mapping" class="sref">mapping./a>->.a href="+code=nrpages" class="sref">nrpages./a> == 0) { 1695./a>                        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_lock" class="sref">i_lock./a>); 1696./a>                        continue; 1697./a>                } 1698./a>                .a href="+code=__iget" class="sref">__iget./a>(.a href="+code=inode" class="sref">inode./a>); 1699./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=inode" class="sref">inode./a>->.a href="+code=i_lock" class="sref">i_lock./a>); 1700./a>                .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=inode_sb_list_lock" class="sref">inode_sb_list_lock./a>); 1701./a>                .spa
 class="comment">/*./spa
  1702./a>.spa
 class="comment">                 * We hold a reference to 'inode' so it couldn't have been./spa
  1703./a>.spa
 class="comment">                 * removed from s_inodes list while we dropped the./spa
  1704./a>.spa
 class="comment">                 * inode_sb_list_lock.  We ca
not iput the inode now as we ca
./spa
  1705./a>.spa
 class="comment">                 * be holding the last reference a
d we ca
not iput it under./spa
  1706./a>.spa
 class="comment">                 * inode_sb_list_lock. So we keep the reference a
d iput it./spa
  1707./a>.spa
 class="comment">                 * later../spa
  1708./a>.spa
 class="comment">                 */./spa
  1709./a>                .a href="+code=iput" class="sref">iput./a>(.a href="+code=old_inode" class="sref">old_inode./a>); 1710./a>                .a href="+code=old_inode" class="sref">old_inode./a> =4.a href="+code=inode" class="sref">inode./a>; 17118"a> 1712./a>                .a href="+code=func" class="sref">func./a>(.a href="+code=I_BDEV" class="sref">I_BDEV./a>(.a href="+code=inode" class="sref">inode./a>), .a href="+code=arg" class="sref">arg./a>); 1713./a> 1714./a>                .a href="+code=spin_lock" class="sref">spin_lock./a>(&.a href="+code=inode_sb_list_lock" class="sref">inode_sb_list_lock./a>); 1715./a>        } 1716./a>        .a href="+code=spin_unlock" class="sref">spin_unlock./a>(&.a href="+code=inode_sb_list_lock" class="sref">inode_sb_list_lock./a>); 1717./a>        .a href="+code=iput" class="sref">iput./a>(.a href="+code=old_inode" class="sref">old_inode./a>); 1718./a>} 1719./a>./pre>./div>


./div>