/*
 *  linux/fs/ext4/fsync.c
 *
 *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
 *  from
 *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
 *                      Laboratoire MASI - Institut Blaise Pascal
 *                      Universite Pierre et Marie Curie (Paris VI)
 *  from
 *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4fs fsync primitive
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 *  Removed unnecessary code duplication for little endian machines
 *  and excessive __inline__s.
 *        Andi Kleen, 1997
 *
 * Major simplifications and cleanup - we only need to do the metadata, because
 * we can depend on generic_block_fdatasync() to sync the data blocks.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>

#include "ext4.h"
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

static void dump_completed_IO(struct inode *inode)
{
#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;
        unsigned long flags;

        if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
                ext4_debug("inode %lu completed_io list is empty\n",
                           inode->i_ino);
                return;
        }

        ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
        spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
        list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
                           io, inode->i_ino, io0, io1);
        }
        spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}
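
/*
 * Editor's note (illustration, not part of the original fsync.c): the walk
 * above uses the generic list_head/container_of pattern - the list node is
 * embedded in a larger object and container_of() recovers the enclosing
 * object from a pointer to the node.  The guarded sketch below shows that
 * pattern with hypothetical names (struct demo_item, demo_walk); it is not
 * ext4 code.
 */
#if 0
struct demo_item {
        int                     value;
        struct list_head        list;   /* links the item into a demo list */
};

static void demo_walk(struct list_head *head)
{
        struct demo_item *item, *same;

        /* visit every demo_item linked through item->list */
        list_for_each_entry(item, head, list) {
                /* map the embedded list_head back to its demo_item */
                same = container_of(&item->list, struct demo_item, list);
                WARN_ON(same != item);
        }
}
#endif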

/*
 * This function is called from ext4_sync_file().
 *
 * When IO is completed, the work to convert unwritten extents to
 * written is queued on a workqueue but may not get scheduled
 * immediately. When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps track of a list of pending/completed IO that
 * might need the conversion. This function walks through the list
 * and converts the related unwritten extents to written for each
 * completed IO.
 * It returns 0 on success, or a negative error code if any of the
 * conversions failed.
 */
int ext4_flush_completed_IO(struct inode *inode)
{
        ext4_io_end_t *io;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned long flags;
        int ret = 0;
        int ret2 = 0;

        dump_completed_IO(inode);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        while (!list_empty(&ei->i_completed_io_list)) {
                io = list_entry(ei->i_completed_io_list.next,
                                ext4_io_end_t, list);
                list_del_init(&io->list);
                io->flag |= EXT4_IO_END_IN_FSYNC;
                /*
                 * Call ext4_end_io_nolock() to convert the completed
                 * IO to written.
                 *
                 * When ext4_sync_file() is called, run_queue() may already
                 * be about to flush the work corresponding to this io
                 * structure, and it will be upset if it finds that the io
                 * structure behind the work it is about to run has been
                 * freed.
                 *
                 * Thus we need to keep the io structure valid here even
                 * after the conversion has finished. The io structure
                 * carries a flag to avoid converting twice, once from
                 * fsync and once from the background workqueue.
                 */
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                ret = ext4_end_io_nolock(io);
                if (ret < 0)
                        ret2 = ret;
                spin_lock_irqsave(&ei->i_completed_io_lock, flags);
                io->flag &= ~EXT4_IO_END_IN_FSYNC;
        }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
        return (ret2 < 0) ? ret2 : 0;
}
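
/*
 * Editor's note (illustration, not part of the original fsync.c): the
 * function above uses a common kernel idiom for draining a spinlock-
 * protected list when the per-item work may sleep - unlink and flag the
 * item under the lock, drop the lock around the expensive work, and let
 * the flag stop the concurrent workqueue path from processing or freeing
 * the same item.  The guarded sketch below shows the shape of that idiom
 * with hypothetical names (struct demo_job, demo_process, demo_flush).
 */
#if 0
struct demo_job {
        struct list_head        list;
        unsigned int            flags;
#define DEMO_IN_FLUSH   0x1             /* job is being flushed right now */
};

static int demo_process(struct demo_job *job);  /* may sleep */

static int demo_flush(struct list_head *head, spinlock_t *lock)
{
        struct demo_job *job;
        unsigned long flags;
        int ret, err = 0;

        spin_lock_irqsave(lock, flags);
        while (!list_empty(head)) {
                job = list_entry(head->next, struct demo_job, list);
                list_del_init(&job->list);
                job->flags |= DEMO_IN_FLUSH;
                /* drop the lock: demo_process() may sleep */
                spin_unlock_irqrestore(lock, flags);
                ret = demo_process(job);
                if (ret < 0)
                        err = ret;
                spin_lock_irqsave(lock, flags);
                job->flags &= ~DEMO_IN_FLUSH;
        }
        spin_unlock_irqrestore(lock, flags);
        return err;
}
#endif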

/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file.  This may apply to
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
        struct writeback_control wbc;
        struct dentry *dentry = NULL;
        struct inode *next;
        int ret = 0;

        if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
                return 0;
        inode = igrab(inode);
        while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
                ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
                dentry = d_find_any_alias(inode);
                if (!dentry)
                        break;
                next = igrab(dentry->d_parent->d_inode);
                dput(dentry);
                if (!next)
                        break;
                iput(inode);
                inode = next;
                ret = sync_mapping_buffers(inode->i_mapping);
                if (ret)
                        break;
                memset(&wbc, 0, sizeof(wbc));
                wbc.sync_mode = WB_SYNC_ALL;
                wbc.nr_to_write = 0;            /* only write out the inode */
                ret = sync_inode(inode, &wbc);
                if (ret)
                        break;
        }
        iput(inode);
        return ret;
}
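
/*
 * Editor's note (illustration, not part of the original fsync.c): the crash
 * window described above is why careful applications also fsync() a new
 * file's parent directory themselves.  The guarded userspace sketch below
 * shows that pattern; the helper name create_durably() is hypothetical.
 * ext4_sync_parent() exists so that, in no-journal mode, fsync() of the new
 * file already covers the freshly created directory entries.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int create_durably(const char *dirpath, const char *filepath,
                   const void *buf, size_t len)
{
        int ret = -1;
        int fd, dirfd;

        fd = open(filepath, O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0)
                return -1;
        dirfd = open(dirpath, O_RDONLY | O_DIRECTORY);
        if (dirfd < 0)
                goto out_fd;

        /* file data and inode first, then the directory entry naming it */
        if (write(fd, buf, len) == (ssize_t)len &&
            fsync(fd) == 0 && fsync(dirfd) == 0)
                ret = 0;

        close(dirfd);
out_fd:
        close(fd);
        return ret;
}
#endif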

/**
 * __sync_inode - generic_file_fsync without the locking and filemap_write
 * @inode:      inode to sync
 * @datasync:   only sync essential metadata if true
 *
 * This is just generic_file_fsync without the locking.  This is needed for
 * nojournal mode to make sure this inode's data/metadata makes it to disk
 * properly.  The i_mutex should be held already.
 */
static int __sync_inode(struct inode *inode, int datasync)
{
        int err;
        int ret;

        ret = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY))
                return ret;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return ret;

        err = sync_inode_metadata(inode, 1);
        if (ret == 0)
                ret = err;
        return ret;
}
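
/*
 * Editor's note (illustration, not part of the original fsync.c): "without
 * the locking and filemap_write" means the caller - ext4_sync_file() below -
 * has already written and waited on the page-cache range and taken i_mutex
 * before calling __sync_inode().  A hypothetical locked wrapper would look
 * roughly like the guarded sketch below; __sync_inode_locked() is not a real
 * ext4 or VFS function.
 */
#if 0
static int __sync_inode_locked(struct file *file, loff_t start, loff_t end,
                               int datasync)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        /* flush and wait on the data pages in [start, end] first */
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);
        ret = __sync_inode(inode, datasync);    /* metadata, as above */
        mutex_unlock(&inode->i_mutex);
        return ret;
}
#endif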

/*
 * akpm: A new design for ext4_sync_file().
 *
 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
 * There cannot be a transaction open by this task.
 * Another task could have dirtied this inode.  Also the current
 * transaction already contains this inode's dirty data.
 *
 * Also, if we just wrote the single inode out, we'd have to force a
 * commit anyway, so there's no benefit in deferring.
 */
int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_inode_info *ei = EXT4_I(inode);
        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
        int ret;
        tid_t commit_tid;
        bool needs_barrier = false;

        J_ASSERT(ext4_journal_current_handle() == NULL);

        trace_ext4_sync_file_enter(file, datasync);

        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;
        mutex_lock(&inode->i_mutex);

        if (inode->i_sb->s_flags & MS_RDONLY)
                goto out;

        ret = ext4_flush_completed_IO(inode);
        if (ret < 0)
                goto out;

        if (!journal) {
                ret = __sync_inode(inode, datasync);
                if (!ret && !list_empty(&inode->i_dentry))
                        ret = ext4_sync_parent(inode);
                goto out;
        }

        /*
         * data=writeback,ordered:
         *  The caller's filemap_fdatawrite()/wait will sync the data.
         *  Metadata is in the journal, we wait for the proper transaction
         *  to commit here.
         *
         * data=journal:
         *  filemap_fdatawrite won't do anything (the buffers are clean).
         *  ext4_force_commit will write the file data into the journal and
         *  will wait on that.
         *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
         *  (they were dirtied by commit).  But that's OK - the blocks are
         *  safe in-journal, which is all fsync() needs to ensure.
         */
        if (ext4_should_journal_data(inode)) {
                ret = ext4_force_commit(inode->i_sb);
                goto out;
        }

        commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_trans_will_send_data_barrier(journal, commit_tid))
                needs_barrier = true;
        jbd2_log_start_commit(journal, commit_tid);
        ret = jbd2_log_wait_commit(journal, commit_tid);
        if (needs_barrier)
                blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
out:
        mutex_unlock(&inode->i_mutex);
        trace_ext4_sync_file_exit(inode, ret);
        return ret;
}
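
/*
 * Editor's note (illustration, not part of the original fsync.c):
 * ext4_sync_file() is ext4's ->fsync handler; fsync(2)/fdatasync(2) reach it
 * through the VFS roughly as sketched below.  The real hookup lives in
 * fs/ext4/file.c and fs/ext4/dir.c; only the .fsync line is shown here, and
 * the *_sketch names are hypothetical.
 */
#if 0
const struct file_operations ext4_file_operations_sketch = {
        /* read/write/mmap/ioctl methods elided */
        .fsync          = ext4_sync_file,
};

/* VFS side: vfs_fsync_range() ends up calling the handler like this */
static int call_fsync_sketch(struct file *file, loff_t start, loff_t end,
                             int datasync)
{
        return file->f_op->fsync(file, start, end, datasync);
}
#endif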


