linux/fs/ext4/extents.c
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */
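
/*
 * Illustration (not in the original source): when splitting an
 * uninitialized extent in two, a caller would typically pass
 * EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2 so both halves stay
 * uninitialized, optionally OR-ed with EXT4_EXT_MAY_ZEROOUT to allow
 * falling back to zeroing out the range if the split fails for lack
 * of space.
 */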

static __le32 ext4_extent_block_csum(struct inode *inode,
                                     struct ext4_extent_header *eh)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
                           EXT4_EXTENT_TAIL_OFFSET(eh));
        return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
                                         struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return 1;

        et = find_ext4_extent_tail(eh);
        if (et->et_checksum != ext4_extent_block_csum(inode, eh))
                return 0;
        return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
                                       struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return;

        et = find_ext4_extent_tail(eh);
        et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

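/*
 * Descriptive note (added for clarity): make sure @handle carries at
 * least @needed journal credits.  If it does not and the running
 * handle cannot be extended, restart the transaction and return
 * -EAGAIN so the caller knows to re-validate its path.
 */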
static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
                __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
                            handle_t *handle, struct inode *inode,
                            struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * an ELF object sections out-of-order but in a way
                 * that eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}
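
/*
 * Worked example (added for illustration): if the extent found in the
 * path maps logical block 100 to physical block 5000, the goal for
 * logical block 120 is 5000 + (120 - 100) = 5020, keeping the new
 * allocation physically contiguous with the existing extent.
 */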

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}
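
/*
 * Worked arithmetic (added for illustration): with a 4096-byte block,
 * sizeof(struct ext4_extent_header) == 12 and
 * sizeof(struct ext4_extent_idx) == 12, so
 *
 *      idxs = (4096 - 12) / 12 = 340
 *
 * i.e. a contiguous delayed-allocation run costs one new index block
 * every 340 leaf blocks, a second-level index block every 340*340
 * blocks, and so on.
 */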

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);

        if (len == 0)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;
                        ext++;
                        entries--;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        /* Verify checksum on non-root extent tree nodes */
        if (ext_depth(inode) != depth &&
            !ext4_extent_block_csum_verify(inode, eh)) {
                error_msg = "extent tree corrupted";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                        "bad header/extent: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check(inode, eh, depth)        \
        __ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
                                  struct inode *inode,
                                  struct ext4_extent_header *eh,
                                  int depth,
                                  struct buffer_head *bh)
{
        int ret;

        if (buffer_verified(bh))
                return 0;
        ret = ext4_ext_check(inode, eh, depth);
        if (ret)
                return ret;
        set_buffer_verified(bh);
        return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)      \
        __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_uninitialized(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
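/*
 * Example (added for illustration): with index entries whose ei_block
 * values are {0, 100, 200}, a search for block 150 converges on the
 * entry for block 100 -- the rightmost entry whose ei_block does not
 * exceed the target.  The same invariant holds for the extent search
 * in ext4_ext_binsearch() below.
 */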
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;

        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif
}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif
}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
                if (unlikely(!bh))
                        goto err;
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto err;
                        }
                }
                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (ext4_ext_check_block(inode, eh, i, bh))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
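/*
 * Example (added for illustration): if @curp points at the index for
 * logical block 100 and @logical is 150, the new entry is inserted
 * after @curp; existing entries to its right are first shifted one
 * slot with memmove() to make room.
 */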
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
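/*
 * Example (added for illustration): for a depth-2 tree with a free
 * index slot in the root (@at == 0), depth - at == 2 new blocks are
 * allocated -- a new leaf plus one intermediate index block -- and a
 * new entry pointing at that subtree is inserted into the root.
 */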
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, we stop processing and mark the
         * filesystem read-only.  The index won't be inserted and the
         * tree will be left in a consistent state.  The next mount
         * will repair buffers too.
         */

        /*
         * Get an array to track all allocated blocks.
         * We need this to handle errors and free blocks
         * upon them.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;
        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
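/*
 * Example (added for illustration): a depth-0 tree whose root (in the
 * inode body) is full becomes a depth-1 tree -- the four root extents
 * move into a newly allocated leaf block, and the root is rewritten
 * as a single index entry pointing at that leaf.
 */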
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags,
                                 struct ext4_extent *newext)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, NULL,
                newext, &err, flags);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
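/*
 * Illustration (added): walking up from the leaf, the first level
 * with a free index slot determines where ext4_ext_split() is
 * applied; if every level on the path is full,
 * ext4_ext_grow_indepth() adds a new root first and the walk is
 * retried.
 */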
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                    unsigned int flags,
                                    struct ext4_ext_path *path,
                                    struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree and look for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use already allocated block for index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, flags, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, flags, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only first (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
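/*
 * Example (added for illustration): for a file whose first extent
 * covers logical blocks 50..59, a query with *logical == 70 returns
 * *logical == 59 and *phys set to the last physical block of that
 * extent; a query below block 50 leaves *phys == 0.
 */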
static int ext4_ext_search_left(struct inode *inode,
                                struct ext4_ext_path *path,
                                ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually extent in the path covers blocks smaller
         * than *logical, but it can be that extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
                                         *logical, le32_to_cpu(ex->ee_block));
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
                                  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
                                  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
                le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
                                  depth);
                                return -EIO;
                        }
                }
                return 0;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext4_ext_pblock(ex) + ee_len - 1;
        return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
                                 struct ext4_ext_path *path,
                                 ext4_lblk_t *logical, ext4_fsblk_t *phys,
                                 struct ext4_extent **ret_ex)
{
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        ext4_fsblk_t block;
        int depth;      /* Note, NOT eh_depth; depth from top of tree */
        int ee_len;

        if (unlikely(path == NULL)) {
                EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
                return -EIO;
        }
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually extent in the path covers blocks smaller
         * than *logical, but it can be that extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
                        EXT4_ERROR_INODE(inode,
                                         "first_extent(path[%d].p_hdr) != ex",
                                         depth);
                        return -EIO;
                }
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                                 "ix != EXT_FIRST_INDEX *logical %d!",
                                                 *logical);
                                return -EIO;
                        }
                }
                goto found_extent;
        }

        if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d < ee_block %d + ee_len %d!",
                                 *logical, le32_to_cpu(ex->ee_block), ee_len);
                return -EIO;
        }

        if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
                /* next allocated block in this leaf */
                ex++;
                goto found_extent;
        }

        /* go up and search for index to the right */
        while (--depth >= 0) {
                ix = path[depth].p_idx;
                if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
                        goto got_index;
        }

        /* we've gone up to the root and found no index to the right */
        return 0;

got_index:
        /* we've found index to the right, let's
         * follow it and find the closest allocated
         * block to the right */
        ix++;
        block = ext4_idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
                if (ext4_ext_check_block(inode, eh,
                                         path->p_depth - depth, bh)) {
                        put_bh(bh);
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
                block = ext4_idx_pblock(ix);
                put_bh(bh);
        }

        bh = sb_bread(inode->i_sb, block);
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
        if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
                put_bh(bh);
                return -EIO;
        }
        ex = EXT_FIRST_EXTENT(eh);
found_extent:
        *logical = le32_to_cpu(ex->ee_block);
        *phys = ext4_ext_pblock(ex);
        *ret_ex = ex;
        if (bh)
                put_bh(bh);
        return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
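/*
 * Example (added for illustration): for a leaf holding extents that
 * start at logical blocks 0 and 100, with the path positioned on the
 * first extent, this returns 100; positioned on the last extent of
 * the last leaf, it returns EXT_MAX_BLOCKS.
 */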
1451static ext4_lblk_t
1452ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1453{
1454        int depth;
1455
1456        BUG_ON(path == NULL);
1457        depth = path->p_depth;
1458
1459        if (depth == 0 && path->p_ext == NULL)
1460                return EXT_MAX_BLOCKS;
1461
1462        while (depth >= 0) {
1463                if (depth == path->p_depth) {
1464                        /* leaf */
1465                        if (path[depth].p_ext &&
1466                                path[depth].p_ext !=
1467                                        EXT_LAST_EXTENT(path[depth].p_hdr))
1468                          return le32_to_cpu(path[depth].p_ext[1].ee_block);
1469                } else {
1470                        /* index */
1471                        if (path[depth].p_idx !=
1472                                        EXT_LAST_INDEX(path[depth].p_hdr))
1473                          return le32_to_cpu(path[depth].p_idx[1].ei_block);
1474                }
1475                depth--;
1476        }
1477
1478        return EXT_MAX_BLOCKS;
1479}
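/*
 * Worked example (illustrative, not from the original source): in a
 * depth-1 tree whose leaf holds extents starting at logical blocks 0
 * and 100, a path positioned on the first extent returns 100, the
 * ee_block of the following extent; positioned on the last extent of
 * the last leaf, every level is exhausted and EXT_MAX_BLOCKS is
 * returned.
 */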
1480
1481/*
1482 * ext4_ext_next_leaf_block:
1483 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1484 */
1485static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1486{
1487        int depth;
1488
1489        BUG_ON(path == NULL);
1490        depth = path->p_depth;
1491
1492        /* a zero-depth tree has no leaf blocks at all */
1493        if (depth == 0)
1494                return EXT_MAX_BLOCKS;
1495
1496        /* go to index block */
1497        depth--;
1498
1499        while (depth >= 0) {
1500                if (path[depth].p_idx !=
1501                                EXT_LAST_INDEX(path[depth].p_hdr))
1502                        return (ext4_lblk_t)
1503                                le32_to_cpu(path[depth].p_idx[1].ei_block);
1504                depth--;
1505        }
1506
1507        return EXT_MAX_BLOCKS;
1508}
1509
1510/*
1511 * ext4_ext_correct_indexes:
1512 * if the leaf is modified and the modified extent is first in the leaf,
1513 * then we have to correct all indexes above.
1514 * TODO: do we need to correct the tree in all cases?
1515 */
1516static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1517                                struct ext4_ext_path *path)
1518{
1519        struct ext4_extent_header *eh;
1520        int depth = ext_depth(inode);
1521        struct ext4_extent *ex;
1522        __le32 border;
1523        int k, err = 0;
1524
1525        eh = path[depth].p_hdr;
1526        ex = path[depth].p_ext;
1527
1528        if (unlikely(ex == NULL || eh == NULL)) {
1529                EXT4_ERROR_INODE(inode,
1530                                 "ex %p == NULL or eh %p == NULL", ex, eh);
1531                return -EIO;
1532        }
1533
1534        if (depth == 0) {
1535                /* there is no tree at all */
1536                return 0;
1537        }
1538
1539        if (ex != EXT_FIRST_EXTENT(eh)) {
1540                /* we correct the tree only if the first extent in the leaf was modified */
1541                return 0;
1542        }
1543
1544        /*
1545         * TODO: we need a correction if the new border is smaller than the current one
1546         */
1547        k = depth - 1;
1548        border = path[depth].p_ext->ee_block;
1549        err = ext4_ext_get_access(handle, inode, path + k);
1550        if (err)
1551                return err;
1552        path[k].p_idx->ei_block = border;
1553        err = ext4_ext_dirty(handle, inode, path + k);
1554        if (err)
1555                return err;
1556
1557        while (k--) {
1558                /* change all left-side indexes */
1559                if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1560                        break;
1561                err = ext4_ext_get_access(handle, inode, path + k);
1562                if (err)
1563                        break;
1564                path[k].p_idx->ei_block = border;
1565                err = ext4_ext_dirty(handle, inode, path + k);
1566                if (err)
1567                        break;
1568        }
1569
1570        return err;
1571}
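/*
 * Illustrative example (hypothetical values): suppose depth is 2 and
 * the first extent of a leaf changes its ee_block from 100 to 90.  The
 * loop above first rewrites the level-1 index to 90; if that index is
 * also the first one in its block, the root index is rewritten to 90
 * as well, otherwise the propagation stops there.
 */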
1572
1573int
1574ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1575                                struct ext4_extent *ex2)
1576{
1577        unsigned short ext1_ee_len, ext2_ee_len, max_len;
1578
1579        /*
1580         * Make sure that either both extents are uninitialized, or
1581         * both are _not_.
1582         */
1583        if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1584                return 0;
1585
1586        if (ext4_ext_is_uninitialized(ex1))
1587                max_len = EXT_UNINIT_MAX_LEN;
1588        else
1589                max_len = EXT_INIT_MAX_LEN;
1590
1591        ext1_ee_len = ext4_ext_get_actual_len(ex1);
1592        ext2_ee_len = ext4_ext_get_actual_len(ex2);
1593
1594        if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1595                        le32_to_cpu(ex2->ee_block))
1596                return 0;
1597
1598        /*
1599         * To allow future support for preallocated extents to be added
1600         * as an RO_COMPAT feature, refuse to merge two extents if
1601         * this can result in the top bit of ee_len being set.
1602         */
1603        if (ext1_ee_len + ext2_ee_len > max_len)
1604                return 0;
1605#ifdef AGGRESSIVE_TEST
1606        if (ext1_ee_len >= 4)
1607                return 0;
1608#endif
1609
1610        if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1611                return 1;
1612        return 0;
1613}
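/*
 * Worked example (hypothetical values): ex1 = {ee_block 0, len 10,
 * pblk 1000} and ex2 = {ee_block 10, len 5, pblk 1010} are mergeable:
 * both are initialized, logically contiguous (0 + 10 == 10),
 * physically contiguous (1000 + 10 == 1010), and the combined length
 * of 15 stays within EXT_INIT_MAX_LEN.
 */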
1614
1615/*
1616 * This function tries to merge the "ex" extent to the next extent in the tree.
1617 * It always tries to merge towards right. If you want to merge towards
1618 * left, pass "ex - 1" as argument instead of "ex".
1619 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1620 * 1 if they got merged.
1621 */
1622static int ext4_ext_try_to_merge_right(struct inode *inode,
1623                                 struct ext4_ext_path *path,
1624                                 struct ext4_extent *ex)
1625{
1626        struct ext4_extent_header *eh;
1627        unsigned int depth, len;
1628        int merge_done = 0;
1629        int uninitialized = 0;
1630
1631        depth = ext_depth(inode);
1632        BUG_ON(path[depth].p_hdr == NULL);
1633        eh = path[depth].p_hdr;
1634
1635        while (ex < EXT_LAST_EXTENT(eh)) {
1636                if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1637                        break;
1638                /* merge with next extent! */
1639                if (ext4_ext_is_uninitialized(ex))
1640                        uninitialized = 1;
1641                ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1642                                + ext4_ext_get_actual_len(ex + 1));
1643                if (uninitialized)
1644                        ext4_ext_mark_uninitialized(ex);
1645
1646                if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1647                        len = (EXT_LAST_EXTENT(eh) - ex - 1)
1648                                * sizeof(struct ext4_extent);
1649                        memmove(ex + 1, ex + 2, len);
1650                }
1651                le16_add_cpu(&eh->eh_entries, -1);
1652                merge_done = 1;
1653                WARN_ON(eh->eh_entries == 0);
1654                if (!eh->eh_entries)
1655                        EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1656        }
1657
1658        return merge_done;
1659}
1660
1661/*
1662 * This function tries to merge the @ex extent with its neighbours in the tree.
1663 * Returns 1 only if @ex was merged with the extent to its right, else 0.
1664 */
1665static int ext4_ext_try_to_merge(struct inode *inode,
1666                                  struct ext4_ext_path *path,
1667                                  struct ext4_extent *ex) {
1668        struct ext4_extent_header *eh;
1669        unsigned int depth;
1670        int merge_done = 0;
1671        int ret = 0;
1672
1673        depth = ext_depth(inode);
1674        BUG_ON(path[depth].p_hdr == NULL);
1675        eh = path[depth].p_hdr;
1676
1677        if (ex > EXT_FIRST_EXTENT(eh))
1678                merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1679
1680        if (!merge_done)
1681                ret = ext4_ext_try_to_merge_right(inode, path, ex);
1682
1683        return ret;
1684}
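/*
 * Usage sketch (hypothetical helper, illustrative only -- the
 * "example_" name is invented): after an extent in the leaf has been
 * updated, both neighbours are tried in a single call.
 */
#if 0
static void example_merge_after_update(struct inode *inode,
                                       struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);

        /* tries the left neighbour first, then the right one */
        ext4_ext_try_to_merge(inode, path, path[depth].p_ext);
}
#endif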
1685
1686/*
1687 * check if a portion of the "newext" extent overlaps with an
1688 * existing extent.
1689 *
1690 * If there is an overlap discovered, it updates the length of the newext
1691 * such that there will be no overlap, and then returns 1.
1692 * If there is no overlap found, it returns 0.
1693 */
1694static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1695                                           struct inode *inode,
1696                                           struct ext4_extent *newext,
1697                                           struct ext4_ext_path *path)
1698{
1699        ext4_lblk_t b1, b2;
1700        unsigned int depth, len1;
1701        unsigned int ret = 0;
1702
1703        b1 = le32_to_cpu(newext->ee_block);
1704        len1 = ext4_ext_get_actual_len(newext);
1705        depth = ext_depth(inode);
1706        if (!path[depth].p_ext)
1707                goto out;
1708        b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1709        b2 &= ~(sbi->s_cluster_ratio - 1);
1710
1711        /*
1712         * get the next allocated block if the extent in the path
1713         * is before the requested block(s)
1714         */
1715        if (b2 < b1) {
1716                b2 = ext4_ext_next_allocated_block(path);
1717                if (b2 == EXT_MAX_BLOCKS)
1718                        goto out;
1719                b2 &= ~(sbi->s_cluster_ratio - 1);
1720        }
1721
1722        /* check for wrap through zero on extent logical start block*/
1723        if (b1 + len1 < b1) {
1724                len1 = EXT_MAX_BLOCKS - b1;
1725                newext->ee_len = cpu_to_le16(len1);
1726                ret = 1;
1727        }
1728
1729        /* check for overlap */
1730        if (b1 + len1 > b2) {
1731                newext->ee_len = cpu_to_le16(b2 - b1);
1732                ret = 1;
1733        }
1734out:
1735        return ret;
1736}
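/*
 * Worked example (hypothetical values, cluster ratio 1): newext covers
 * b1 = 100 with len1 = 50 while the next allocated block is b2 = 120.
 * Since b1 + len1 = 150 > b2, newext is trimmed to ee_len = b2 - b1 =
 * 20 blocks and 1 is returned; the caller must handle the remainder
 * separately.
 */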
1737
1738/*
1739 * ext4_ext_insert_extent:
1740 * tries to merge the requested extent into an existing extent or
1741 * inserts the requested extent as a new one into the tree,
1742 * creating a new leaf if there is no space.
1743 */
1744int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1745                                struct ext4_ext_path *path,
1746                                struct ext4_extent *newext, int flag)
1747{
1748        struct ext4_extent_header *eh;
1749        struct ext4_extent *ex, *fex;
1750        struct ext4_extent *nearex; /* nearest extent */
1751        struct ext4_ext_path *npath = NULL;
1752        int depth, len, err;
1753        ext4_lblk_t next;
1754        unsigned uninitialized = 0;
1755        int flags = 0;
1756
1757        if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1758                EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1759                return -EIO;
1760        }
1761        depth = ext_depth(inode);
1762        ex = path[depth].p_ext;
1763        if (unlikely(path[depth].p_hdr == NULL)) {
1764                EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1765                return -EIO;
1766        }
1767
1768        /* try to insert block into found extent and return */
1769        if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1770                && ext4_can_extents_be_merged(inode, ex, newext)) {
1771                ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
1772                          ext4_ext_is_uninitialized(newext),
1773                          ext4_ext_get_actual_len(newext),
1774                          le32_to_cpu(ex->ee_block),
1775                          ext4_ext_is_uninitialized(ex),
1776                          ext4_ext_get_actual_len(ex),
1777                          ext4_ext_pblock(ex));
1778                err = ext4_ext_get_access(handle, inode, path + depth);
1779                if (err)
1780                        return err;
1781
1782                /*
1783                 * ext4_can_extents_be_merged should have checked that either
1784                 * both extents are uninitialized, or both aren't. Thus we
1785                 * need to check only one of them here.
1786                 */
1787                if (ext4_ext_is_uninitialized(ex))
1788                        uninitialized = 1;
1789                ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1790                                        + ext4_ext_get_actual_len(newext));
1791                if (uninitialized)
1792                        ext4_ext_mark_uninitialized(ex);
1793                eh = path[depth].p_hdr;
1794                nearex = ex;
1795                goto merge;
1796        }
1797
1798        depth = ext_depth(inode);
1799        eh = path[depth].p_hdr;
1800        if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1801                goto has_space;
1802
1803        /* probably next leaf has space for us? */
1804        fex = EXT_LAST_EXTENT(eh);
1805        next = EXT_MAX_BLOCKS;
1806        if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
1807                next = ext4_ext_next_leaf_block(path);
1808        if (next != EXT_MAX_BLOCKS) {
1809                ext_debug("next leaf block - %u\n", next);
1810                BUG_ON(npath != NULL);
1811                npath = ext4_ext_find_extent(inode, next, NULL);
1812                if (IS_ERR(npath))
1813                        return PTR_ERR(npath);
1814                BUG_ON(npath->p_depth != path->p_depth);
1815                eh = npath[depth].p_hdr;
1816                if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1817                        ext_debug("next leaf isn't full(%d)\n",
1818                                  le16_to_cpu(eh->eh_entries));
1819                        path = npath;
1820                        goto has_space;
1821                }
1822                ext_debug("next leaf has no free space(%d,%d)\n",
1823                          le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1824        }
1825
1826        /*
1827         * There is no free space in the found leaf.
1828         * We're gonna add a new leaf in the tree.
1829         */
1830        if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1831                flags = EXT4_MB_USE_ROOT_BLOCKS;
1832        err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1833        if (err)
1834                goto cleanup;
1835        depth = ext_depth(inode);
1836        eh = path[depth].p_hdr;
1837
1838has_space:
1839        nearex = path[depth].p_ext;
1840
1841        err = ext4_ext_get_access(handle, inode, path + depth);
1842        if (err)
1843                goto cleanup;
1844
1845        if (!nearex) {
1846                /* there is no extent in this leaf, create first one */
1847                ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1848                                le32_to_cpu(newext->ee_block),
1849                                ext4_ext_pblock(newext),
1850                                ext4_ext_is_uninitialized(newext),
1851                                ext4_ext_get_actual_len(newext));
1852                nearex = EXT_FIRST_EXTENT(eh);
1853        } else {
1854                if (le32_to_cpu(newext->ee_block)
1855                           > le32_to_cpu(nearex->ee_block)) {
1856                        /* Insert after */
1857                        ext_debug("insert %u:%llu:[%d]%d before: "
1858                                        "nearest %p\n",
1859                                        le32_to_cpu(newext->ee_block),
1860                                        ext4_ext_pblock(newext),
1861                                        ext4_ext_is_uninitialized(newext),
1862                                        ext4_ext_get_actual_len(newext),
1863                                        nearex);
1864                        nearex++;
1865                } else {
1866                        /* Insert before */
1867                        BUG_ON(newext->ee_block == nearex->ee_block);
1868                        ext_debug("insert %u:%llu:[%d]%d after: "
1869                                        "nearest %p\n",
1870                                        le32_to_cpu(newext->ee_block),
1871                                        ext4_ext_pblock(newext),
1872                                        ext4_ext_is_uninitialized(newext),
1873                                        ext4_ext_get_actual_len(newext),
1874                                        nearex);
1875                }
1876                len = EXT_LAST_EXTENT(eh) - nearex + 1;
1877                if (len > 0) {
1878                        ext_debug("insert %u:%llu:[%d]%d: "
1879                                        "move %d extents from 0x%p to 0x%p\n",
1880                                        le32_to_cpu(newext->ee_block),
1881                                        ext4_ext_pblock(newext),
1882                                        ext4_ext_is_uninitialized(newext),
1883                                        ext4_ext_get_actual_len(newext),
1884                                        len, nearex, nearex + 1);
1885                        memmove(nearex + 1, nearex,
1886                                len * sizeof(struct ext4_extent));
1887                }
1888        }
1889
1890        le16_add_cpu(&eh->eh_entries, 1);
1891        path[depth].p_ext = nearex;
1892        nearex->ee_block = newext->ee_block;
1893        ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1894        nearex->ee_len = newext->ee_len;
1895
1896merge:
1897        /* try to merge extents */
1898        if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1899                ext4_ext_try_to_merge(inode, path, nearex);
1900
1901
1902        /* time to correct all indexes above */
1903        err = ext4_ext_correct_indexes(handle, inode, path);
1904        if (err)
1905                goto cleanup;
1906
1907        err = ext4_ext_dirty(handle, inode, path + depth);
1908
1909cleanup:
1910        if (npath) {
1911                ext4_ext_drop_refs(npath);
1912                kfree(npath);
1913        }
1914        ext4_ext_invalidate_cache(inode);
1915        return err;
1916}
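/*
 * Illustrative flow (hypothetical values): inserting newext =
 * {ee_block 10, len 5, pblk 1010} into a leaf ending with
 * {ee_block 0, len 10, pblk 1000} takes the merge path above and
 * simply grows that extent's ee_len to 15; only when no merge is
 * possible and neither this leaf nor the next has room does the
 * function create a new leaf via ext4_ext_create_new_leaf().
 */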
1917
1918static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1919                               ext4_lblk_t num, ext_prepare_callback func,
1920                               void *cbdata)
1921{
1922        struct ext4_ext_path *path = NULL;
1923        struct ext4_ext_cache cbex;
1924        struct ext4_extent *ex;
1925        ext4_lblk_t next, start = 0, end = 0;
1926        ext4_lblk_t last = block + num;
1927        int depth, exists, err = 0;
1928
1929        BUG_ON(func == NULL);
1930        BUG_ON(inode == NULL);
1931
1932        while (block < last && block != EXT_MAX_BLOCKS) {
1933                num = last - block;
1934                /* find extent for this block */
1935                down_read(&EXT4_I(inode)->i_data_sem);
1936                path = ext4_ext_find_extent(inode, block, path);
1937                up_read(&EXT4_I(inode)->i_data_sem);
1938                if (IS_ERR(path)) {
1939                        err = PTR_ERR(path);
1940                        path = NULL;
1941                        break;
1942                }
1943
1944                depth = ext_depth(inode);
1945                if (unlikely(path[depth].p_hdr == NULL)) {
1946                        EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1947                        err = -EIO;
1948                        break;
1949                }
1950                ex = path[depth].p_ext;
1951                next = ext4_ext_next_allocated_block(path);
1952
1953                exists = 0;
1954                if (!ex) {
1955                        /* there is no extent yet, so try to allocate
1956                         * all requested space */
1957                        start = block;
1958                        end = block + num;
1959                } else if (le32_to_cpu(ex->ee_block) > block) {
1960                        /* need to allocate space before found extent */
1961                        start = block;
1962                        end = le32_to_cpu(ex->ee_block);
1963                        if (block + num < end)
1964                                end = block + num;
1965                } else if (block >= le32_to_cpu(ex->ee_block)
1966                                        + ext4_ext_get_actual_len(ex)) {
1967                        /* need to allocate space after found extent */
1968                        start = block;
1969                        end = block + num;
1970                        if (end >= next)
1971                                end = next;
1972                } else if (block >= le32_to_cpu(ex->ee_block)) {
1973                        /*
1974                         * some part of requested space is covered
1975                         * by found extent
1976                         */
1977                        start = block;
1978                        end = le32_to_cpu(ex->ee_block)
1979                                + ext4_ext_get_actual_len(ex);
1980                        if (block + num < end)
1981                                end = block + num;
1982                        exists = 1;
1983                } else {
1984                        BUG();
1985                }
1986                BUG_ON(end <= start);
1987
1988                if (!exists) {
1989                        cbex.ec_block = start;
1990                        cbex.ec_len = end - start;
1991                        cbex.ec_start = 0;
1992                } else {
1993                        cbex.ec_block = le32_to_cpu(ex->ee_block);
1994                        cbex.ec_len = ext4_ext_get_actual_len(ex);
1995                        cbex.ec_start = ext4_ext_pblock(ex);
1996                }
1997
1998                if (unlikely(cbex.ec_len == 0)) {
1999                        EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
2000                        err = -EIO;
2001                        break;
2002                }
2003                err = func(inode, next, &cbex, ex, cbdata);
2004                ext4_ext_drop_refs(path);
2005
2006                if (err < 0)
2007                        break;
2008
2009                if (err == EXT_REPEAT)
2010                        continue;
2011                else if (err == EXT_BREAK) {
2012                        err = 0;
2013                        break;
2014                }
2015
2016                if (ext_depth(inode) != depth) {
2017                        /* depth was changed. we have to realloc path */
2018                        kfree(path);
2019                        path = NULL;
2020                }
2021
2022                block = cbex.ec_block + cbex.ec_len;
2023        }
2024
2025        if (path) {
2026                ext4_ext_drop_refs(path);
2027                kfree(path);
2028        }
2029
2030        return err;
2031}
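/*
 * Callback contract (summary of the loop above, illustrative): each
 * region passed to @func is either a real extent (cbex.ec_start != 0)
 * or a gap (cbex.ec_start == 0).  A negative return aborts the walk
 * with that error, EXT_REPEAT revisits the same region, and EXT_BREAK
 * ends the walk with err = 0.
 */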
2032
2033static void
2034ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
2035                        __u32 len, ext4_fsblk_t start)
2036{
2037        struct ext4_ext_cache *cex;
2038        BUG_ON(len == 0);
2039        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2040        trace_ext4_ext_put_in_cache(inode, block, len, start);
2041        cex = &EXT4_I(inode)->i_cached_extent;
2042        cex->ec_block = block;
2043        cex->ec_len = len;
2044        cex->ec_start = start;
2045        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2046}
2047
2048/*
2049 * ext4_ext_put_gap_in_cache:
2050 * calculate boundaries of the gap that the requested block fits into
2051 * and cache this gap
2052 */
2053static void
2054ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2055                                ext4_lblk_t block)
2056{
2057        int depth = ext_depth(inode);
2058        unsigned long len;
2059        ext4_lblk_t lblock;
2060        struct ext4_extent *ex;
2061
2062        ex = path[depth].p_ext;
2063        if (ex == NULL) {
2064                /* there is no extent yet, so the gap spans the whole file */
2065                lblock = 0;
2066                len = EXT_MAX_BLOCKS;
2067                ext_debug("cache gap(whole file):");
2068        } else if (block < le32_to_cpu(ex->ee_block)) {
2069                lblock = block;
2070                len = le32_to_cpu(ex->ee_block) - block;
2071                ext_debug("cache gap(before): %u [%u:%u]",
2072                                block,
2073                                le32_to_cpu(ex->ee_block),
2074                                 ext4_ext_get_actual_len(ex));
2075        } else if (block >= le32_to_cpu(ex->ee_block)
2076                        + ext4_ext_get_actual_len(ex)) {
2077                ext4_lblk_t next;
2078                lblock = le32_to_cpu(ex->ee_block)
2079                        + ext4_ext_get_actual_len(ex);
2080
2081                next = ext4_ext_next_allocated_block(path);
2082                ext_debug("cache gap(after): [%u:%u] %u",
2083                                le32_to_cpu(ex->ee_block),
2084                                ext4_ext_get_actual_len(ex),
2085                                block);
2086                BUG_ON(next == lblock);
2087                len = next - lblock;
2088        } else {
2089                lblock = len = 0;
2090                BUG();
2091        }
2092
2093        ext_debug(" -> %u:%lu\n", lblock, len);
2094        ext4_ext_put_in_cache(inode, lblock, len, 0);
2095}
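/*
 * Worked example (hypothetical values): with extents [0..9] and
 * [50..59] in the leaf, a lookup of block 20 hits the "after" branch:
 * lblock = 10, next = 50, so the hole [10..49] (len 40) is cached with
 * a zero start block.
 */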
2096
2097/*
2098 * ext4_ext_check_cache()
2099 * Checks to see if the given block is in the cache.
2100 * If it is, the cached extent is stored in the given
2101 * cache extent pointer.  If the cached extent is a hole,
2102 * this routine should be used instead of
2103 * ext4_ext_in_cache if the calling function needs to
2104 * know the size of the hole.
2105 *
2106 * @inode: The file's inode
2107 * @block: The block to look for in the cache
2108 * @ex:    Pointer where the cached extent will be stored
2109 *         if it contains block
2110 *
2111 * Returns 0 if the cache is invalid; 1 if the cache is valid
2112 */
2113static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2114        struct ext4_ext_cache *ex){
2115        struct ext4_ext_cache *cex;
2116        struct ext4_sb_info *sbi;
2117        int ret = 0;
2118
2119        /*
2120         * We borrow i_block_reservation_lock to protect i_cached_extent
2121         */
2122        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2123        cex = &EXT4_I(inode)->i_cached_extent;
2124        sbi = EXT4_SB(inode->i_sb);
2125
2126        /* does the cache have valid data? */
2127        if (cex->ec_len == 0)
2128                goto errout;
2129
2130        if (in_range(block, cex->ec_block, cex->ec_len)) {
2131                memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2132                ext_debug("%u cached by %u:%u:%llu\n",
2133                                block,
2134                                cex->ec_block, cex->ec_len, cex->ec_start);
2135                ret = 1;
2136        }
2137errout:
2138        trace_ext4_ext_in_cache(inode, block, ret);
2139        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2140        return ret;
2141}
2142
2143/*
2144 * ext4_ext_in_cache()
2145 * Checks to see if the given block is in the cache.
2146 * If it is, the cached extent is stored in the given
2147 * extent pointer.
2148 *
2149 * @inode: The file's inode
2150 * @block: The block to look for in the cache
2151 * @ex:    Pointer where the cached extent will be stored
2152 *         if it contains block
2153 *
2154 * Returns 0 if the cache is invalid; 1 if the cache is valid
2155 */
2156static int
2157ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2158                        struct ext4_extent *ex)
2159{
2160        struct ext4_ext_cache cex;
2161        int ret = 0;
2162
2163        if (ext4_ext_check_cache(inode, block, &cex)) {
2164                ex->ee_block = cpu_to_le32(cex.ec_block);
2165                ext4_ext_store_pblock(ex, cex.ec_start);
2166                ex->ee_len = cpu_to_le16(cex.ec_len);
2167                ret = 1;
2168        }
2169
2170        return ret;
2171}
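/*
 * Usage sketch (hypothetical helper, illustrative only -- the
 * "example_" name is invented): probe the single-entry cache before
 * paying for a full tree walk.
 */
#if 0
static int example_lookup_cached(struct inode *inode, ext4_lblk_t lblk,
                                 struct ext4_extent *ex)
{
        if (ext4_ext_in_cache(inode, lblk, ex))
                return 1;       /* hit: *ex is filled in (pblock 0 = hole) */
        return 0;               /* miss: the caller walks the tree */
}
#endif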
2172
2173
2174/*
2175 * ext4_ext_rm_idx:
2176 * removes index from the index block.
2177 */
2178static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2179                        struct ext4_ext_path *path)
2180{
2181        int err;
2182        ext4_fsblk_t leaf;
2183
2184        /* free index block */
2185        path--;
2186        leaf = ext4_idx_pblock(path->p_idx);
2187        if (unlikely(path->p_hdr->eh_entries == 0)) {
2188                EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2189                return -EIO;
2190        }
2191        err = ext4_ext_get_access(handle, inode, path);
2192        if (err)
2193                return err;
2194
2195        if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2196                int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2197                len *= sizeof(struct ext4_extent_idx);
2198                memmove(path->p_idx, path->p_idx + 1, len);
2199        }
2200
2201        le16_add_cpu(&path->p_hdr->eh_entries, -1);
2202        err = ext4_ext_dirty(handle, inode, path);
2203        if (err)
2204                return err;
2205        ext_debug("index is empty, remove it, free block %llu\n", leaf);
2206        trace_ext4_ext_rm_idx(inode, leaf);
2207
2208        ext4_free_blocks(handle, inode, NULL, leaf, 1,
2209                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2210        return err;
2211}
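/*
 * Illustrative example: removing the middle one of three indexes in a
 * block memmove()s the last index down one slot, drops eh_entries to
 * 2, and frees the block the removed index pointed to.
 */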
2212
2213/*
2214 * ext4_ext_calc_credits_for_single_extent:
2215 * This routine returns the maximum number of credits needed to insert
2216 * an extent into the extent tree.
2217 * When the actual path is passed, the caller should calculate the
2218 * credits under i_data_sem.
2219 */
2220int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2221                                                struct ext4_ext_path *path)
2222{
2223        if (path) {
2224                int depth = ext_depth(inode);
2225                int ret = 0;
2226
2227                /* probably there is space in the leaf? */
2228                if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2229                                < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2230
2231                        /*
2232                         *  There is some space in the leaf, so no
2233                         *  need to account for the leaf block credit.
2234                         *
2235                         *  Bitmaps, block group descriptor blocks and
2236                         *  other metadata blocks still need to be
2237                         *  accounted for.
2238                         */
2239                        /* 1 bitmap, 1 block group descriptor */
2240                        ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2241                        return ret;
2242                }
2243        }
2244
2245        return ext4_chunk_trans_blocks(inode, nrblocks);
2246}
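/*
 * Worked example (hypothetical numbers): if the leaf still has room
 * and EXT4_META_TRANS_BLOCKS(inode->i_sb) evaluates to, say, 8, the
 * fast path above returns 2 + 8 = 10 credits (1 bitmap + 1 group
 * descriptor + metadata overhead); otherwise the estimate comes from
 * ext4_chunk_trans_blocks().
 */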
2247
2248/*
2249 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
2250 *
2251 * If nrblocks fit in a single extent (chunk flag is 1), then in the
2252 * worst case each tree level's index/leaf needs to be changed, and if
2253 * the tree splits due to the insertion of a new extent, then the old
2254 * tree's index/leaf blocks need to be updated too.
2255 *
2256 * If the nrblocks are discontiguous, they could cause
2257 * the whole tree to split more than once, but this is really rare.
2258 */
2259int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2260{
2261        int index;
2262        int depth = ext_depth(inode);
2263
2264        if (chunk)
2265                index = depth * 2;
2266        else
2267                index = depth * 3;
2268
2269        return index;
2270}
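/*
 * Worked example: for a tree of depth 3, a contiguous (chunk)
 * modification is charged 3 * 2 = 6 index/leaf blocks, while
 * discontiguous blocks are charged 3 * 3 = 9 to allow for an extra
 * split at each level.
 */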
2271
2272static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2273                              struct ext4_extent *ex,
2274                              ext4_fsblk_t *partial_cluster,
2275                              ext4_lblk_t from, ext4_lblk_t to)
2276{
2277        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2278        unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2279        ext4_fsblk_t pblk;
2280        int flags = EXT4_FREE_BLOCKS_FORGET;
2281
2282        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2283                flags |= EXT4_FREE_BLOCKS_METADATA;
2284        /*
2285         * For bigalloc file systems, we never free a partial cluster
2286         * at the beginning of the extent.  Instead, we make a note
2287         * that we tried freeing the cluster, and check to see if we
2288         * need to free it on a subsequent call to ext4_remove_blocks,
2289         * or at the end of the ext4_truncate() operation.
2290         */
2291        flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2292
2293        trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2294        /*
2295         * If we have a partial cluster, and it's different from the
2296         * cluster of the last block, we need to explicitly free the
2297         * partial cluster here.
2298         */
2299        pblk = ext4_ext_pblock(ex) + ee_len - 1;
2300        if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2301                ext4_free_blocks(handle, inode, NULL,
2302                                 EXT4_C2B(sbi, *partial_cluster),
2303                                 sbi->s_cluster_ratio, flags);
2304                *partial_cluster = 0;
2305        }
2306
2307#ifdef EXTENTS_STATS
2308        {
2309                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2310                spin_lock(&sbi->s_ext_stats_lock);
2311                sbi->s_ext_blocks += ee_len;
2312                sbi->s_ext_extents++;
2313                if (ee_len < sbi->s_ext_min)
2314                        sbi->s_ext_min = ee_len;
2315                if (ee_len > sbi->s_ext_max)
2316                        sbi->s_ext_max = ee_len;
2317                if (ext_depth(inode) > sbi->s_depth_max)
2318                        sbi->s_depth_max = ext_depth(inode);
2319                spin_unlock(&sbi->s_ext_stats_lock);
2320        }
2321#endif
2322        if (from >= le32_to_cpu(ex->ee_block)
2323            && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2324                /* tail removal */
2325                ext4_lblk_t num;
2326
2327                num = le32_to_cpu(ex->ee_block) + ee_len - from;
2328                pblk = ext4_ext_pblock(ex) + ee_len - num;
2329                ext_debug("free last %u blocks starting %llu\n", num, pblk);
2330                ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2331                /*
2332                 * If the block range to be freed didn't start at the
2333                 * beginning of a cluster, and we removed the entire
2334                 * extent, save the partial cluster here, since we
2335                 * might need to delete it if we determine that the
2336                 * truncate operation has removed all of the blocks in
2337                 * the cluster.
2338                 */
2339                if (pblk & (sbi->s_cluster_ratio - 1) &&
2340                    (ee_len == num))
2341                        *partial_cluster = EXT4_B2C(sbi, pblk);
2342                else
2343                        *partial_cluster = 0;
2344        } else if (from == le32_to_cpu(ex->ee_block)
2345                   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2346                /* head removal */
2347                ext4_lblk_t num;
2348                ext4_fsblk_t start;
2349
2350                num = to - from;
2351                start = ext4_ext_pblock(ex);
2352
2353                ext_debug("free first %u blocks starting %llu\n", num, start);
2354                ext4_free_blocks(handle, inode, NULL, start, num, flags);
2355
2356        } else {
2357                printk(KERN_INFO "strange request: removal(2) "
2358                                "%u-%u from %u:%u\n",
2359                                from, to, le32_to_cpu(ex->ee_block), ee_len);
2360        }
2361        return 0;
2362}
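/*
 * Worked example (hypothetical values): for ex = {ee_block 100,
 * len 20, pblk 5000} and a tail removal with from = 110, to = 119:
 * num = 100 + 20 - 110 = 10 and pblk = 5000 + 20 - 10 = 5010, so
 * physical blocks 5010..5019 are freed while logical blocks 100..109
 * stay mapped.
 */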
2363
2364
2365/*
2366 * ext4_ext_rm_leaf() removes the extents associated with the
2367 * blocks appearing between "start" and "end", and splits the extents
2368 * if "start" and "end" appear in the same extent
2369 *
2370 * @handle: The journal handle
2371 * @inode:  The file's inode
2372 * @path:   The path to the leaf
2373 * @start:  The first block to remove
2374 * @end:    The last block to remove
2375 */
2376static int
2377ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2378                 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2379                 ext4_lblk_t start, ext4_lblk_t end)
2380{
2381        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2382        int err = 0, correct_index = 0;
2383        int depth = ext_depth(inode), credits;
2384        struct ext4_extent_header *eh;
2385        ext4_lblk_t a, b;
2386        unsigned num;
2387        ext4_lblk_t ex_ee_block;
2388        unsigned short ex_ee_len;
2389        unsigned uninitialized = 0;
2390        struct ext4_extent *ex;
2391
2392        /* the header must be checked already in ext4_ext_remove_space() */
2393        ext_debug("truncate since %u in leaf to %u\n", start, end);
2394        if (!path[depth].p_hdr)
2395                path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2396        eh = path[depth].p_hdr;
2397        if (unlikely(path[depth].p_hdr == NULL)) {
2398                EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2399                return -EIO;
2400        }
2401        /* find where to start removing */
2402        ex = EXT_LAST_EXTENT(eh);
2403
2404        ex_ee_block = le32_to_cpu(ex->ee_block);
2405        ex_ee_len = ext4_ext_get_actual_len(ex);
2406
2407        trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2408
2409        while (ex >= EXT_FIRST_EXTENT(eh) &&
2410                        ex_ee_block + ex_ee_len > start) {
2411
2412                if (ext4_ext_is_uninitialized(ex))
2413                        uninitialized = 1;
2414                else
2415                        uninitialized = 0;
2416
2417                ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2418                         uninitialized, ex_ee_len);
2419                path[depth].p_ext = ex;
2420
2421                a = ex_ee_block > start ? ex_ee_block : start;
2422                b = ex_ee_block+ex_ee_len - 1 < end ?
2423                        ex_ee_block+ex_ee_len - 1 : end;
2424
2425                ext_debug("  border %u:%u\n", a, b);
2426
2427                /* If this extent is beyond the end of the hole, skip it */
2428                if (end < ex_ee_block) {
2429                        ex--;
2430                        ex_ee_block = le32_to_cpu(ex->ee_block);
2431                        ex_ee_len = ext4_ext_get_actual_len(ex);
2432                        continue;
2433                } else if (b != ex_ee_block + ex_ee_len - 1) {
2434                        EXT4_ERROR_INODE(inode,
2435                                         "can not handle truncate %u:%u "
2436                                         "on extent %u:%u",
2437                                         start, end, ex_ee_block,
2438                                         ex_ee_block + ex_ee_len - 1);
2439                        err = -EIO;
2440                        goto out;
2441                } else if (a != ex_ee_block) {
2442                        /* remove tail of the extent */
2443                        num = a - ex_ee_block;
2444                } else {
2445                        /* remove whole extent: excellent! */
2446                        num = 0;
2447                }
2448                /*
2449                 * 3 for leaf, sb, and inode plus 2 (bmap and group
2450                 * descriptor) for each block group; assume two block
2451                 * groups plus ex_ee_len/blocks_per_block_group for
2452                 * the worst case
2453                 */
2454                credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2455                if (ex == EXT_FIRST_EXTENT(eh)) {
2456                        correct_index = 1;
2457                        credits += (ext_depth(inode)) + 1;
2458                }
2459                credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2460
2461                err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2462                if (err)
2463                        goto out;
2464
2465                err = ext4_ext_get_access(handle, inode, path + depth);
2466                if (err)
2467                        goto out;
2468
2469                err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2470                                         a, b);
2471                if (err)
2472                        goto out;
2473
2474                if (num == 0)
2475                        /* this extent is removed; mark slot entirely unused */
2476                        ext4_ext_store_pblock(ex, 0);
2477
2478                ex->ee_len = cpu_to_le16(num);
2479                /*
2480                 * Do not mark uninitialized if all the blocks in the
2481                 * extent have been removed.
2482                 */
2483                if (uninitialized && num)
2484                        ext4_ext_mark_uninitialized(ex);
2485                /*
2486                 * If the extent was completely released,
2487                 * we need to remove it from the leaf
2488                 */
2489                if (num == 0) {
2490                        if (end != EXT_MAX_BLOCKS - 1) {
2491                                /*
2492                                 * For hole punching, we need to scoot all the
2493                                 * extents up when an extent is removed so that
2494                                 * we don't have blank extents in the middle
2495                                 */
2496                                memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2497                                        sizeof(struct ext4_extent));
2498
2499                                /* Now get rid of the one at the end */
2500                                memset(EXT_LAST_EXTENT(eh), 0,
2501                                        sizeof(struct ext4_extent));
2502                        }
2503                        le16_add_cpu(&eh->eh_entries, -1);
2504                } else
2505                        *partial_cluster = 0;
2506
2507                err = ext4_ext_dirty(handle, inode, path + depth);
2508                if (err)
2509                        goto out;
2510
2511                ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2512                                ext4_ext_pblock(ex));
2513                ex--;
2514                ex_ee_block = le32_to_cpu(ex->ee_block);
2515                ex_ee_len = ext4_ext_get_actual_len(ex);
2516        }
2517
2518        if (correct_index && eh->eh_entries)
2519                err = ext4_ext_correct_indexes(handle, inode, path);
2520
2521        /*
2522         * If there is still an entry in the leaf node, check to see if
2523         * it references the partial cluster.  This is the only place
2524         * where it could; if it doesn't, we can free the cluster.
2525         */
2526        if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2527            (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2528             *partial_cluster)) {
2529                int flags = EXT4_FREE_BLOCKS_FORGET;
2530
2531                if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2532                        flags |= EXT4_FREE_BLOCKS_METADATA;
2533
2534                ext4_free_blocks(handle, inode, NULL,
2535                                 EXT4_C2B(sbi, *partial_cluster),
2536                                 sbi->s_cluster_ratio, flags);
2537                *partial_cluster = 0;
2538        }
2539
2540        /* if this leaf is free, then we should
2541         * remove it from the index block above */
2542        if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2543                err = ext4_ext_rm_idx(handle, inode, path + depth);
2544
2545out:
2546        return err;
2547}
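/*
 * Illustrative border computation (hypothetical values): truncating
 * from start = 105 against an extent covering blocks 100..119 yields
 * a = 105 and b = 119, so num = a - ex_ee_block = 5 blocks survive and
 * blocks 105..119 are handed to ext4_remove_blocks().
 */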
2548
2549/*
2550 * ext4_ext_more_to_rm:
2551 * returns 1 if the current index has to be freed (even partially)
2552 */
2553static int
2554ext4_ext_more_to_rm(struct ext4_ext_path *path)
2555{
2556        BUG_ON(path->p_idx == NULL);
2557
2558        if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2559                return 0;
2560
2561        /*
2562         * if a truncate on a deeper level happened, it wasn't partial,
2563         * so we have to consider the current index for truncation
2564         */
2565        if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2566                return 0;
2567        return 1;
2568}
2569
2570static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2571                                 ext4_lblk_t end)
2572{
2573        struct super_block *sb = inode->i_sb;
2574        int depth = ext_depth(inode);
2575        struct ext4_ext_path *path = NULL;
2576        ext4_fsblk_t partial_cluster = 0;
2577        handle_t *handle;
2578        int i = 0, err;
2579
2580        ext_debug("truncate since %u to %u\n", start, end);
2581
2582        /* probably the first extent we're gonna free will be the last in the block */
2583        handle = ext4_journal_start(inode, depth + 1);
2584        if (IS_ERR(handle))
2585                return PTR_ERR(handle);
2586
2587again:
2588        ext4_ext_invalidate_cache(inode);
2589
2590        trace_ext4_ext_remove_space(inode, start, depth);
2591
2592        /*
2593         * Check if we are removing extents inside the extent tree. If that
2594         * is the case, we are going to punch a hole inside the extent tree
2595         * so we have to check whether we need to split the extent covering
2596         * the last block to remove so we can easily remove the part of it
2597         * in ext4_ext_rm_leaf().
2598         */
2599        if (end < EXT_MAX_BLOCKS - 1) {
2600                struct ext4_extent *ex;
2601                ext4_lblk_t ee_block;
2602
2603                /* find extent for this block */
2604                path = ext4_ext_find_extent(inode, end, NULL);
2605                if (IS_ERR(path)) {
2606                        ext4_journal_stop(handle);
2607                        return PTR_ERR(path);
2608                }
2609                depth = ext_depth(inode);
2610                ex = path[depth].p_ext;
2611                if (!ex) {
2612                        ext4_ext_drop_refs(path);
2613                        kfree(path);
2614                        path = NULL;
2615                        goto cont;
2616                }
2617
2618                ee_block = le32_to_cpu(ex->ee_block);
2619
2620                /*
2621                 * See if the last block is inside the extent, if so split
2622                 * the extent at 'end' block so we can easily remove the
2623                 * tail of the first part of the split extent in
2624                 * ext4_ext_rm_leaf().
2625                 */
2626                if (end >= ee_block &&
2627                    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2628                        int split_flag = 0;
2629
2630                        if (ext4_ext_is_uninitialized(ex))
2631                                split_flag = EXT4_EXT_MARK_UNINIT1 |
2632                                             EXT4_EXT_MARK_UNINIT2;
2633
2634                        /*
2635                         * Split the extent in two so that 'end' is the last
2636                         * block in the first new extent
2637                         */
2638                        err = ext4_split_extent_at(handle, inode, path,
2639                                                end + 1, split_flag,
2640                                                EXT4_GET_BLOCKS_PRE_IO |
2641                                                EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2642
2643                        if (err < 0)
2644                                goto out;
2645                }
2646        }
2647cont:
2648
2649        /*
2650         * We start scanning from the right side, freeing all the blocks
2651         * after i_size and walking into the tree depth-wise.
2652         */
2653        depth = ext_depth(inode);
2654        if (path) {
2655                int k = i = depth;
2656                while (--k > 0)
2657                        path[k].p_block =
2658                                le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2659        } else {
2660                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2661                               GFP_NOFS);
2662                if (path == NULL) {
2663                        ext4_journal_stop(handle);
2664                        return -ENOMEM;
2665                }
2666                path[0].p_depth = depth;
2667                path[0].p_hdr = ext_inode_hdr(inode);
2668                i = 0;
2669
2670                if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2671                        err = -EIO;
2672                        goto out;
2673                }
2674        }
2675        err = 0;
2676
2677        while (i >= 0 && err == 0) {
2678                if (i == depth) {
2679                        /* this is leaf block */
2680                        err = ext4_ext_rm_leaf(handle, inode, path,
2681                                               &partial_cluster, start,
2682                                               end);
2683                        /* root level has p_bh == NULL, brelse() eats this */
2684                        brelse(path[i].p_bh);
2685                        path[i].p_bh = NULL;
2686                        i--;
2687                        continue;
2688                }
2689
2690                /* this is index block */
2691                if (!path[i].p_hdr) {
2692                        ext_debug("initialize header\n");
2693                        path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2694                }
2695
2696                if (!path[i].p_idx) {
2697                        /* this level hasn't been touched yet */
2698                        path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2699                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2700                        ext_debug("init index ptr: hdr 0x%p, num %d\n",
2701                                  path[i].p_hdr,
2702                                  le16_to_cpu(path[i].p_hdr->eh_entries));
2703                } else {
2704                        /* we were already here, so look at the next index */
2705                        path[i].p_idx--;
2706                }
2707
2708                ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2709                                i, EXT_FIRST_INDEX(path[i].p_hdr),
2710                                path[i].p_idx);
2711                if (ext4_ext_more_to_rm(path + i)) {
2712                        struct buffer_head *bh;
2713                        /* go to the next level */
2714                        ext_debug("move to level %d (block %llu)\n",
2715                                  i + 1, ext4_idx_pblock(path[i].p_idx));
2716                        memset(path + i + 1, 0, sizeof(*path));
2717                        bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2718                        if (!bh) {
2719                                /* should we reset i_size? */
2720                                err = -EIO;
2721                                break;
2722                        }
2723                        if (WARN_ON(i + 1 > depth)) {
2724                                err = -EIO;
2725                                break;
2726                        }
2727                        if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2728                                                        depth - i - 1, bh)) {
2729                                err = -EIO;
2730                                break;
2731                        }
2732                        path[i + 1].p_bh = bh;
2733
2734                        /* save actual number of indexes since this
2735                         * number is changed at the next iteration */
2736                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2737                        i++;
2738                } else {
2739                        /* we finished processing this index, go up */
2740                        if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2741                                /* index is empty, remove it;
2742                                 * the handle must already be prepared by
2743                                 * ext4_ext_rm_leaf() */
2744                                err = ext4_ext_rm_idx(handle, inode, path + i);
2745                        }
2746                        /* root level has p_bh == NULL, brelse() eats this */
2747                        brelse(path[i].p_bh);
2748                        path[i].p_bh = NULL;
2749                        i--;
2750                        ext_debug("return to level %d\n", i);
2751                }
2752        }
2753
2754        trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2755                        path->p_hdr->eh_entries);
2756
2757        /* If we still have something in the partial cluster and we have removed
2758         * even the first extent, then we should free the blocks in the partial
2759         * cluster as well. */
2760        if (partial_cluster && path->p_hdr->eh_entries == 0) {
2761                int flags = EXT4_FREE_BLOCKS_FORGET;
2762
2763                if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2764                        flags |= EXT4_FREE_BLOCKS_METADATA;
2765
2766                ext4_free_blocks(handle, inode, NULL,
2767                                 EXT4_C2B(EXT4_SB(sb), partial_cluster),
2768                                 EXT4_SB(sb)->s_cluster_ratio, flags);
2769                partial_cluster = 0;
2770        }
2771
2772        /* TODO: flexible tree reduction should be here */
2773        if (path->p_hdr->eh_entries == 0) {
2774                /*
2775                 * truncate to zero freed all the tree,
2776                 * so we need to correct eh_depth
2777                 */
2778                err = ext4_ext_get_access(handle, inode, path);
2779                if (err == 0) {
2780                        ext_inode_hdr(inode)->eh_depth = 0;
2781                        ext_inode_hdr(inode)->eh_max =
2782                                cpu_to_le16(ext4_ext_space_root(inode, 0));
2783                        err = ext4_ext_dirty(handle, inode, path);
2784                }
2785        }
2786out:
2787        ext4_ext_drop_refs(path);
2788        kfree(path);
2789        if (err == -EAGAIN) {
2790                path = NULL;
2791                goto again;
2792        }
2793        ext4_journal_stop(handle);
2794
2795        return err;
2796}
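/*
 * Illustrative walk (depth-2 tree): the loop above starts at the root
 * (i = 0), descends along the rightmost indexes until it reaches a
 * leaf (i == depth), removes extents there via ext4_ext_rm_leaf(),
 * then climbs back up, calling ext4_ext_rm_idx() for any index block
 * that became empty on the way.
 */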
2797
2798/*
2799 * called at mount time
2800 */
2801void ext4_ext_init(struct super_block *sb)
2802{
2803        /*
2804         * possible initialization would be here
2805         */
2806
2807        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2808#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2809                printk(KERN_INFO "EXT4-fs: file extents enabled"
2810#ifdef AGGRESSIVE_TEST
2811                       ", aggressive tests"
2812#endif
2813#ifdef CHECK_BINSEARCH
2814                       ", check binsearch"
2815#endif
2816#ifdef EXTENTS_STATS
2817                       ", stats"
2818#endif
2819                       "\n");
2820#endif
2821#ifdef EXTENTS_STATS
2822                spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2823                EXT4_SB(sb)->s_ext_min = 1 << 30;
2824                EXT4_SB(sb)->s_ext_max = 0;
2825#endif
2826        }
2827}
2828
2829/*
2830 * called at umount time
2831 */
2832void ext4_ext_release(struct super_block *sb)
2833{
2834        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2835                return;
2836
2837#ifdef EXTENTS_STATS
2838        if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2839                struct ext4_sb_info *sbi = EXT4_SB(sb);
2840                printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2841                        sbi->s_ext_blocks, sbi->s_ext_extents,
2842                        sbi->s_ext_blocks / sbi->s_ext_extents);
2843                printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2844                        sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2845        }
2846#endif
2847}
2848
2849/* FIXME!! we need to try to merge to left or right after zero-out */
2850static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2851{
2852        ext4_fsblk_t ee_pblock;
2853        unsigned int ee_len;
2854        int ret;
2855
2856        ee_len    = ext4_ext_get_actual_len(ex);
2857        ee_pblock = ext4_ext_pblock(ex);
2858
2859        ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2860        if (ret > 0)
2861                ret = 0;
2862
2863        return ret;
2864}
2865
2866/*
2867 * ext4_split_extent_at() splits an extent at a given block.
2868 *
2869 * @handle: the journal handle
2870 * @inode: the file inode
2871 * @path: the path to the extent
2872 * @split: the logical block where the extent is split.
2873 * @split_flag: indicates if the extent could be zeroed out if the split
2874 *              fails, and the states (init or uninit) of the new extents.
2875 * @flags: flags used to insert the new extent into the extent tree.
2876 *
2877 *
2878 * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
2879 * states are determined by @split_flag.
2880 *
2881 * There are two cases:
2882 *  a> the extent is split into two extents.
2883 *  b> no split is needed, and the extent is just marked.
2884 *
2885 * return 0 on success.
2886 */
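/*
 * Worked example (illustrative numbers only): take an extent covering
 * logical blocks [100, 120) mapped to physical blocks [5000, 5020).
 * Splitting at @split == 108 shrinks the original extent to
 * [100, 108) -> [5000, 5008) and inserts a new extent
 * [108, 120) -> [5008, 5020); the new start block is computed below as
 * newblock = split - ee_block + ext4_ext_pblock(ex)
 *          = 108 - 100 + 5000 = 5008.
 */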
2887static int ext4_split_extent_at(handle_t *handle,
2888                             struct inode *inode,
2889                             struct ext4_ext_path *path,
2890                             ext4_lblk_t split,
2891                             int split_flag,
2892                             int flags)
2893{
2894        ext4_fsblk_t newblock;
2895        ext4_lblk_t ee_block;
2896        struct ext4_extent *ex, newex, orig_ex;
2897        struct ext4_extent *ex2 = NULL;
2898        unsigned int ee_len, depth;
2899        int err = 0;
2900
2901        BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
2902               (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
2903
2904        ext_debug("ext4_split_extent_at: inode %lu, logical "
2905                "block %llu\n", inode->i_ino, (unsigned long long)split);
2906
2907        ext4_ext_show_leaf(inode, path);
2908
2909        depth = ext_depth(inode);
2910        ex = path[depth].p_ext;
2911        ee_block = le32_to_cpu(ex->ee_block);
2912        ee_len = ext4_ext_get_actual_len(ex);
2913        newblock = split - ee_block + ext4_ext_pblock(ex);
2914
2915        BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2916
2917        err = ext4_ext_get_access(handle, inode, path + depth);
2918        if (err)
2919                goto out;
2920
2921        if (split == ee_block) {
2922                /*
2923                 * case b: block @split is the block that the extent begins with
2924                 * then we just change the state of the extent, and splitting
2925                 * is not needed.
2926                 */
2927                if (split_flag & EXT4_EXT_MARK_UNINIT2)
2928                        ext4_ext_mark_uninitialized(ex);
2929                else
2930                        ext4_ext_mark_initialized(ex);
2931
2932                if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2933                        ext4_ext_try_to_merge(inode, path, ex);
2934
2935                err = ext4_ext_dirty(handle, inode, path + depth);
2936                goto out;
2937        }
2938
2939        /* case a */
2940        memcpy(&orig_ex, ex, sizeof(orig_ex));
2941        ex->ee_len = cpu_to_le16(split - ee_block);
2942        if (split_flag & EXT4_EXT_MARK_UNINIT1)
2943                ext4_ext_mark_uninitialized(ex);
2944
2945        /*
2946         * path may lead to new leaf, not to original leaf any more
2947         * after ext4_ext_insert_extent() returns,
2948         */
2949        err = ext4_ext_dirty(handle, inode, path + depth);
2950        if (err)
2951                goto fix_extent_len;
2952
2953        ex2 = &newex;
2954        ex2->ee_block = cpu_to_le32(split);
2955        ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
2956        ext4_ext_store_pblock(ex2, newblock);
2957        if (split_flag & EXT4_EXT_MARK_UNINIT2)
2958                ext4_ext_mark_uninitialized(ex2);
2959
2960        err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2961        if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2962                if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
2963                        if (split_flag & EXT4_EXT_DATA_VALID1)
2964                                err = ext4_ext_zeroout(inode, ex2);
2965                        else
2966                                err = ext4_ext_zeroout(inode, ex);
2967                } else
2968                        err = ext4_ext_zeroout(inode, &orig_ex);
2969
2970                if (err)
2971                        goto fix_extent_len;
2972                /* update the extent length and mark as initialized */
2973                ex->ee_len = cpu_to_le16(ee_len);
2974                ext4_ext_try_to_merge(inode, path, ex);
2975                err = ext4_ext_dirty(handle, inode, path + depth);
2976                goto out;
2977        } else if (err)
2978                goto fix_extent_len;
2979
2980out:
2981        ext4_ext_show_leaf(inode, path);
2982        return err;
2983
2984fix_extent_len:
2985        ex->ee_len = orig_ex.ee_len;
2986        ext4_ext_dirty(handle, inode, path + depth);
2987        return err;
2988}
2989
2990/*
2991 * ext4_split_extent() splits an extent and marks the extent covered
2992 * by @map as @split_flag indicates.
2993 *
2994 * It may result in splitting the extent into multiple extents (up to three).
2995 * There are three possibilities:
2996 *   a> There is no split required
2997 *   b> Splits in two extents: Split is happening at either end of the extent
2998 *   c> Splits in three extents: Someone is splitting in the middle of the extent
2999 *
3000 */
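/*
 * Worked example (illustrative numbers only): with an extent covering
 * logical blocks [0, 100) and @map spanning [30, 50), the code below
 * first splits at map->m_lblk + map->m_len == 50 (since 50 lies inside
 * the extent), re-finds the path, then splits at map->m_lblk == 30,
 * leaving the three extents [0, 30), [30, 50) and [50, 100).
 */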
3001static int ext4_split_extent(handle_t *handle,
3002                              struct inode *inode,
3003                              struct ext4_ext_path *path,
3004                              struct ext4_map_blocks *map,
3005                              int split_flag,
3006                              int flags)
3007{
3008        ext4_lblk_t ee_block;
3009        struct ext4_extent *ex;
3010        unsigned int ee_len, depth;
3011        int err = 0;
3012        int uninitialized;
3013        int split_flag1, flags1;
3014
3015        depth = ext_depth(inode);
3016        ex = path[depth].p_ext;
3017        ee_block = le32_to_cpu(ex->ee_block);
3018        ee_len = ext4_ext_get_actual_len(ex);
3019        uninitialized = ext4_ext_is_uninitialized(ex);
3020
3021        if (map->m_lblk + map->m_len < ee_block + ee_len) {
3022                split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3023                flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3024                if (uninitialized)
3025                        split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3026                                       EXT4_EXT_MARK_UNINIT2;
3027                if (split_flag & EXT4_EXT_DATA_VALID2)
3028                        split_flag1 |= EXT4_EXT_DATA_VALID1;
3029                err = ext4_split_extent_at(handle, inode, path,
3030                                map->m_lblk + map->m_len, split_flag1, flags1);
3031                if (err)
3032                        goto out;
3033        }
3034
3035        ext4_ext_drop_refs(path);
3036        path = ext4_ext_find_extent(inode, map->m_lblk, path);
3037        if (IS_ERR(path))
3038                return PTR_ERR(path);
3039
3040        if (map->m_lblk >= ee_block) {
3041                split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
3042                                            EXT4_EXT_DATA_VALID2);
3043                if (uninitialized)
3044                        split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3045                if (split_flag & EXT4_EXT_MARK_UNINIT2)
3046                        split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3047                err = ext4_split_extent_at(handle, inode, path,
3048                                map->m_lblk, split_flag1, flags);
3049                if (err)
3050                        goto out;
3051        }
3052
3053        ext4_ext_show_leaf(inode, path);
3054out:
3055        return err ? err : map->m_len;
3056}
3057
3058#define EXT4_EXT_ZERO_LEN 7
3059/*
3060 * This function is called by ext4_ext_map_blocks() if someone tries to write
3061 * to an uninitialized extent. It may result in splitting the uninitialized
3062 * extent into multiple extents (up to three - one initialized and two
3063 * uninitialized).
3064 * There are three possibilities:
3065 *   a> There is no split required: Entire extent should be initialized
3066 *   b> Splits in two extents: Write is happening at either end of the extent
3067 *   c> Splits in three extents: Someone is writing in the middle of the extent
3068 *
3069 * Pre-conditions:
3070 *  - The extent pointed to by 'path' is uninitialized.
3071 *  - The extent pointed to by 'path' contains a superset
3072 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3073 *
3074 * Post-conditions on success:
3075 *  - the returned value is the number of blocks beyond map->m_lblk
3076 *    that are allocated and initialized.
3077 *    It is guaranteed to be >= map->m_len.
3078 */
3079static int ext4_ext_convert_to_initialized(handle_t *handle,
3080                                           struct inode *inode,
3081                                           struct ext4_map_blocks *map,
3082                                           struct ext4_ext_path *path)
3083{
3084        struct ext4_extent_header *eh;
3085        struct ext4_map_blocks split_map;
3086        struct ext4_extent zero_ex;
3087        struct ext4_extent *ex;
3088        ext4_lblk_t ee_block, eof_block;
3089        unsigned int ee_len, depth;
3090        int allocated;
3091        int err = 0;
3092        int split_flag = 0;
3093
3094        ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3095                "block %llu, max_blocks %u\n", inode->i_ino,
3096                (unsigned long long)map->m_lblk, map->m_len);
3097
3098        eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3099                inode->i_sb->s_blocksize_bits;
3100        if (eof_block < map->m_lblk + map->m_len)
3101                eof_block = map->m_lblk + map->m_len;
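        /*
         * E.g. (illustrative numbers only): with a 4096-byte block size
         * and i_size == 10000, the computation above yields eof_block =
         * (10000 + 4095) >> 12 = 3; the check above then raises it to
         * the end of the requested range if the write extends the file.
         */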
3102
3103        depth = ext_depth(inode);
3104        eh = path[depth].p_hdr;
3105        ex = path[depth].p_ext;
3106        ee_block = le32_to_cpu(ex->ee_block);
3107        ee_len = ext4_ext_get_actual_len(ex);
3108        allocated = ee_len - (map->m_lblk - ee_block);
3109
3110        trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3111
3112        /* Pre-conditions */
3113        BUG_ON(!ext4_ext_is_uninitialized(ex));
3114        BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3115
3116        /*
3117         * Attempt to transfer newly initialized blocks from the currently
3118         * uninitialized extent to its left neighbor. This is much cheaper
3119         * than an insertion followed by a merge as those involve costly
3120         * memmove() calls. This is the common case in steady state for
3121         * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3122         * writes.
3123         *
3124         * Limitations of the current logic:
3125         *  - L1: we only deal with writes at the start of the extent.
3126         *    The approach could be extended to writes at the end
3127         *    of the extent but this scenario was deemed less common.
3128         *  - L2: we do not deal with writes covering the whole extent.
3129         *    This would require removing the extent if the transfer
3130         *    is possible.
3131         *  - L3: we only attempt to merge with an extent stored in the
3132         *    same extent tree node.
3133         */
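        /*
         * Worked example (illustrative numbers only): prev_ex covers
         * [0, 10) initialized, ex covers [10, 30) uninitialized and is
         * physically adjacent, and the write is [10, 14).  Conditions
         * C1-C4 below then hold, so the 4 written blocks are
         * transferred: prev_ex grows to [0, 14) and ex shrinks to
         * [14, 30), still uninitialized, with no insert or merge needed.
         */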
3134        if ((map->m_lblk == ee_block) &&        /*L1*/
3135                (map->m_len < ee_len) &&        /*L2*/
3136                (ex > EXT_FIRST_EXTENT(eh))) {  /*L3*/
3137                struct ext4_extent *prev_ex;
3138                ext4_lblk_t prev_lblk;
3139                ext4_fsblk_t prev_pblk, ee_pblk;
3140                unsigned int prev_len, write_len;
3141
3142                prev_ex = ex - 1;
3143                prev_lblk = le32_to_cpu(prev_ex->ee_block);
3144                prev_len = ext4_ext_get_actual_len(prev_ex);
3145                prev_pblk = ext4_ext_pblock(prev_ex);
3146                ee_pblk = ext4_ext_pblock(ex);
3147                write_len = map->m_len;
3148
3149                /*
3150                 * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3151                 * under the following conditions:
3152                 * - C1: prev_ex is initialized,
3153                 * - C2: prev_ex is logically abutting ex,
3154                 * - C3: prev_ex is physically abutting ex,
3155                 * - C4: prev_ex can receive the additional blocks without
3156                 *   overflowing the (initialized) length limit.
3157                 */
3158                if ((!ext4_ext_is_uninitialized(prev_ex)) &&            /*C1*/
3159                        ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3160                        ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3161                        (prev_len < (EXT_INIT_MAX_LEN - write_len))) {  /*C4*/
3162                        err = ext4_ext_get_access(handle, inode, path + depth);
3163                        if (err)
3164                                goto out;
3165
3166                        trace_ext4_ext_convert_to_initialized_fastpath(inode,
3167                                map, ex, prev_ex);
3168
3169                        /* Shift the start of ex by 'write_len' blocks */
3170                        ex->ee_block = cpu_to_le32(ee_block + write_len);
3171                        ext4_ext_store_pblock(ex, ee_pblk + write_len);
3172                        ex->ee_len = cpu_to_le16(ee_len - write_len);
3173                        ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3174
3175                        /* Extend prev_ex by 'write_len' blocks */
3176                        prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3177
3178                        /* Mark the block containing both extents as dirty */
3179                        ext4_ext_dirty(handle, inode, path + depth);
3180
3181                        /* Update path to point to the right extent */
3182                        path[depth].p_ext = prev_ex;
3183
3184                        /* Result: number of initialized blocks past m_lblk */
3185                        allocated = write_len;
3186                        goto out;
3187                }
3188        }
3189
3190        WARN_ON(map->m_lblk < ee_block);
3191        /*
3192         * It is safe to convert the extent to initialized via explicit
3193         * zeroout only if the extent is fully inside i_size or new_size.
3194         */
3195        split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3196
3197        /* If the extent has at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
3198        if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
3199            (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3200                err = ext4_ext_zeroout(inode, ex);
3201                if (err)
3202                        goto out;
3203
3204                err = ext4_ext_get_access(handle, inode, path + depth);
3205                if (err)
3206                        goto out;
3207                ext4_ext_mark_initialized(ex);
3208                ext4_ext_try_to_merge(inode, path, ex);
3209                err = ext4_ext_dirty(handle, inode, path + depth);
3210                goto out;
3211        }
3212
3213        /*
3214         * four cases:
3215         * 1. split the extent into three extents.
3216         * 2. split the extent into two extents, zeroout the first half.
3217         * 3. split the extent into two extents, zeroout the second half.
3218         * 4. split the extent into two extents without zeroout.
3219         */
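        /*
         * Worked example (illustrative numbers only): ex covers [0, 10)
         * uninitialized, the write is [4, 8) and zeroout is allowed.
         * Then allocated = 10 - 4 = 6 <= EXT4_EXT_ZERO_LEN, so case 3
         * applies below: the tail [4, 10) is zeroed out and split_map
         * grows to cover it, leaving [0, 4) uninitialized and [4, 10)
         * initialized after a single split.
         */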
3220        split_map.m_lblk = map->m_lblk;
3221        split_map.m_len = map->m_len;
3222
3223        if (allocated > map->m_len) {
3224                if (allocated <= EXT4_EXT_ZERO_LEN &&
3225                    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3226                        /* case 3 */
3227                        zero_ex.ee_block =
3228                                         cpu_to_le32(map->m_lblk);
3229                        zero_ex.ee_len = cpu_to_le16(allocated);
3230                        ext4_ext_store_pblock(&zero_ex,
3231                                ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3232                        err = ext4_ext_zeroout(inode, &zero_ex);
3233                        if (err)
3234                                goto out;
3235                        split_map.m_lblk = map->m_lblk;
3236                        split_map.m_len = allocated;
3237                } else if ((map->m_lblk - ee_block + map->m_len <
3238                           EXT4_EXT_ZERO_LEN) &&
3239                           (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3240                        /* case 2 */
3241                        if (map->m_lblk != ee_block) {
3242                                zero_ex.ee_block = ex->ee_block;
3243                                zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3244                                                        ee_block);
3245                                ext4_ext_store_pblock(&zero_ex,
3246                                                      ext4_ext_pblock(ex));
3247                                err = ext4_ext_zeroout(inode, &zero_ex);
3248                                if (err)
3249                                        goto out;
3250                        }
3251
3252                        split_map.m_lblk = ee_block;
3253                        split_map.m_len = map->m_lblk - ee_block + map->m_len;
3254                        allocated = map->m_len;
3255                }
3256        }
3257
3258        allocated = ext4_split_extent(handle, inode, path,
3259                                       &split_map, split_flag, 0);
3260        if (allocated < 0)
3261                err = allocated;
3262
3263out:
3264        return err ? err : allocated;
3265}
3266
3267/*
3268 * This function is called by ext4_ext_map_blocks() from
3269 * ext4_get_blocks_dio_write() when a DIO write targets
3270 * an uninitialized extent.
3271 *
3272 * Writing to an uninitialized extent may result in splitting the uninitialized
3273 * extent into multiple initialized/uninitialized extents (up to three).
3274 * There are three possibilities:
3275 *   a> There is no split required: Entire extent should be uninitialized
3276 *   b> Splits in two extents: Write is happening at either end of the extent
3277 *   c> Splits in three extents: Someone is writing in the middle of the extent
3278 *
3279 * One or more index blocks may be needed if the extent tree grows after
3280 * the uninitialized extent is split. To prevent ENOSPC at IO completion
3281 * time, we split the uninitialized extent before the DIO is submitted.
3282 * The uninitialized extent handled here will be split into at most three
3283 * uninitialized extents. After the IO completes, the part that was filled
3284 * will be converted to initialized by the end_io callback function
3285 * via ext4_convert_unwritten_extents().
3286 *
3287 * Returns the size of the uninitialized extent to be written on success.
3288 */
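/*
 * For instance (illustrative numbers only): a DIO write to [30, 50)
 * inside an uninitialized extent [0, 100) is split into the
 * uninitialized extents [0, 30), [30, 50) and [50, 100); only [30, 50)
 * is later converted to initialized by the end_io callback.
 */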
3289static int ext4_split_unwritten_extents(handle_t *handle,
3290                                        struct inode *inode,
3291                                        struct ext4_map_blocks *map,
3292                                        struct ext4_ext_path *path,
3293                                        int flags)
3294{
3295        ext4_lblk_t eof_block;
3296        ext4_lblk_t ee_block;
3297        struct ext4_extent *ex;
3298        unsigned int ee_len;
3299        int split_flag = 0, depth;
3300
3301        ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3302                "block %llu, max_blocks %u\n", inode->i_ino,
3303                (unsigned long long)map->m_lblk, map->m_len);
3304
3305        eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3306                inode->i_sb->s_blocksize_bits;
3307        if (eof_block < map->m_lblk + map->m_len)
3308                eof_block = map->m_lblk + map->m_len;
3309        /*
3310         * It is safe to convert the extent to initialized via explicit
3311         * zeroout only if the extent is fully inside i_size or new_size.
3312         */
3313        depth = ext_depth(inode);
3314        ex = path[depth].p_ext;
3315        ee_block = le32_to_cpu(ex->ee_block);
3316        ee_len = ext4_ext_get_actual_len(ex);
3317
3318        split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3319        split_flag |= EXT4_EXT_MARK_UNINIT2;
3320        if (flags & EXT4_GET_BLOCKS_CONVERT)
3321                split_flag |= EXT4_EXT_DATA_VALID2;
3322        flags |= EXT4_GET_BLOCKS_PRE_IO;
3323        return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3324}
3325
3326static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3327                                                struct inode *inode,
3328                                                struct ext4_map_blocks *map,
3329                                                struct ext4_ext_path *path)
3330{
3331        struct ext4_extent *ex;
3332        ext4_lblk_t ee_block;
3333        unsigned int ee_len;
3334        int depth;
3335        int err = 0;
3336
3337        depth = ext_depth(inode);
3338        ex = path[depth].p_ext;
3339        ee_block = le32_to_cpu(ex->ee_block);
3340        ee_len = ext4_ext_get_actual_len(ex);
3341
3342        ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3343                "block %llu, max_blocks %u\n", inode->i_ino,
3344                  (unsigned long long)ee_block, ee_len);
3345
3346        /* If the extent is larger than requested, then a split is required */
3347        if (ee_block != map->m_lblk || ee_len > map->m_len) {
3348                err = ext4_split_unwritten_extents(handle, inode, map, path,
3349                                                   EXT4_GET_BLOCKS_CONVERT);
3350                if (err < 0)
3351                        goto out;
3352                ext4_ext_drop_refs(path);
3353                path = ext4_ext_find_extent(inode, map->m_lblk, path);
3354                if (IS_ERR(path)) {
3355                        err = PTR_ERR(path);
3356                        goto out;
3357                }
3358                depth = ext_depth(inode);
3359                ex = path[depth].p_ext;
3360        }
3361
3362        err = ext4_ext_get_access(handle, inode, path + depth);
3363        if (err)
3364                goto out;
3365        /* first mark the extent as initialized */
3366        ext4_ext_mark_initialized(ex);
3367
3368        /* note: ext4_ext_correct_indexes() isn't needed here because
3369         * borders are not changed
3370         */
3371        ext4_ext_try_to_merge(inode, path, ex);
3372
3373        /* Mark modified extent as dirty */
3374        err = ext4_ext_dirty(handle, inode, path + depth);
3375out:
3376        ext4_ext_show_leaf(inode, path);
3377        return err;
3378}
3379
3380static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3381                        sector_t block, int count)
3382{
3383        int i;
3384        for (i = 0; i < count; i++)
3385                unmap_underlying_metadata(bdev, block + i);
3386}
3387
3388/*
3389 * Handle EOFBLOCKS_FL flag, clearing it if necessary
3390 */
3391static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3392                              ext4_lblk_t lblk,
3393                              struct ext4_ext_path *path,
3394                              unsigned int len)
3395{
3396        int i, depth;
3397        struct ext4_extent_header *eh;
3398        struct ext4_extent *last_ex;
3399
3400        if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3401                return 0;
3402
3403        depth = ext_depth(inode);
3404        eh = path[depth].p_hdr;
3405
3406        /*
3407         * We're going to remove EOFBLOCKS_FL entirely in the future, so we
3408         * do not care about this case anymore. Simply remove the flag
3409         * if there are no extents.
3410         */
3411        if (unlikely(!eh->eh_entries))
3412                goto out;
3413        last_ex = EXT_LAST_EXTENT(eh);
3414        /*
3415         * We should clear the EOFBLOCKS_FL flag if we are writing the
3416         * last block in the last extent in the file.  We test this by
3417         * first checking to see if the caller to
3418         * ext4_ext_get_blocks() was interested in the last block (or
3419         * a block beyond the last block) in the current extent.  If
3420         * this turns out to be false, we can bail out from this
3421         * function immediately.
3422         */
3423        if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3424            ext4_ext_get_actual_len(last_ex))
3425                return 0;
3426        /*
3427         * If the caller does appear to be planning to write at or
3428         * beyond the end of the current extent, we then test to see
3429         * if the current extent is the last extent in the file, by
3430         * checking to make sure it was reached via the rightmost node
3431         * at each level of the tree.
3432         */
3433        for (i = depth-1; i >= 0; i--)
3434                if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3435                        return 0;
3436out:
3437        ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3438        return ext4_mark_inode_dirty(handle, inode);
3439}
3440
3441/**
3442 * ext4_find_delalloc_range: find delayed allocated block in the given range.
3443 *
3444 * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
3445 * whether there are any buffers marked for delayed allocation. It returns '1'
3446 * on the first delalloc'ed buffer head found. If no buffer head in the given
3447 * range is marked for delalloc, it returns 0.
3448 * lblk_start should always be <= lblk_end.
3449 * search_hint_reverse is to indicate that searching in reverse from lblk_end to
3450 * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
3451 * block sooner). This is useful when blocks are truncated sequentially from
3452 * lblk_start towards lblk_end.
3453 */
3454static int ext4_find_delalloc_range(struct inode *inode,
3455                                    ext4_lblk_t lblk_start,
3456                                    ext4_lblk_t lblk_end,
3457                                    int search_hint_reverse)
3458{
3459        struct address_space *mapping = inode->i_mapping;
3460        struct buffer_head *head, *bh = NULL;
3461        struct page *page;
3462        ext4_lblk_t i, pg_lblk;
3463        pgoff_t index;
3464
3465        if (!test_opt(inode->i_sb, DELALLOC))
3466                return 0;
3467
3468        /* reverse search won't work if fs block size is less than page size */
3469        if (inode->i_blkbits < PAGE_CACHE_SHIFT)
3470                search_hint_reverse = 0;
3471
3472        if (search_hint_reverse)
3473                i = lblk_end;
3474        else
3475                i = lblk_start;
3476
3477        index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
3478
3479        while ((i >= lblk_start) && (i <= lblk_end)) {
3480                page = find_get_page(mapping, index);
3481                if (!page)
3482                        goto nextpage;
3483
3484                if (!page_has_buffers(page))
3485                        goto nextpage;
3486
3487                head = page_buffers(page);
3488                if (!head)
3489                        goto nextpage;
3490
3491                bh = head;
3492                pg_lblk = index << (PAGE_CACHE_SHIFT -
3493                                                inode->i_blkbits);
3494                do {
3495                        if (unlikely(pg_lblk < lblk_start)) {
3496                                /*
3497                                 * This is possible when fs block size is less
3498                                 * than the page size and our cluster starts/ends
3499                                 * in the middle of the page. So we need to skip
3500                                 * the initial few blocks till we reach 'lblk'.
3501                                 */
3502                                pg_lblk++;
3503                                continue;
3504                        }
3505
3506                        /* Check if the buffer is delayed allocated and that it
3507                         * is not yet mapped. (when da-buffers are mapped during
3508                         * their writeout, their da_mapped bit is set.)
3509                         */
3510                        if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3511                                page_cache_release(page);
3512                                trace_ext4_find_delalloc_range(inode,
3513                                                lblk_start, lblk_end,
3514                                                search_hint_reverse,
3515                                                1, i);
3516                                return 1;
3517                        }
3518                        if (search_hint_reverse)
3519                                i--;
3520                        else
3521                                i++;
3522                } while ((i >= lblk_start) && (i <= lblk_end) &&
3523                                ((bh = bh->b_this_page) != head));
3524nextpage:
3525                if (page)
3526                        page_cache_release(page);
3527                /*
3528                 * Move to next page. 'i' will be the first lblk in the next
3529                 * page.
3530                 */
3531                if (search_hint_reverse)
3532                        index--;
3533                else
3534                        index++;
3535                i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
3536        }
3537
3538        trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3539                                        search_hint_reverse, 0, 0);
3540        return 0;
3541}
3542
3543int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
3544                               int search_hint_reverse)
3545{
3546        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3547        ext4_lblk_t lblk_start, lblk_end;
3548        lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3549        lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3550
3551        return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3552                                        search_hint_reverse);
3553}
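/*
 * Example of the mask arithmetic above (illustrative numbers only):
 * with an 8-block cluster (s_cluster_ratio == 8) and lblk == 21,
 * lblk_start = 21 & ~7 = 16 and lblk_end = 16 + 8 - 1 = 23, i.e. the
 * whole cluster containing block 21 is searched for delalloc buffers.
 */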
3554
3555/**
3556 * Determines how many complete clusters (out of those specified by the 'map')
3557 * are under delalloc and for which quota was reserved.
3558 * This function is called when we are writing out the blocks that were
3559 * originally written with their allocation delayed, but then the space was
3560 * allocated using fallocate() before the delayed allocation could be resolved.
3561 * The cases to look for are:
3562 * ('=' indicates delayed allocated blocks
3563 *  '-' indicates non-delayed allocated blocks)
3564 * (a) partial clusters towards beginning and/or end outside of allocated range
3565 *     are not delalloc'ed.
3566 *      Ex:
3567 *      |----c---=|====c====|====c====|===-c----|
3568 *               |++++++ allocated ++++++|
3569 *      ==> 4 complete clusters in above example
3570 *
3571 * (b) partial cluster (outside of allocated range) towards either end is
3572 *     marked for delayed allocation. In this case, we will exclude that
3573 *     cluster.
3574 *      Ex:
3575 *      |----====c========|========c========|
3576 *           |++++++ allocated ++++++|
3577 *      ==> 1 complete cluster in above example
3578 *
3579 *      Ex:
3580 *      |================c================|
3581 *            |++++++ allocated ++++++|
3582 *      ==> 0 complete clusters in above example
3583 *
3584 * The ext4_da_update_reserve_space will be called only if we
3585 * determine here that there were some "entire" clusters that span
3586 * this 'allocated' range.
3587 * In the non-bigalloc case, this function will just end up returning num_blks
3588 * without ever calling ext4_find_delalloc_range.
3589 */
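/*
 * Worked example (illustrative numbers only): with a 16-block cluster,
 * lblk_start == 20 and num_blks == 40 (blocks 20..59), the code below
 * computes alloc_cluster_start = EXT4_B2C(20) = 1 and alloc_cluster_end =
 * EXT4_B2C(59) = 3, so at most 3 clusters.  The left check then scans
 * blocks 16..19 and the right check scans blocks 60..63; each partial
 * range found to be delalloc'ed drops one cluster from the count.
 */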
3590static unsigned int
3591get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3592                           unsigned int num_blks)
3593{
3594        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3595        ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3596        ext4_lblk_t lblk_from, lblk_to, c_offset;
3597        unsigned int allocated_clusters = 0;
3598
3599        alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3600        alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3601
3602        /* max possible clusters for this allocation */
3603        allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3604
3605        trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3606
3607        /* Check towards left side */
3608        c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3609        if (c_offset) {
3610                lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3611                lblk_to = lblk_from + c_offset - 1;
3612
3613                if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3614                        allocated_clusters--;
3615        }
3616
3617        /* Now check towards right. */
3618        c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3619        if (allocated_clusters && c_offset) {
3620                lblk_from = lblk_start + num_blks;
3621                lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3622
3623                if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3624                        allocated_clusters--;
3625        }
3626
3627        return allocated_clusters;
3628}
3629
3630static int
3631ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3632                        struct ext4_map_blocks *map,
3633                        struct ext4_ext_path *path, int flags,
3634                        unsigned int allocated, ext4_fsblk_t newblock)
3635{
3636        int ret = 0;
3637        int err = 0;
3638        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3639
3640        ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3641                  "block %llu, max_blocks %u, flags %x, allocated %u\n",
3642                  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3643                  flags, allocated);
3644        ext4_ext_show_leaf(inode, path);
3645
3646        trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3647                                                    newblock);
3648
3649        /* get_block() before submitting the IO, split the extent */
3650        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3651                ret = ext4_split_unwritten_extents(handle, inode, map,
3652                                                   path, flags);
3653                /*
3654                 * Flag the inode (non-AIO case) or end_io struct (AIO case)
3655                 * that this IO needs conversion to written when the IO is
3656                 * completed
3657                 */
3658                if (io)
3659                        ext4_set_io_unwritten_flag(inode, io);
3660                else
3661                        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3662                if (ext4_should_dioread_nolock(inode))
3663                        map->m_flags |= EXT4_MAP_UNINIT;
3664                goto out;
3665        }
3666        /* IO end_io completed; convert the filled extent to written */
3667        if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3668                ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3669                                                        path);
3670                if (ret >= 0) {
3671                        ext4_update_inode_fsync_trans(handle, inode, 1);
3672                        err = check_eofblocks_fl(handle, inode, map->m_lblk,
3673                                                 path, map->m_len);
3674                } else
3675                        err = ret;
3676                goto out2;
3677        }
3678        /* buffered IO case */
3679        /*
3680         * a repeated fallocate creation request:
3681         * we already have an unwritten extent
3682         */
3683        if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3684                goto map_out;
3685
3686        /* buffered READ or buffered write_begin() lookup */
3687        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3688                /*
3689                 * We have blocks reserved already.  We
3690                 * return allocated blocks so that delalloc
3691                 * won't do block reservation for us.  But
3692                 * the buffer head will be unmapped so that
3693                 * a read from the block returns 0s.
3694                 */
3695                map->m_flags |= EXT4_MAP_UNWRITTEN;
3696                goto out1;
3697        }
3698
3699        /* buffered write, writepage time, convert */
3700        ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3701        if (ret >= 0)
3702                ext4_update_inode_fsync_trans(handle, inode, 1);
3703out:
3704        if (ret <= 0) {
3705                err = ret;
3706                goto out2;
3707        } else
3708                allocated = ret;
3709        map->m_flags |= EXT4_MAP_NEW;
3710        /*
3711         * if we allocated more blocks than requested
3712         * we need to make sure we unmap the extra blocks
3713         * allocated. The actually needed blocks will get
3714         * unmapped later when we find the buffer_head marked
3715         * new.
3716         */
3717        if (allocated > map->m_len) {
3718                unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3719                                        newblock + map->m_len,
3720                                        allocated - map->m_len);
3721                allocated = map->m_len;
3722        }
3723
3724        /*
3725         * If we have done fallocate at an offset that is already
3726         * delayed allocated, we would have block reservation
3727         * and quota reservation done in the delayed write path.
3728         * But fallocate would have already updated the quota and block
3729         * count for this offset. So cancel these reservations.
3730         */
3731        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3732                unsigned int reserved_clusters;
3733                reserved_clusters = get_reserved_cluster_alloc(inode,
3734                                map->m_lblk, map->m_len);
3735                if (reserved_clusters)
3736                        ext4_da_update_reserve_space(inode,
3737                                                     reserved_clusters,
3738                                                     0);
3739        }
3740
3741map_out:
3742        map->m_flags |= EXT4_MAP_MAPPED;
3743        if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3744                err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3745                                         map->m_len);
3746                if (err < 0)
3747                        goto out2;
3748        }
3749out1:
3750        if (allocated > map->m_len)
3751                allocated = map->m_len;
3752        ext4_ext_show_leaf(inode, path);
3753        map->m_pblk = newblock;
3754        map->m_len = allocated;
3755out2:
3756        if (path) {
3757                ext4_ext_drop_refs(path);
3758                kfree(path);
3759        }
3760        return err ? err : allocated;
3761}
3762
3763/*
3764 * get_implied_cluster_alloc - check to see if the requested
3765 * allocation (in the map structure) overlaps with a cluster already
3766 * allocated in an extent.
3767 *      @sb     The filesystem superblock structure
3768 *      @map    The requested lblk->pblk mapping
3769 *      @ex     The extent structure which might contain an implied
3770 *                      cluster allocation
3771 *
3772 * This function is called by ext4_ext_map_blocks() after we failed to
3773 * find blocks that were already in the inode's extent tree.  Hence,
3774 * we know that the beginning of the requested region cannot overlap
3775 * the extent from the inode's extent tree.  There are three cases we
3776 * want to catch.  The first is this case:
3777 *
3778 *               |--- cluster # N--|
3779 *    |--- extent ---|  |---- requested region ---|
3780 *                      |==========|
3781 *
3782 * The second case that we need to test for is this one:
3783 *
3784 *   |--------- cluster # N ----------------|
3785 *         |--- requested region --|   |------- extent ----|
3786 *         |=======================|
3787 *
3788 * The third case is when the requested region lies between two extents
3789 * within the same cluster:
3790 *          |------------- cluster # N-------------|
3791 * |----- ex -----|                  |---- ex_right ----|
3792 *                  |------ requested region ------|
3793 *                  |================|
3794 *
3795 * In each of the above cases, we need to set map->m_pblk and
3796 * map->m_len so that they correspond to the extent labelled as
3797 * "|====|" from cluster #N, since it is already in use for data in
3798 * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
3799 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3800 * as a new "allocated" block region.  Otherwise, we will return 0 and
3801 * ext4_ext_map_blocks() will then allocate one or more new clusters
3802 * by calling ext4_mb_new_blocks().
3803 */
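/*
 * Worked example of the first case (illustrative numbers only): with a
 * 16-block cluster, an extent mapping logical [96, 104) to physical
 * [992, 1000) ends inside cluster #6 (logical blocks 96..111).  A
 * request at m_lblk == 104 has c_offset = 104 & 15 = 8, so the code
 * below returns m_pblk = (999 & ~15) + 8 = 1000 and caps m_len at
 * 16 - 8 = 8 blocks (and further at the next allocated block, if any),
 * keeping the mapping inside the already-allocated cluster.
 */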
3804static int get_implied_cluster_alloc(struct super_block *sb,
3805                                     struct ext4_map_blocks *map,
3806                                     struct ext4_extent *ex,
3807                                     struct ext4_ext_path *path)
3808{
3809        struct ext4_sb_info *sbi = EXT4_SB(sb);
3810        ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3811        ext4_lblk_t ex_cluster_start, ex_cluster_end;
3812        ext4_lblk_t rr_cluster_start;
3813        ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3814        ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3815        unsigned short ee_len = ext4_ext_get_actual_len(ex);
3816
3817        /* The extent passed in that we are trying to match */
3818        ex_cluster_start = EXT4_B2C(sbi, ee_block);
3819        ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3820
3821        /* The requested region passed into ext4_map_blocks() */
3822        rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3823
3824        if ((rr_cluster_start == ex_cluster_end) ||
3825            (rr_cluster_start == ex_cluster_start)) {
3826                if (rr_cluster_start == ex_cluster_end)
3827                        ee_start += ee_len - 1;
3828                map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3829                        c_offset;
3830                map->m_len = min(map->m_len,
3831                                 (unsigned) sbi->s_cluster_ratio - c_offset);
3832                /*
3833                 * Check for and handle this case:
3834                 *
3835                 *   |--------- cluster # N-------------|
3836                 *                     |------- extent ----|
3837                 *         |--- requested region ---|
3838                 *         |===========|
3839                 */
3840
3841                if (map->m_lblk < ee_block)
3842                        map->m_len = min(map->m_len, ee_block - map->m_lblk);
3843
3844                /*
3845                 * Check for the case where there is already another allocated
3846                 * block to the right of 'ex' but before the end of the cluster.
3847                 *
3848                 *          |------------- cluster # N-------------|
3849                 * |----- ex -----|                  |---- ex_right ----|
3850                 *                  |------ requested region ------|
3851                 *                  |================|
3852                 */
3853                if (map->m_lblk > ee_block) {
3854                        ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3855                        map->m_len = min(map->m_len, next - map->m_lblk);
3856                }
3857
3858                trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3859                return 1;
3860        }
3861
3862        trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3863        return 0;
3864}
3865
3866
3867/*
3868 * Block allocation/map/preallocation routine for extents based files
3869 *
3870 *
3871 * Needs to be called with
3872 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
3873 * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3874 *
3875 * return > 0, number of blocks already mapped/allocated
3876 *          if create == 0 and these are pre-allocated blocks
3877 *              buffer head is unmapped
3878 *          otherwise blocks are mapped
3879 *
3880 * return = 0, if plain lookup failed (blocks have not been allocated)
3881 *          buffer head is unmapped
3882 *
3883 * return < 0, error case.
3884 */
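/*
 * Minimal caller sketch (assumed usage; ext4_map_blocks() is the usual
 * entry point and fills the request roughly like this):
 *
 *	struct ext4_map_blocks map;
 *
 *	map.m_lblk = lblk;	(first logical block of the request)
 *	map.m_len = len;	(number of blocks wanted)
 *	ret = ext4_ext_map_blocks(handle, inode, &map,
 *				  EXT4_GET_BLOCKS_CREATE);
 *
 * where ret > 0 means 'ret' blocks are mapped starting at map->m_pblk,
 * ret == 0 means a hole with no allocation requested, and ret < 0 is
 * an error.
 */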
3885int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3886                        struct ext4_map_blocks *map, int flags)
3887{
3888        struct ext4_ext_path *path = NULL;
3889        struct ext4_extent newex, *ex, *ex2;
3890        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3891        ext4_fsblk_t newblock = 0;
3892        int free_on_err = 0, err = 0, depth, ret;
3893        unsigned int allocated = 0, offset = 0;
3894        unsigned int allocated_clusters = 0;
3895        struct ext4_allocation_request ar;
3896        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3897        ext4_lblk_t cluster_offset;
3898
3899        ext_debug("blocks %u/%u requested for inode %lu\n",
3900                  map->m_lblk, map->m_len, inode->i_ino);
3901        trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3902
3903        /* check in cache */
3904        if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3905                if (!newex.ee_start_lo && !newex.ee_start_hi) {
3906                        if ((sbi->s_cluster_ratio > 1) &&
3907                            ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3908                                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3909
3910                        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3911                                /*
3912                                 * block isn't allocated yet and
3913                                 * user doesn't want to allocate it
3914                                 */
3915                                goto out2;
3916                        }
3917                        /* we should allocate requested block */
3918                } else {
3919                        /* block is already allocated */
3920                        if (sbi->s_cluster_ratio > 1)
3921                                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3922                        newblock = map->m_lblk
3923                                   - le32_to_cpu(newex.ee_block)
3924                                   + ext4_ext_pblock(&newex);
3925                        /* number of remaining blocks in the extent */
3926                        allocated = ext4_ext_get_actual_len(&newex) -
3927                                (map->m_lblk - le32_to_cpu(newex.ee_block));
3928                        goto out;
3929                }
3930        }
3931
3932        /* find extent for this block */
3933        path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3934        if (IS_ERR(path)) {
3935                err = PTR_ERR(path);
3936                path = NULL;
3937                goto out2;
3938        }
3939
3940        depth = ext_depth(inode);
3941
3942        /*
3943         * a consistent leaf must not be empty;
3944         * this situation is possible, though, _during_ tree modification;
3945         * this is why the assert can't be put in ext4_ext_find_extent()
3946         */
3947        if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3948                EXT4_ERROR_INODE(inode, "bad extent address "
3949                                 "lblock: %lu, depth: %d pblock %lld",
3950                                 (unsigned long) map->m_lblk, depth,
3951                                 path[depth].p_block);
3952                err = -EIO;
3953                goto out2;
3954        }
3955
3956        ex = path[depth].p_ext;
3957        if (ex) {
3958                ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3959                ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3960                unsigned short ee_len;
3961
3962                /*
3963                 * Uninitialized extents are treated as holes, except that
3964                 * we split out initialized portions during a write.
3965                 */
3966                ee_len = ext4_ext_get_actual_len(ex);
3967
3968                trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3969
3970                /* if found extent covers block, simply return it */
3971                if (in_range(map->m_lblk, ee_block, ee_len)) {
3972                        newblock = map->m_lblk - ee_block + ee_start;
3973                        /* number of remaining blocks in the extent */
3974                        allocated = ee_len - (map->m_lblk - ee_block);
3975                        ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3976                                  ee_block, ee_len, newblock);
3977
3978                        /*
3979                         * Do not put uninitialized extent
3980                         * in the cache
3981                         */
3982                        if (!ext4_ext_is_uninitialized(ex)) {
3983                                ext4_ext_put_in_cache(inode, ee_block,
3984                                        ee_len, ee_start);
3985                                goto out;
3986                        }
3987                        ret = ext4_ext_handle_uninitialized_extents(
3988                                handle, inode, map, path, flags,
3989                                allocated, newblock);
3990                        return ret;
3991                }
3992        }
3993
3994        if ((sbi->s_cluster_ratio > 1) &&
3995            ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3996                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3997
3998        /*
3999         * requested block isn't allocated yet;
4000         * we cannot create the block if the create flag is zero
4001         */
4002        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4003                /*
4004                 * put the just-found gap into the cache to speed up
4005                 * subsequent requests
4006                 */
4007                ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4008                goto out2;
4009        }
4010
4011        /*
4012         * Okay, we need to do block allocation.
4013         */
4014        map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4015        newex.ee_block = cpu_to_le32(map->m_lblk);
4016        cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
4017
4018        /*
4019         * If we are doing bigalloc, check to see if the extent returned
4020         * by ext4_ext_find_extent() implies a cluster we can use.
4021         */
4022        if (cluster_offset && ex &&
4023            get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4024                ar.len = allocated = map->m_len;
4025                newblock = map->m_pblk;
4026                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4027                goto got_allocated_blocks;
4028        }
4029
4030        /* find neighbour allocated blocks */
4031        ar.lleft = map->m_lblk;
4032        err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4033        if (err)
4034                goto out2;
4035        ar.lright = map->m_lblk;
4036        ex2 = NULL;
4037        err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4038        if (err)
4039                goto out2;
4040
4041        /* Check if the extent after searching to the right implies a
4042         * cluster we can use. */
4043        if ((sbi->s_cluster_ratio > 1) && ex2 &&
4044            get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4045                ar.len = allocated = map->m_len;
4046                newblock = map->m_pblk;
4047                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4048                goto got_allocated_blocks;
4049        }
4050
4051        /*
4052         * See if request is beyond maximum number of blocks we can have in
4053         * a single extent. For an initialized extent this limit is
4054         * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4055         * EXT_UNINIT_MAX_LEN.
4056         */
4057        if (map->m_len > EXT_INIT_MAX_LEN &&
4058            !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4059                map->m_len = EXT_INIT_MAX_LEN;
4060        else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4061                 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4062                map->m_len = EXT_UNINIT_MAX_LEN;
4063
4064        /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4065        newex.ee_len = cpu_to_le16(map->m_len);
4066        err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4067        if (err)
4068                allocated = ext4_ext_get_actual_len(&newex);
4069        else
4070                allocated = map->m_len;
4071
4072        /* allocate new block */
4073        ar.inode = inode;
4074        ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4075        ar.logical = map->m_lblk;
4076        /*
4077         * We calculate the offset from the beginning of the cluster
4078         * for the logical block number, since when we allocate a
4079         * physical cluster, the physical block should start at the
4080         * same offset from the beginning of the cluster.  This is
4081         * needed so that future calls to get_implied_cluster_alloc()
4082         * work correctly.
4083         */
4084        offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4085        ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4086        ar.goal -= offset;
4087        ar.logical -= offset;
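        /*
         * Worked example for the adjustment above (illustrative numbers
         * only): with a 16-block cluster, m_lblk == 100 gives offset =
         * 100 & 15 = 4.  For allocated == 10, ar.len =
         * EXT4_NUM_B2C(4 + 10) = 1 cluster, and ar.goal/ar.logical are
         * pulled back by 4 blocks so the physical allocation starts at
         * a cluster boundary.
         */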
4088        if (S_ISREG(inode->i_mode))
4089                ar.flags = EXT4_MB_HINT_DATA;
4090        else
4091                /* disable in-core preallocation for non-regular files */
4092                ar.flags = 0;
4093        if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4094                ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4095        newblock = ext4_mb_new_blocks(handle, &ar, &err);
4096        if (!newblock)
4097                goto out2;
4098        ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4099                  ar.goal, newblock, allocated);
4100        free_on_err = 1;
4101        allocated_clusters = ar.len;
4102        ar.len = EXT4_C2B(sbi, ar.len) - offset;
4103        if (ar.len > allocated)
4104                ar.len = allocated;
4105
4106got_allocated_blocks:
4107        /* try to insert new extent into found leaf and return */
4108        ext4_ext_store_pblock(&newex, newblock + offset);
4109        newex.ee_len = cpu_to_le16(ar.len);
4110        /* Mark uninitialized */
4111        if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4112                ext4_ext_mark_uninitialized(&newex);
4113                /*
4114                 * io_end structure was created for every IO write to an
4115                 * uninitialized extent. To avoid unnecessary conversion,
4116                 * here we flag the IO that really needs the conversion.
4117                 * For non asycn direct IO case, flag the inode state
4118                 * that we need to perform conversion when IO is done.
4119                 */
4120                if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
4121                        if (io)
4122                                ext4_set_io_unwritten_flag(inode, io);
4123                        else
4124                                ext4_set_inode_state(inode,
4125                                                     EXT4_STATE_DIO_UNWRITTEN);
4126                }
4127                if (ext4_should_dioread_nolock(inode))
4128                        map->m_flags |= EXT4_MAP_UNINIT;
4129        }
4130
4131        err = 0;
4132        if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4133                err = check_eofblocks_fl(handle, inode, map->m_lblk,
4134                                         path, ar.len);
4135        if (!err)
4136                err = ext4_ext_insert_extent(handle, inode, path,
4137                                             &newex, flags);
4138        if (err && free_on_err) {
4139                int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4140                        EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4141                /* free data blocks we just allocated */
4142                /* not a good idea to call discard here directly,
4143                 * but otherwise we'd need to call it on every free() */
4144                ext4_discard_preallocations(inode);
4145                ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4146                                 ext4_ext_get_actual_len(&newex), fb_flags);
4147                goto out2;
4148        }
4149
4150        /* previous routine could use block we allocated */
4151        newblock = ext4_ext_pblock(&newex);
4152        allocated = ext4_ext_get_actual_len(&newex);
4153        if (allocated > map->m_len)
4154                allocated = map->m_len;
4155        map->m_flags |= EXT4_MAP_NEW;
4156
4157        /*
4158         * Update reserved blocks/metadata blocks after successful
4159         * block allocation which had been deferred till now.
4160         */
4161        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4162                unsigned int reserved_clusters;
4163                /*
4164                 * Check how many clusters we had reserved for this allocated range
4165                 */
4166                reserved_clusters = get_reserved_cluster_alloc(inode,
4167                                                map->m_lblk, allocated);
4168                if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4169                        if (reserved_clusters) {
4170                                /*
4171                                 * We have clusters reserved for this range.
4172                                 * But since we are not doing actual allocation
4173                                 * and are simply using blocks from a previously
4174                                 * allocated cluster, we should release the
4175                                 * reservation and not claim quota.
4176                                 */
4177                                ext4_da_update_reserve_space(inode,
4178                                                reserved_clusters, 0);
4179                        }
4180                } else {
4181                        BUG_ON(allocated_clusters < reserved_clusters);
4182                        /* We will claim quota for all newly allocated blocks. */
4183                        ext4_da_update_reserve_space(inode, allocated_clusters,
4184                                                        1);
4185                        if (reserved_clusters < allocated_clusters) {
4186                                struct ext4_inode_info *ei = EXT4_I(inode);
4187                                int reservation = allocated_clusters -
4188                                                  reserved_clusters;
4189                                /*
4190                                 * It seems we claimed a few clusters outside
4191                                 * the range of this allocation. We should
4192                                 * give them back to the reservation pool.
4193                                 * This can happen in the following case:
4194                                 *
4195                                 * * Suppose s_cluster_ratio is 4 (i.e., each
4196                                 *   cluster has 4 blocks). Thus, the clusters
4197                                 *   are [0-3],[4-7],[8-11]...
4198                                 * * First comes a delayed allocation write
4199                                 *   for logical blocks 10 & 11. Since there
4200                                 *   were no previous delayed allocated blocks
4201                                 *   in the range [8-11], we would reserve 1
4202                                 *   cluster for this write.
4203                                 * * Next comes a write for logical blocks 3
4204                                 *   to 8. In this case, we reserve 2 clusters
4205                                 *   (for [0-3] and [4-7]; not for [8-11], as
4206                                 *   that range already has delayed allocated
4207                                 *   blocks). Total reserved is now 3 clusters.
4208                                 * * Now, at delayed allocation writeout time,
4209                                 *   we first write blocks [3-8] and allocate
4210                                 *   3 clusters for writing these blocks. We
4211                                 *   also claim all three of those clusters.
4212                                 * * When we then come here to write out
4213                                 *   blocks [10-11], we would expect to claim
4214                                 *   the reservation of 1 cluster we had made
4215                                 *   (and we would claim it, since there are
4216                                 *   no more delayed allocated blocks in the
4217                                 *   range [8-11]). But our reserved cluster
4218                                 *   count has already gone to 0.
4219                                 *
4220                                 *   Thus, at step 4 above, when we determine
4221                                 *   that there are still some unwritten
4222                                 *   delayed allocated blocks outside of
4223                                 *   our current block range, we should
4224                                 *   increment the reserved cluster count
4225                                 *   so that when the remaining blocks
4226                                 *   finally get written, we can claim
4227                                 *   them.
4228                                 */
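                                    /*
                                     * With the numbers above: reservation =
                                     * 3 - 2 = 1 cluster, i.e. EXT4_C2B() = 4
                                     * blocks re-reserved for blocks [10-11].
                                     */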
4229                                dquot_reserve_block(inode,
4230                                                EXT4_C2B(sbi, reservation));
4231                                spin_lock(&ei->i_block_reservation_lock);
4232                                ei->i_reserved_data_blocks += reservation;
4233                                spin_unlock(&ei->i_block_reservation_lock);
4234                        }
4235                }
4236        }
4237
4238        /*
4239         * Cache the extent and update transaction to commit on fdatasync only
4240         * when it is _not_ an uninitialized extent.
4241         */
4242        if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4243                ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4244                ext4_update_inode_fsync_trans(handle, inode, 1);
4245        } else
4246                ext4_update_inode_fsync_trans(handle, inode, 0);
4247out:
4248        if (allocated > map->m_len)
4249                allocated = map->m_len;
4250        ext4_ext_show_leaf(inode, path);
4251        map->m_flags |= EXT4_MAP_MAPPED;
4252        map->m_pblk = newblock;
4253        map->m_len = allocated;
4254out2:
4255        if (path) {
4256                ext4_ext_drop_refs(path);
4257                kfree(path);
4258        }
4259
4260        trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
4261                newblock, map->m_len, err ? err : allocated);
4262
4263        return err ? err : allocated;
4264}
4265
4266void ext4_ext_truncate(struct inode *inode)
4267{
4268        struct address_space *mapping = inode->i_mapping;
4269        struct super_block *sb = inode->i_sb;
4270        ext4_lblk_t last_block;
4271        handle_t *handle;
4272        loff_t page_len;
4273        int err = 0;
4274
4275        /*
4276         * finish any pending end_io work so we won't run the risk of
4277         * converting any truncated blocks to initialized later
4278         */
4279        ext4_flush_completed_IO(inode);
4280
4281        /*
4282         * probably the first extent we free will be the last in its block
4283         */
4284        err = ext4_writepage_trans_blocks(inode);
4285        handle = ext4_journal_start(inode, err);
4286        if (IS_ERR(handle))
4287                return;
4288
4289        if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4290                page_len = PAGE_CACHE_SIZE -
4291                        (inode->i_size & (PAGE_CACHE_SIZE - 1));
4292
4293                err = ext4_discard_partial_page_buffers(handle,
4294                        mapping, inode->i_size, page_len, 0);
4295
4296                if (err)
4297                        goto out_stop;
4298        }
4299
4300        if (ext4_orphan_add(handle, inode))
4301                goto out_stop;
4302
4303        down_write(&EXT4_I(inode)->i_data_sem);
4304        ext4_ext_invalidate_cache(inode);
4305
4306        ext4_discard_preallocations(inode);
4307
4308        /*
4309         * TODO: optimization is possible here.
4310         * Probably we need not scan at all,
4311         * because page truncation is enough.
4312         */
4313
4314        /* we have to know where to truncate from in case of a crash */
4315        EXT4_I(inode)->i_disksize = inode->i_size;
4316        ext4_mark_inode_dirty(handle, inode);
4317
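            /*
             * Example (assuming 4 KiB blocks): truncating to i_size = 10000
             * gives last_block = (10000 + 4095) >> 12 = 3, so logical blocks
             * 3 through EXT_MAX_BLOCKS - 1 are removed from the tree.
             */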
4318        last_block = (inode->i_size + sb->s_blocksize - 1)
4319                        >> EXT4_BLOCK_SIZE_BITS(sb);
4320        err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4321
4322        /* In a multi-transaction truncate, we only make the final
4323         * transaction synchronous.
4324         */
4325        if (IS_SYNC(inode))
4326                ext4_handle_sync(handle);
4327
4328        up_write(&EXT4_I(inode)->i_data_sem);
4329
4330out_stop:
4331        /*
4332         * If this was a simple ftruncate() and the file will remain alive,
4333         * then we need to clear up the orphan record which we created above.
4334         * However, if this was a real unlink then we were called by
4335         * ext4_delete_inode(), and we allow that function to clean up the
4336         * orphan info for us.
4337         */
4338        if (inode->i_nlink)
4339                ext4_orphan_del(handle, inode);
4340
4341        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4342        ext4_mark_inode_dirty(handle, inode);
4343        ext4_journal_stop(handle);
4344}
4345
4346static void ext4_falloc_update_inode(struct inode *inode,
4347                                int mode, loff_t new_size, int update_ctime)
4348{
4349        struct timespec now;
4350
4351        if (update_ctime) {
4352                now = current_fs_time(inode->i_sb);
4353                if (!timespec_equal(&inode->i_ctime, &now))
4354                        inode->i_ctime = now;
4355        }
4356        /*
4357         * Update i_size only when the preallocation extends beyond the
4358         * file size and FALLOC_FL_KEEP_SIZE was not requested.
4359         */
4360        if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4361                if (new_size > i_size_read(inode))
4362                        i_size_write(inode, new_size);
4363                if (new_size > EXT4_I(inode)->i_disksize)
4364                        ext4_update_i_disksize(inode, new_size);
4365        } else {
4366                /*
4367                 * Mark that we allocate beyond EOF so the subsequent truncate
4368                 * can proceed even if the new size is the same as i_size.
4369                 */
4370                if (new_size > i_size_read(inode))
4371                        ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4372        }
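            /*
             * E.g., a fallocate() of 1 MiB on an empty file sets i_size to
             * 1 MiB, while with FALLOC_FL_KEEP_SIZE i_size stays 0 and only
             * EXT4_INODE_EOFBLOCKS records the blocks beyond EOF.
             */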
4373
4374}
4375
4376/*
4377 * preallocate space for a file. This implements ext4's fallocate file
4378 * operation, which gets called from the sys_fallocate system call.
4379 * For block-mapped files, posix_fallocate should fall back to the method
4380 * of writing zeroes to the required new blocks (the same behavior that is
4381 * expected of file systems which do not support the fallocate() system call).
4382 */
4383long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4384{
4385        struct inode *inode = file->f_path.dentry->d_inode;
4386        handle_t *handle;
4387        loff_t new_size;
4388        unsigned int max_blocks;
4389        int ret = 0;
4390        int ret2 = 0;
4391        int retries = 0;
4392        int flags;
4393        struct ext4_map_blocks map;
4394        unsigned int credits, blkbits = inode->i_blkbits;
4395
4396        /*
4397         * currently supporting (pre)allocate mode for extent-based
4398         * files _only_
4399         */
4400        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4401                return -EOPNOTSUPP;
4402
4403        /* Return error if mode is not supported */
4404        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4405                return -EOPNOTSUPP;
4406
4407        if (mode & FALLOC_FL_PUNCH_HOLE)
4408                return ext4_punch_hole(file, offset, len);
4409
4410        trace_ext4_fallocate_enter(inode, offset, len, mode);
4411        map.m_lblk = offset >> blkbits;
4412        /*
4413         * We can't just convert len to max_blocks: with blocksize = 4096,
4414         * offset = 3072 and len = 2048, the range spans two blocks.
4415         */
4416        max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4417                - map.m_lblk;
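            /*
             * With the numbers above: map.m_lblk = 3072 >> 12 = 0 and the
             * aligned end is 8192 >> 12 = 2, so max_blocks = 2, not 1.
             */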
4418        /*
4419         * credits to insert 1 extent into extent tree
4420         */
4421        credits = ext4_chunk_trans_blocks(inode, max_blocks);
4422        mutex_lock(&inode->i_mutex);
4423        ret = inode_newsize_ok(inode, (len + offset));
4424        if (ret) {
4425                mutex_unlock(&inode->i_mutex);
4426                trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4427                return ret;
4428        }
4429        flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4430        if (mode & FALLOC_FL_KEEP_SIZE)
4431                flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4432        /*
4433         * Don't normalize the request if it can fit in one extent so
4434         * that it doesn't get unnecessarily split into multiple
4435         * extents.
4436         */
4437        if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4438                flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4439retry:
4440        while (ret >= 0 && ret < max_blocks) {
4441                map.m_lblk = map.m_lblk + ret;
4442                map.m_len = max_blocks = max_blocks - ret;
4443                handle = ext4_journal_start(inode, credits);
4444                if (IS_ERR(handle)) {
4445                        ret = PTR_ERR(handle);
4446                        break;
4447                }
4448                ret = ext4_map_blocks(handle, inode, &map, flags);
4449                if (ret <= 0) {
4450#ifdef EXT4FS_DEBUG
4451                        WARN_ON(ret <= 0);
4452                        printk(KERN_ERR "%s: ext4_ext_map_blocks "
4453                                    "returned error inode#%lu, block=%u, "
4454                                    "max_blocks=%u\n", __func__,
4455                                    inode->i_ino, map.m_lblk, max_blocks);
4456#endif
4457                        ext4_mark_inode_dirty(handle, inode);
4458                        ret2 = ext4_journal_stop(handle);
4459                        break;
4460                }
4461                if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4462                                                blkbits) >> blkbits))
4463                        new_size = offset + len;
4464                else
4465                        new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4466
4467                ext4_falloc_update_inode(inode, mode, new_size,
4468                                         (map.m_flags & EXT4_MAP_NEW));
4469                ext4_mark_inode_dirty(handle, inode);
4470                if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4471                        ext4_handle_sync(handle);
4472                ret2 = ext4_journal_stop(handle);
4473                if (ret2)
4474                        break;
4475        }
4476        if (ret == -ENOSPC &&
4477                        ext4_should_retry_alloc(inode->i_sb, &retries)) {
4478                ret = 0;
4479                goto retry;
4480        }
4481        mutex_unlock(&inode->i_mutex);
4482        trace_ext4_fallocate_exit(inode, offset, max_blocks,
4483                                ret > 0 ? ret2 : ret);
4484        return ret > 0 ? ret2 : ret;
4485}
4486
4487/*
4488 * This function converts a range of blocks to written extents.
4489 * The caller passes the start offset and the size;
4490 * all unwritten extents within this range will be converted to
4491 * written extents.
4492 *
4493 * This function is called from the direct I/O end_io callback
4494 * function, to convert the fallocated extents after I/O is completed.
4495 * Returns 0 on success.
4496 */
4497int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4498                                    ssize_t len)
4499{
4500        handle_t *handle;
4501        unsigned int max_blocks;
4502        int ret = 0;
4503        int ret2 = 0;
4504        struct ext4_map_blocks map;
4505        unsigned int credits, blkbits = inode->i_blkbits;
4506
4507        map.m_lblk = offset >> blkbits;
4508        /*
4509         * We can't just convert len to max_blocks: with blocksize = 4096,
4510         * offset = 3072 and len = 2048, the range spans two blocks.
4511         */
4512        max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4513                      map.m_lblk);
4514        /*
4515         * credits to insert 1 extent into extent tree
4516         */
4517        credits = ext4_chunk_trans_blocks(inode, max_blocks);
4518        while (ret >= 0 && ret < max_blocks) {
4519                map.m_lblk += ret;
4520                map.m_len = (max_blocks -= ret);
4521                handle = ext4_journal_start(inode, credits);
4522                if (IS_ERR(handle)) {
4523                        ret = PTR_ERR(handle);
4524                        break;
4525                }
4526                ret = ext4_map_blocks(handle, inode, &map,
4527                                      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4528                if (ret <= 0) {
4529                        WARN_ON(ret <= 0);
4530                        ext4_msg(inode->i_sb, KERN_ERR,
4531                                 "%s:%d: inode #%lu: block %u: len %u: "
4532                                 "ext4_ext_map_blocks returned %d",
4533                                 __func__, __LINE__, inode->i_ino, map.m_lblk,
4534                                 map.m_len, ret);
4535                }
4536                ext4_mark_inode_dirty(handle, inode);
4537                ret2 = ext4_journal_stop(handle);
4538                if (ret <= 0 || ret2)
4539                        break;
4540        }
4541        return ret > 0 ? ret2 : ret;
4542}
4543
4544/*
4545 * Callback function called for each extent to gather FIEMAP information.
4546 */
4547static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
4548                       struct ext4_ext_cache *newex, struct ext4_extent *ex,
4549                       void *data)
4550{
4551        __u64   logical;
4552        __u64   physical;
4553        __u64   length;
4554        __u32   flags = 0;
4555        int             ret = 0;
4556        struct fiemap_extent_info *fieinfo = data;
4557        unsigned char blksize_bits;
4558
4559        blksize_bits = inode->i_sb->s_blocksize_bits;
4560        logical = (__u64)newex->ec_block << blksize_bits;
4561
4562        if (newex->ec_start == 0) {
4563                /*
4564                 * No extent in the extent tree contains block @newex->ec_start,
4565                 * so the block may lie in 1) a hole or 2) a delayed extent.
4566                 *
4567                 * Holes and delayed extents are processed as follows:
4568                 * 1. look up dirty pages in the specified range of the
4569                 *    pagecache. If no page is found, there is no delayed
4570                 *    extent; return EXT_CONTINUE.
4571                 * 2. find the first mapped buffer,
4572                 * 3. check that the mapped buffer is both in the request
4573                 *    range and a delayed buffer. If not, there is no
4574                 *    delayed extent; return.
4575                 * 4. a delayed extent is found; collect it.
4576                 */
4577                ext4_lblk_t     end = 0;
4578                pgoff_t         last_offset;
4579                pgoff_t         offset;
4580                pgoff_t         index;
4581                pgoff_t         start_index = 0;
4582                struct page     **pages = NULL;
4583                struct buffer_head *bh = NULL;
4584                struct buffer_head *head = NULL;
4585                unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
4586
4587                pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
4588                if (pages == NULL)
4589                        return -ENOMEM;
4590
4591                offset = logical >> PAGE_SHIFT;
4592repeat:
4593                last_offset = offset;
4594                head = NULL;
4595                ret = find_get_pages_tag(inode->i_mapping, &offset,
4596                                        PAGECACHE_TAG_DIRTY, nr_pages, pages);
4597
4598                if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4599                        /* First time, try to find a mapped buffer. */
4600                        if (ret == 0) {
4601out:
4602                                for (index = 0; index < ret; index++)
4603                                        page_cache_release(pages[index]);
4604                                /* just a hole. */
4605                                kfree(pages);
4606                                return EXT_CONTINUE;
4607                        }
4608                        index = 0;
4609
4610next_page:
4611                        /* Try to find the 1st mapped buffer. */
4612                        end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
4613                                  blksize_bits;
4614                        if (!page_has_buffers(pages[index]))
4615                                goto out;
4616                        head = page_buffers(pages[index]);
4617                        if (!head)
4618                                goto out;
4619
4620                        index++;
4621                        bh = head;
4622                        do {
4623                                if (end >= newex->ec_block +
4624                                        newex->ec_len)
4625                                        /* The buffer is out of
4626                                         * the request range.
4627                                         */
4628                                        goto out;
4629
4630                                if (buffer_mapped(bh) &&
4631                                    end >= newex->ec_block) {
4632                                        start_index = index - 1;
4633                                        /* get the 1st mapped buffer. */
4634                                        goto found_mapped_buffer;
4635                                }
4636
4637                                bh = bh->b_this_page;
4638                                end++;
4639                        } while (bh != head);
4640
4641                        /* No mapped buffer in the range was found in this
4642                         * page; we need to look up the next page.
4643                         */
4644                        if (index >= ret) {
4645                                /* There is no page left, but we need to limit
4646                                 * newex->ec_len.
4647                                 */
4648                                newex->ec_len = end - newex->ec_block;
4649                                goto out;
4650                        }
4651                        goto next_page;
4652                } else {
4653                        /* Find contiguous delayed buffers. */
4654                        if (ret > 0 && pages[0]->index == last_offset)
4655                                head = page_buffers(pages[0]);
4656                        bh = head;
4657                        index = 1;
4658                        start_index = 0;
4659                }
4660
4661found_mapped_buffer:
4662                if (bh != NULL && buffer_delay(bh)) {
4663                        /* 1st or contiguous delayed buffer found. */
4664                        if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4665                                /*
4666                                 * 1st delayed buffer found, record
4667                                 * the start of extent.
4668                                 */
4669                                flags |= FIEMAP_EXTENT_DELALLOC;
4670                                newex->ec_block = end;
4671                                logical = (__u64)end << blksize_bits;
4672                        }
4673                        /* Find contiguous delayed buffers. */
4674                        do {
4675                                if (!buffer_delay(bh))
4676                                        goto found_delayed_extent;
4677                                bh = bh->b_this_page;
4678                                end++;
4679                        } while (bh != head);
4680
4681                        for (; index < ret; index++) {
4682                                if (!page_has_buffers(pages[index])) {
4683                                        bh = NULL;
4684                                        break;
4685                                }
4686                                head = page_buffers(pages[index]);
4687                                if (!head) {
4688                                        bh = NULL;
4689                                        break;
4690                                }
4691
4692                                if (pages[index]->index !=
4693                                    pages[start_index]->index + index
4694                                    - start_index) {
4695                                        /* Blocks are not contiguous. */
4696                                        bh = NULL;
4697                                        break;
4698                                }
4699                                bh = head;
4700                                do {
4701                                        if (!buffer_delay(bh))
4702                                                /* Delayed-extent ends. */
4703                                                goto found_delayed_extent;
4704                                        bh = bh->b_this_page;
4705                                        end++;
4706                                } while (bh != head);
4707                        }
4708                } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
4709                        /* a hole found. */
4710                        goto out;
4711
4712found_delayed_extent:
4713                newex->ec_len = min(end - newex->ec_block,
4714                                                (ext4_lblk_t)EXT_INIT_MAX_LEN);
4715                if (ret == nr_pages && bh != NULL &&
4716                        newex->ec_len < EXT_INIT_MAX_LEN &&
4717                        buffer_delay(bh)) {
4718                        /* The delayed extent may continue; scan more pages. */
4719                        for (index = 0; index < ret; index++)
4720                                page_cache_release(pages[index]);
4721                        goto repeat;
4722                }
4723
4724                for (index = 0; index < ret; index++)
4725                        page_cache_release(pages[index]);
4726                kfree(pages);
4727        }
4728
4729        physical = (__u64)newex->ec_start << blksize_bits;
4730        length =   (__u64)newex->ec_len << blksize_bits;
4731
4732        if (ex && ext4_ext_is_uninitialized(ex))
4733                flags |= FIEMAP_EXTENT_UNWRITTEN;
4734
4735        if (next == EXT_MAX_BLOCKS)
4736                flags |= FIEMAP_EXTENT_LAST;
4737
4738        ret = fiemap_fill_next_extent(fieinfo, logical, physical,
4739                                        length, flags);
4740        if (ret < 0)
4741                return ret;
4742        if (ret == 1)
4743                return EXT_BREAK;
4744        return EXT_CONTINUE;
4745}
4746/* the fiemap flags we can handle are specified here */
4747#define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4748
4749static int ext4_xattr_fiemap(struct inode *inode,
4750                                struct fiemap_extent_info *fieinfo)
4751{
4752        __u64 physical = 0;
4753        __u64 length;
4754        __u32 flags = FIEMAP_EXTENT_LAST;
4755        int blockbits = inode->i_sb->s_blocksize_bits;
4756        int error = 0;
4757
4758        /* in-inode? */
4759        if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4760                struct ext4_iloc iloc;
4761                int offset;     /* offset of xattr in inode */
4762
4763                error = ext4_get_inode_loc(inode, &iloc);
4764                if (error)
4765                        return error;
4766                physical = iloc.bh->b_blocknr << blockbits;
4767                offset = EXT4_GOOD_OLD_INODE_SIZE +
4768                                EXT4_I(inode)->i_extra_isize;
4769                physical += offset;
4770                length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
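                /*
                 * E.g. (assuming 256-byte inodes and i_extra_isize = 32):
                 * the in-inode xattr data starts 128 + 32 = 160 bytes into
                 * the inode, leaving length = 256 - 160 = 96 bytes.
                 */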
4771                flags |= FIEMAP_EXTENT_DATA_INLINE;
4772                brelse(iloc.bh);
4773        } else { /* external block */
4774                physical = EXT4_I(inode)->i_file_acl << blockbits;
4775                length = inode->i_sb->s_blocksize;
4776        }
4777
4778        if (physical)
4779                error = fiemap_fill_next_extent(fieinfo, 0, physical,
4780                                                length, flags);
4781        return (error < 0 ? error : 0);
4782}
4783
4784/*
4785 * ext4_ext_punch_hole
4786 *
4787 * Punches a hole of "length" bytes in a file starting
4788 * at byte "offset"
4789 *
4790 * @inode:  The inode of the file to punch a hole in
4791 * @offset: The starting byte offset of the hole
4792 * @length: The length of the hole
4793 *
4794 * Returns 0 on success, or a negative error code on failure
4795 */
4796int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4797{
4798        struct inode *inode = file->f_path.dentry->d_inode;
4799        struct super_block *sb = inode->i_sb;
4800        ext4_lblk_t first_block, stop_block;
4801        struct address_space *mapping = inode->i_mapping;
4802        handle_t *handle;
4803        loff_t first_page, last_page, page_len;
4804        loff_t first_page_offset, last_page_offset;
4805        int credits, err = 0;
4806
4807        /* No need to punch hole beyond i_size */
4808        if (offset >= inode->i_size)
4809                return 0;
4810
4811        /*
4812         * If the hole extends beyond i_size, set the hole
4813         * to end after the page that contains i_size
4814         */
4815        if (offset + length > inode->i_size) {
4816                length = inode->i_size +
4817                   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4818                   offset;
4819        }
4820
4821        first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4822        last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4823
4824        first_page_offset = first_page << PAGE_CACHE_SHIFT;
4825        last_page_offset = last_page << PAGE_CACHE_SHIFT;
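            /*
             * Worked example (assuming 4 KiB pages): offset = 1000 and
             * length = 12000 give first_page = 1 and last_page = 3, so the
             * whole pages in [4096, 12288) are dropped from the pagecache
             * while the partial head and tail are zeroed out below.
             */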
4826
4827        /*
4828         * Write out all dirty pages to avoid race conditions,
4829         * then release them.
4830         */
4831        if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4832                err = filemap_write_and_wait_range(mapping,
4833                        offset, offset + length - 1);
4834
4835                if (err)
4836                        return err;
4837        }
4838
4839        /* Now release the pages */
4840        if (last_page_offset > first_page_offset) {
4841                truncate_pagecache_range(inode, first_page_offset,
4842                                         last_page_offset - 1);
4843        }
4844
4845        /* finish any pending end_io work */
4846        ext4_flush_completed_IO(inode);
4847
4848        credits = ext4_writepage_trans_blocks(inode);
4849        handle = ext4_journal_start(inode, credits);
4850        if (IS_ERR(handle))
4851                return PTR_ERR(handle);
4852
4853        err = ext4_orphan_add(handle, inode);
4854        if (err)
4855                goto out;
4856
4857        /*
4858         * Now we need to zero out the non-page-aligned data in the
4859         * pages at the start and tail of the hole, and unmap the buffer
4860         * heads for the block aligned regions of the page that were
4861         * completely zeroed.
4862         */
4863        if (first_page > last_page) {
4864                /*
4865                 * If the file space being truncated is contained within a
4866                 * page, just zero out and unmap the middle of that page.
4867                 */
4868                err = ext4_discard_partial_page_buffers(handle,
4869                        mapping, offset, length, 0);
4870
4871                if (err)
4872                        goto out;
4873        } else {
4874                /*
4875                 * zero out and unmap the partial page that contains
4876                 * the start of the hole
4877                 */
4878                page_len  = first_page_offset - offset;
4879                if (page_len > 0) {
4880                        err = ext4_discard_partial_page_buffers(handle, mapping,
4881                                                   offset, page_len, 0);
4882                        if (err)
4883                                goto out;
4884                }
4885
4886                /*
4887                 * zero out and unmap the partial page that contains
4888                 * the end of the hole
4889                 */
4890                page_len = offset + length - last_page_offset;
4891                if (page_len > 0) {
4892                        err = ext4_discard_partial_page_buffers(handle, mapping,
4893                                        last_page_offset, page_len, 0);
4894                        if (err)
4895                                goto out;
4896                }
4897        }
4898
4899        /*
4900         * If i_size is contained in the last page, we need to
4901         * unmap and zero the partial page after i_size
4902         */
4903        if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4904           inode->i_size % PAGE_CACHE_SIZE != 0) {
4905
4906                page_len = PAGE_CACHE_SIZE -
4907                        (inode->i_size & (PAGE_CACHE_SIZE - 1));
4908
4909                if (page_len > 0) {
4910                        err = ext4_discard_partial_page_buffers(handle,
4911                          mapping, inode->i_size, page_len, 0);
4912
4913                        if (err)
4914                                goto out;
4915                }
4916        }
4917
4918        first_block = (offset + sb->s_blocksize - 1) >>
4919                EXT4_BLOCK_SIZE_BITS(sb);
4920        stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
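            /*
             * With the same example (4 KiB blocks, offset = 1000, length =
             * 12000): first_block = (1000 + 4095) >> 12 = 1 and stop_block =
             * 13000 >> 12 = 3, so only whole blocks 1 and 2 are removed;
             * the partial blocks at each end were merely zeroed above.
             */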
4921
4922        /* If there are no blocks to remove, return now */
4923        if (first_block >= stop_block)
4924                goto out;
4925
4926        down_write(&EXT4_I(inode)->i_data_sem);
4927        ext4_ext_invalidate_cache(inode);
4928        ext4_discard_preallocations(inode);
4929
4930        err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4931
4932        ext4_ext_invalidate_cache(inode);
4933        ext4_discard_preallocations(inode);
4934
4935        if (IS_SYNC(inode))
4936                ext4_handle_sync(handle);
4937
4938        up_write(&EXT4_I(inode)->i_data_sem);
4939
4940out:
4941        ext4_orphan_del(handle, inode);
4942        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4943        ext4_mark_inode_dirty(handle, inode);
4944        ext4_journal_stop(handle);
4945        return err;
4946}

4947int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4948                __u64 start, __u64 len)
4949{
4950        ext4_lblk_t start_blk;
4951        int error = 0;
4952
4953        /* fallback to generic here if not in extents fmt */
4954        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4955                return generic_block_fiemap(inode, fieinfo, start, len,
4956                        ext4_get_block);
4957
4958        if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4959                return -EBADR;
4960
4961        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4962                error = ext4_xattr_fiemap(inode, fieinfo);
4963        } else {
4964                ext4_lblk_t len_blks;
4965                __u64 last_blk;
4966
4967                start_blk = start >> inode->i_sb->s_blocksize_bits;
4968                last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4969                if (last_blk >= EXT_MAX_BLOCKS)
4970                        last_blk = EXT_MAX_BLOCKS-1;
4971                len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
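                /*
                 * Example (assuming 4 KiB blocks): start = 5000 and
                 * len = 10000 give start_blk = 1 and last_blk = 14999 >> 12
                 * = 3, so len_blks = 3 blocks are walked.
                 */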
4972
4973                /*
4974                 * Walk the extent tree gathering extent information.
4975                 * ext4_ext_fiemap_cb will push extents back to user.
4976                 */
4977                error = ext4_ext_walk_space(inode, start_blk, len_blks,
4978                                          ext4_ext_fiemap_cb, fieinfo);
4979        }
4980
4981        return error;
4982}
4983