/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
                        const hfsplus_btree_key *k2)
{
        __be32 k1id, k2id;
        __be32 k1s, k2s;

        k1id = k1->ext.cnid;
        k2id = k2->ext.cnid;
        if (k1id != k2id)
                return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

        if (k1->ext.fork_type != k2->ext.fork_type)
                return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

        k1s = k1->ext.start_block;
        k2s = k2->ext.start_block;
        if (k1s == k2s)
                return 0;
        return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

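/*
 * Fill in an extents overflow tree search key for the given file
 * (cnid), file allocation block offset and fork type (data or rsrc).
 */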
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
                                  u32 block, u8 type)
{
        key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
        key->ext.cnid = cpu_to_be32(cnid);
        key->ext.start_block = cpu_to_be32(block);
        key->ext.fork_type = type;
        key->ext.pad = 0;
}

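/*
 * Map block offset @off within an extent record to an absolute
 * allocation block on disk.  Returns 0 if @off lies past the record.
 */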
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
        int i;
        u32 count;

        for (i = 0; i < 8; ext++, i++) {
                count = be32_to_cpu(ext->block_count);
                if (off < count)
                        return be32_to_cpu(ext->start_block) + off;
                off -= count;
        }
        /* panic? */
        return 0;
}

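/* Total number of allocation blocks covered by an extent record */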
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
        int i;
        u32 count = 0;

        for (i = 0; i < 8; ext++, i++)
                count += be32_to_cpu(ext->block_count);
        return count;
}

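/*
 * Allocation block just past the last block described by an extent
 * record; used as the allocation goal when growing the file.
 */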
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
        int i;

        ext += 7;
        for (i = 0; i < 7; ext--, i++)
                if (ext->block_count)
                        break;
        return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

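/*
 * Write the cached extent record back to the extents overflow tree:
 * a record flagged HFSPLUS_EXT_NEW is inserted, otherwise the existing
 * record is overwritten in place.  Caller must hold extents_lock and
 * have set up @fd for the extents tree.
 */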
static int __hfsplus_ext_write_extent(struct inode *inode,
                struct hfs_find_data *fd)
{
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        int res;

        WARN_ON(!mutex_is_locked(&hip->extents_lock));

        hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
                              HFSPLUS_IS_RSRC(inode) ?
                                HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

        res = hfs_brec_find(fd, hfs_find_rec_by_key);
        if (hip->extent_state & HFSPLUS_EXT_NEW) {
                if (res != -ENOENT)
                        return res;
                res = hfs_brec_insert(fd, hip->cached_extents,
                                sizeof(hfsplus_extent_rec));
                if (res)
                        return res;
                hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
        } else {
                if (res)
                        return res;
                hfs_bnode_write(fd->bnode, hip->cached_extents,
                                fd->entryoffset, fd->entrylength);
                hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
        }

        /*
         * We can't just use hfsplus_mark_inode_dirty here, because we
         * also get called from hfsplus_write_inode, which should not
         * redirty the inode.  Instead the callers have to be careful
         * to explicitly mark the inode dirty, too.
         */
        set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);

        return 0;
}

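/*
 * Flush the cached extent record if it is dirty.  Caller must hold
 * extents_lock.
 */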
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
        int res = 0;

        if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
                struct hfs_find_data fd;

                res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
                if (res)
                        return res;
                res = __hfsplus_ext_write_extent(inode, &fd);
                hfs_find_exit(&fd);
        }
        return res;
}

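/* Flush the cached extent record, taking extents_lock ourselves */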
int hfsplus_ext_write_extent(struct inode *inode)
{
        int res;

        mutex_lock(&HFSPLUS_I(inode)->extents_lock);
        res = hfsplus_ext_write_extent_locked(inode);
        mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

        return res;
}

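/*
 * Look up the extent record covering file block @block of fork @type
 * of file @cnid and copy it into @extent.  Returns -ENOENT if the
 * tree holds no matching record for this file and fork.
 */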
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
                                            struct hfsplus_extent *extent,
                                            u32 cnid, u32 block, u8 type)
{
        int res;

        hfsplus_ext_build_key(fd->search_key, cnid, block, type);
        fd->key->ext.cnid = 0;
        res = hfs_brec_find(fd, hfs_find_rec_by_key);
        if (res && res != -ENOENT)
                return res;
        if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
            fd->key->ext.fork_type != fd->search_key->ext.fork_type)
                return -ENOENT;
        if (fd->entrylength != sizeof(hfsplus_extent_rec))
                return -EIO;
        hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
                sizeof(hfsplus_extent_rec));
        return 0;
}

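/*
 * Make hip->cached_extents describe the extent record that covers file
 * block @block, writing back the previously cached record first if it
 * is dirty.  Caller must hold extents_lock.
 */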
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
                struct inode *inode, u32 block)
{
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        int res;

        WARN_ON(!mutex_is_locked(&hip->extents_lock));

        if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
                res = __hfsplus_ext_write_extent(inode, fd);
                if (res)
                        return res;
        }

        res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
                                        block, HFSPLUS_IS_RSRC(inode) ?
                                                HFSPLUS_TYPE_RSRC :
                                                HFSPLUS_TYPE_DATA);
        if (!res) {
                hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
                hip->cached_blocks =
                        hfsplus_ext_block_count(hip->cached_extents);
        } else {
                hip->cached_start = hip->cached_blocks = 0;
                hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
        }
        return res;
}

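/*
 * Ensure the cached extent record covers file block @block, reading it
 * from the extents tree if the cache does not already cover it.
 * Called with extents_lock held.
 */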
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        struct hfs_find_data fd;
        int res;

        if (block >= hip->cached_start &&
            block < hip->cached_start + hip->cached_blocks)
                return 0;

        res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
        if (!res) {
                res = __hfsplus_ext_cache_extent(&fd, inode, block);
                hfs_find_exit(&fd);
        }
        return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
                      struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        int res = -EIO;
        u32 ablock, dblock, mask;
        sector_t sector;
        int was_dirty = 0;
        int shift;

        /* Convert inode block to disk allocation block */
        shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
        ablock = iblock >> sbi->fs_shift;

        if (iblock >= hip->fs_blocks) {
                if (iblock > hip->fs_blocks || !create)
                        return -EIO;
                if (ablock >= hip->alloc_blocks) {
                        res = hfsplus_file_extend(inode);
                        if (res)
                                return res;
                }
        } else
                create = 0;

        if (ablock < hip->first_blocks) {
                dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
                goto done;
        }

        if (inode->i_ino == HFSPLUS_EXT_CNID)
                return -EIO;

        mutex_lock(&hip->extents_lock);

        /*
         * hfsplus_ext_read_extent will write out a cached extent into
         * the extents btree.  In that case we may have to mark the inode
         * dirty even for a pure read of an extent here.
         */
        was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
        res = hfsplus_ext_read_extent(inode, ablock);
        if (res) {
                mutex_unlock(&hip->extents_lock);
                return -EIO;
        }
        dblock = hfsplus_ext_find_block(hip->cached_extents,
                                        ablock - hip->cached_start);
        mutex_unlock(&hip->extents_lock);

done:
        hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
                inode->i_ino, (long long)iblock, dblock);

        mask = (1 << sbi->fs_shift) - 1;
        sector = ((sector_t)dblock << sbi->fs_shift) +
                  sbi->blockoffset + (iblock & mask);
        map_bh(bh_result, sb, sector);

        if (create) {
                set_buffer_new(bh_result);
                hip->phys_size += sb->s_blocksize;
                hip->fs_blocks++;
                inode_add_bytes(inode, sb->s_blocksize);
        }
        if (create || was_dirty)
                mark_inode_dirty(inode);
        return 0;
}

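/* Print the eight start:count pairs of an extent record for debugging */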
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
        int i;

        hfs_dbg(EXTENT, "   ");
        for (i = 0; i < 8; i++)
                hfs_dbg_cont(EXTENT, " %u:%u",
                             be32_to_cpu(extent[i].start_block),
                             be32_to_cpu(extent[i].block_count));
        hfs_dbg_cont(EXTENT, "\n");
}

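/*
 * Append @block_count newly allocated blocks starting at @alloc_block
 * to an extent record, where @offset is the number of blocks the
 * record already maps.  The new blocks either grow the last used
 * extent, if contiguous with it, or start a new one.  Returns -ENOSPC
 * if all eight extents of the record are already in use.
 */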
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
                              u32 alloc_block, u32 block_count)
{
        u32 count, start;
        int i;

        hfsplus_dump_extent(extent);
        for (i = 0; i < 8; extent++, i++) {
                count = be32_to_cpu(extent->block_count);
                if (offset == count) {
                        start = be32_to_cpu(extent->start_block);
                        if (alloc_block != start + count) {
                                if (++i >= 8)
                                        return -ENOSPC;
                                extent++;
                                extent->start_block = cpu_to_be32(alloc_block);
                        } else
                                block_count += count;
                        extent->block_count = cpu_to_be32(block_count);
                        return 0;
                } else if (offset < count)
                        break;
                offset -= count;
        }
        /* panic? */
        return -EIO;
}

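/*
 * Free the last @block_nr allocation blocks described by an extent
 * record, where @offset is the number of blocks of the record still in
 * use; freeing walks backwards from that point, clearing or trimming
 * extents as it goes.  All extents are attempted even if one free
 * fails; only the last error is returned.
 */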
static int hfsplus_free_extents(struct super_block *sb,
                                struct hfsplus_extent *extent,
                                u32 offset, u32 block_nr)
{
        u32 count, start;
        int i;
        int err = 0;

        hfsplus_dump_extent(extent);
        for (i = 0; i < 8; extent++, i++) {
                count = be32_to_cpu(extent->block_count);
                if (offset == count)
                        goto found;
                else if (offset < count)
                        break;
                offset -= count;
        }
        /* panic? */
        return -EIO;
found:
        for (;;) {
                start = be32_to_cpu(extent->start_block);
                if (count <= block_nr) {
                        err = hfsplus_block_free(sb, start, count);
                        if (err) {
                                pr_err("can't free extent\n");
                                hfs_dbg(EXTENT, " start: %u count: %u\n",
                                        start, count);
                        }
                        extent->block_count = 0;
                        extent->start_block = 0;
                        block_nr -= count;
                } else {
                        count -= block_nr;
                        err = hfsplus_block_free(sb, start + count, block_nr);
                        if (err) {
                                pr_err("can't free extent\n");
                                hfs_dbg(EXTENT, " start: %u count: %u\n",
                                        start, count);
                        }
                        extent->block_count = cpu_to_be32(count);
                        block_nr = 0;
                }
                if (!block_nr || !i) {
                        /*
                         * Try to free all extents and
                         * return only last error
                         */
                        return err;
                }
                i--;
                extent--;
                count = be32_to_cpu(extent->block_count);
        }
}

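/*
 * Free all allocation blocks of one fork of a file: first the extents
 * stored in the catalog fork record (@fork), then any overflow records
 * in the extents tree, which are removed from the tree as they are
 * freed.
 */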
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
                struct hfsplus_fork_raw *fork, int type)
{
        struct hfs_find_data fd;
        hfsplus_extent_rec ext_entry;
        u32 total_blocks, blocks, start;
        int res, i;

        total_blocks = be32_to_cpu(fork->total_blocks);
        if (!total_blocks)
                return 0;

        blocks = 0;
        for (i = 0; i < 8; i++)
                blocks += be32_to_cpu(fork->extents[i].block_count);

        res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
        if (res)
                return res;
        if (total_blocks == blocks)
                return 0;

        res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
        if (res)
                return res;
        do {
                res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
                                                total_blocks, type);
                if (res)
                        break;
                start = be32_to_cpu(fd.key->ext.start_block);
                hfsplus_free_extents(sb, ext_entry,
                                     total_blocks - start,
                                     total_blocks);
                hfs_brec_remove(&fd);
                total_blocks = start;
        } while (total_blocks > blocks);
        hfs_find_exit(&fd);

        return res;
}

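/*
 * Allocate more allocation blocks for a file and append them to its
 * extent records, spilling into a new extents overflow record when the
 * current record is full.  On success hip->alloc_blocks grows by the
 * number of blocks actually allocated (at most one clump).
 */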
int hfsplus_file_extend(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 start, len, goal;
        int res;

        if (sbi->alloc_file->i_size * 8 <
            sbi->total_blocks - sbi->free_blocks + 8) {
                /* extend alloc file */
                pr_err("extend alloc file! (%llu,%u,%u)\n",
                       sbi->alloc_file->i_size * 8,
                       sbi->total_blocks, sbi->free_blocks);
                return -ENOSPC;
        }

        mutex_lock(&hip->extents_lock);
        if (hip->alloc_blocks == hip->first_blocks)
                goal = hfsplus_ext_lastblock(hip->first_extents);
        else {
                res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
                if (res)
                        goto out;
                goal = hfsplus_ext_lastblock(hip->cached_extents);
        }

        len = hip->clump_blocks;
        start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
        if (start >= sbi->total_blocks) {
                start = hfsplus_block_allocate(sb, goal, 0, &len);
                if (start >= goal) {
                        res = -ENOSPC;
                        goto out;
                }
        }

        hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

        if (hip->alloc_blocks <= hip->first_blocks) {
                if (!hip->first_blocks) {
                        hfs_dbg(EXTENT, "first extents\n");
                        /* no extents yet */
                        hip->first_extents[0].start_block = cpu_to_be32(start);
                        hip->first_extents[0].block_count = cpu_to_be32(len);
                        res = 0;
                } else {
                        /* try to append to extents in inode */
                        res = hfsplus_add_extent(hip->first_extents,
                                                 hip->alloc_blocks,
                                                 start, len);
                        if (res == -ENOSPC)
                                goto insert_extent;
                }
                if (!res) {
                        hfsplus_dump_extent(hip->first_extents);
                        hip->first_blocks += len;
                }
        } else {
                res = hfsplus_add_extent(hip->cached_extents,
                                         hip->alloc_blocks - hip->cached_start,
                                         start, len);
                if (!res) {
                        hfsplus_dump_extent(hip->cached_extents);
                        hip->extent_state |= HFSPLUS_EXT_DIRTY;
                        hip->cached_blocks += len;
                } else if (res == -ENOSPC)
                        goto insert_extent;
        }
out:
        mutex_unlock(&hip->extents_lock);
        if (!res) {
                hip->alloc_blocks += len;
                hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
        }
        return res;

insert_extent:
        hfs_dbg(EXTENT, "insert new extent\n");
        res = hfsplus_ext_write_extent_locked(inode);
        if (res)
                goto out;

        memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
        hip->cached_extents[0].start_block = cpu_to_be32(start);
        hip->cached_extents[0].block_count = cpu_to_be32(len);
        hfsplus_dump_extent(hip->cached_extents);
        hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
        hip->cached_start = hip->alloc_blocks;
        hip->cached_blocks = len;

        res = 0;
        goto out;
}

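/*
 * Adjust the on-disk allocation of a file to match a new i_size:
 * an expanding truncate goes through the page cache write path to grow
 * the file, a shrinking truncate frees the allocation blocks past the
 * new size and trims or removes extent records, walking backwards from
 * the old end of the file.
 */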
void hfsplus_file_truncate(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        struct hfs_find_data fd;
        u32 alloc_cnt, blk_cnt, start;
        int res;

        hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
                inode->i_ino, (long long)hip->phys_size, inode->i_size);

        if (inode->i_size > hip->phys_size) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
                loff_t size = inode->i_size;

                res = pagecache_write_begin(NULL, mapping, size, 0,
                                                AOP_FLAG_UNINTERRUPTIBLE,
                                                &page, &fsdata);
                if (res)
                        return;
                res = pagecache_write_end(NULL, mapping, size,
                        0, 0, page, fsdata);
                if (res < 0)
                        return;
                mark_inode_dirty(inode);
                return;
        } else if (inode->i_size == hip->phys_size)
                return;

        blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
                        HFSPLUS_SB(sb)->alloc_blksz_shift;
        alloc_cnt = hip->alloc_blocks;
        if (blk_cnt == alloc_cnt)
                goto out;

        mutex_lock(&hip->extents_lock);
        res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
        if (res) {
                mutex_unlock(&hip->extents_lock);
                /* XXX: We lack error handling of hfsplus_file_truncate() */
                return;
        }
        while (1) {
                if (alloc_cnt == hip->first_blocks) {
                        hfsplus_free_extents(sb, hip->first_extents,
                                             alloc_cnt, alloc_cnt - blk_cnt);
                        hfsplus_dump_extent(hip->first_extents);
                        hip->first_blocks = blk_cnt;
                        break;
                }
                res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
                if (res)
                        break;
                start = hip->cached_start;
                hfsplus_free_extents(sb, hip->cached_extents,
                                     alloc_cnt - start, alloc_cnt - blk_cnt);
                hfsplus_dump_extent(hip->cached_extents);
                if (blk_cnt > start) {
                        hip->extent_state |= HFSPLUS_EXT_DIRTY;
                        break;
                }
                alloc_cnt = start;
                hip->cached_start = hip->cached_blocks = 0;
                hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
                hfs_brec_remove(&fd);
        }
        hfs_find_exit(&fd);
        mutex_unlock(&hip->extents_lock);

        hip->alloc_blocks = blk_cnt;
out:
        hip->phys_size = inode->i_size;
        hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
                sb->s_blocksize_bits;
        inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
        hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}