/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
        struct page **pagep;
        int l;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        off &= ~PAGE_CACHE_MASK;

        l = min(len, (int)PAGE_CACHE_SIZE - off);
        memcpy(buf, kmap(*pagep) + off, l);
        kunmap(*pagep);

        while ((len -= l) != 0) {
                buf += l;
                l = min(len, (int)PAGE_CACHE_SIZE);
                memcpy(buf, kmap(*++pagep), l);
                kunmap(*pagep);
        }
}

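/*
 * Read a big-endian 16-bit value at @off and return it in CPU byte
 * order.  Goes through hfs_bnode_read(), so the value may cross a
 * page boundary.
 */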
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
        __be16 data;
        /* TODO: optimize later... */
        hfs_bnode_read(node, &data, off, 2);
        return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
        u8 data;
        /* TODO: optimize later... */
        hfs_bnode_read(node, &data, off, 1);
        return data;
}

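/*
 * Copy the record key at @off into @key.  Leaf keys, and index keys
 * in trees with HFS_TREE_VARIDXKEYS, carry their length in the first
 * two bytes; other index keys occupy the fixed tree->max_key_len
 * bytes plus the two-byte length field.
 */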
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
        struct hfs_btree *tree;
        int key_len;

        tree = node->tree;
        if (node->type == HFS_NODE_LEAF ||
            tree->attributes & HFS_TREE_VARIDXKEYS)
                key_len = hfs_bnode_read_u16(node, off) + 2;
        else
                key_len = tree->max_key_len + 2;

        hfs_bnode_read(node, key, off, key_len);
}

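/* Copy a range of bytes into the raw data of a node, dirtying each page */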
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
        struct page **pagep;
        int l;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        off &= ~PAGE_CACHE_MASK;

        l = min(len, (int)PAGE_CACHE_SIZE - off);
        memcpy(kmap(*pagep) + off, buf, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);

        while ((len -= l) != 0) {
                buf += l;
                l = min(len, (int)PAGE_CACHE_SIZE);
                memcpy(kmap(*++pagep), buf, l);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
        __be16 v = cpu_to_be16(data);
        /* TODO: optimize later... */
        hfs_bnode_write(node, &v, off, 2);
}

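/* Zero out a range of bytes in a node, dirtying each touched page */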
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
        struct page **pagep;
        int l;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        off &= ~PAGE_CACHE_MASK;

        l = min(len, (int)PAGE_CACHE_SIZE - off);
        memset(kmap(*pagep) + off, 0, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);

        while ((len -= l) != 0) {
                l = min(len, (int)PAGE_CACHE_SIZE);
                memset(kmap(*++pagep), 0, l);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
}

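/*
 * Copy @len bytes from @src in @src_node to @dst in @dst_node.  If
 * both offsets share the same alignment within a page, the copy runs
 * page by page; otherwise each step copies up to the nearer of the
 * two page boundaries.
 */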
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
                    struct hfs_bnode *src_node, int src, int len)
{
        struct hfs_btree *tree;
        struct page **src_page, **dst_page;
        int l;

        dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
        if (!len)
                return;
        tree = src_node->tree;
        src += src_node->page_offset;
        dst += dst_node->page_offset;
        src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
        src &= ~PAGE_CACHE_MASK;
        dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
        dst &= ~PAGE_CACHE_MASK;

        if (src == dst) {
                l = min(len, (int)PAGE_CACHE_SIZE - src);
                memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);

                while ((len -= l) != 0) {
                        l = min(len, (int)PAGE_CACHE_SIZE);
                        memcpy(kmap(*++dst_page), kmap(*++src_page), l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);
                }
        } else {
                void *src_ptr, *dst_ptr;

                do {
                        src_ptr = kmap(*src_page) + src;
                        dst_ptr = kmap(*dst_page) + dst;
                        if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
                                l = PAGE_CACHE_SIZE - src;
                                src = 0;
                                dst += l;
                        } else {
                                l = PAGE_CACHE_SIZE - dst;
                                src += l;
                                dst = 0;
                        }
                        l = min(len, l);
                        memcpy(dst_ptr, src_ptr, l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);
                        if (!dst)
                                dst_page++;
                        else
                                src_page++;
                } while ((len -= l));
        }
}

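/*
 * Move @len bytes within a node, handling overlap like memmove():
 * when the destination is above the source the copy walks backwards
 * from the end of the range, otherwise forwards.
 */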
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
        struct page **src_page, **dst_page;
        int l;

        dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
        if (!len)
                return;
        src += node->page_offset;
        dst += node->page_offset;
        if (dst > src) {
                src += len - 1;
                src_page = node->page + (src >> PAGE_CACHE_SHIFT);
                src = (src & ~PAGE_CACHE_MASK) + 1;
                dst += len - 1;
                dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
                dst = (dst & ~PAGE_CACHE_MASK) + 1;

                if (src == dst) {
                        while (src < len) {
                                memmove(kmap(*dst_page), kmap(*src_page), src);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                len -= src;
                                src = PAGE_CACHE_SIZE;
                                src_page--;
                                dst_page--;
                        }
                        src -= len;
                        memmove(kmap(*dst_page) + src,
                                kmap(*src_page) + src, len);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);
                } else {
                        void *src_ptr, *dst_ptr;

                        do {
                                src_ptr = kmap(*src_page) + src;
                                dst_ptr = kmap(*dst_page) + dst;
                                if (src < dst) {
                                        l = src;
                                        src = PAGE_CACHE_SIZE;
                                        dst -= l;
                                } else {
                                        l = dst;
                                        src -= l;
                                        dst = PAGE_CACHE_SIZE;
                                }
                                l = min(len, l);
                                memmove(dst_ptr - l, src_ptr - l, l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                if (dst == PAGE_CACHE_SIZE)
                                        dst_page--;
                                else
                                        src_page--;
                        } while ((len -= l));
                }
        } else {
                src_page = node->page + (src >> PAGE_CACHE_SHIFT);
                src &= ~PAGE_CACHE_MASK;
                dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
                dst &= ~PAGE_CACHE_MASK;

                if (src == dst) {
                        l = min(len, (int)PAGE_CACHE_SIZE - src);
                        memmove(kmap(*dst_page) + src,
                                kmap(*src_page) + src, l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);

                        while ((len -= l) != 0) {
                                l = min(len, (int)PAGE_CACHE_SIZE);
                                memmove(kmap(*++dst_page),
                                        kmap(*++src_page), l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                        }
                } else {
                        void *src_ptr, *dst_ptr;

                        do {
                                src_ptr = kmap(*src_page) + src;
                                dst_ptr = kmap(*dst_page) + dst;
                                if (PAGE_CACHE_SIZE - src <
                                                PAGE_CACHE_SIZE - dst) {
                                        l = PAGE_CACHE_SIZE - src;
                                        src = 0;
                                        dst += l;
                                } else {
                                        l = PAGE_CACHE_SIZE - dst;
                                        src += l;
                                        dst = 0;
                                }
                                l = min(len, l);
                                memmove(dst_ptr, src_ptr, l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                if (!dst)
                                        dst_page++;
                                else
                                        src_page++;
                        } while ((len -= l));
                }
        }
}

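/*
 * Dump the node descriptor and record offset table via dprint(); for
 * index records the key length and child CNID are shown, for leaf
 * records the key length.
 */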
void hfs_bnode_dump(struct hfs_bnode *node)
{
        struct hfs_bnode_desc desc;
        __be32 cnid;
        int i, off, key_off;

        dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
        hfs_bnode_read(node, &desc, 0, sizeof(desc));
        dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
                be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
                desc.type, desc.height, be16_to_cpu(desc.num_recs));

        off = node->tree->node_size - 2;
        for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
                key_off = hfs_bnode_read_u16(node, off);
                dprint(DBG_BNODE_MOD, " %d", key_off);
                if (i && node->type == HFS_NODE_INDEX) {
                        int tmp;

                        if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
                                tmp = hfs_bnode_read_u16(node, key_off) + 2;
                        else
                                tmp = node->tree->max_key_len + 2;
                        dprint(DBG_BNODE_MOD, " (%d", tmp);
                        hfs_bnode_read(node, &cnid, key_off + tmp, 4);
                        dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
                } else if (i && node->type == HFS_NODE_LEAF) {
                        int tmp;

                        tmp = hfs_bnode_read_u16(node, key_off);
                        dprint(DBG_BNODE_MOD, " (%d)", tmp);
                }
        }
        dprint(DBG_BNODE_MOD, "\n");
}

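/*
 * Unlink a node from its neighbours' prev/next chain, updating the
 * on-disk links and the tree's leaf list head/tail, then mark it
 * HFS_BNODE_DELETED so the final hfs_bnode_put() frees it.
 */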
void hfs_bnode_unlink(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct hfs_bnode *tmp;
        __be32 cnid;

        tree = node->tree;
        if (node->prev) {
                tmp = hfs_bnode_find(tree, node->prev);
                if (IS_ERR(tmp))
                        return;
                tmp->next = node->next;
                cnid = cpu_to_be32(tmp->next);
                hfs_bnode_write(tmp, &cnid,
                        offsetof(struct hfs_bnode_desc, next), 4);
                hfs_bnode_put(tmp);
        } else if (node->type == HFS_NODE_LEAF)
                tree->leaf_head = node->next;

        if (node->next) {
                tmp = hfs_bnode_find(tree, node->next);
                if (IS_ERR(tmp))
                        return;
                tmp->prev = node->prev;
                cnid = cpu_to_be32(tmp->prev);
                hfs_bnode_write(tmp, &cnid,
                        offsetof(struct hfs_bnode_desc, prev), 4);
                hfs_bnode_put(tmp);
        } else if (node->type == HFS_NODE_LEAF)
                tree->leaf_tail = node->prev;

        /* move down? */
        if (!node->prev && !node->next)
                dprint(DBG_BNODE_MOD, "hfs_btree_del_level\n");
        if (!node->parent) {
                tree->root = 0;
                tree->depth = 0;
        }
        set_bit(HFS_BNODE_DELETED, &node->flags);
}

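/*
 * Fold a 32-bit node id down to an index into the node hash table;
 * the final mask assumes NODE_HASH_SIZE is a power of two.
 */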
static inline int hfs_bnode_hash(u32 num)
{
        num = (num >> 16) + num;
        num += num >> 8;
        return num & (NODE_HASH_SIZE - 1);
}

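/*
 * Look up a cached node by CNID.  Returns NULL if the node is not
 * hashed or the CNID is out of range; callers hold tree->hash_lock.
 */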
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
        struct hfs_bnode *node;

        if (cnid >= tree->node_count) {
                printk(KERN_ERR "hfs: request for non-existent node "
                                "%u in B*Tree\n", cnid);
                return NULL;
        }

        for (node = tree->node_hash[hfs_bnode_hash(cnid)];
                        node; node = node->next_hash)
                if (node->this == cnid)
                        return node;
        return NULL;
}

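/*
 * Allocate a node, insert it into the hash table and read its pages
 * from the btree inode's page cache.  If another task is already
 * creating the same node, wait for it instead.  On I/O failure the
 * node is returned with HFS_BNODE_ERROR set, not freed, so callers
 * must check the flag.
 */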
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
        struct super_block *sb;
        struct hfs_bnode *node, *node2;
        struct address_space *mapping;
        struct page *page;
        int size, block, i, hash;
        loff_t off;

        if (cnid >= tree->node_count) {
                printk(KERN_ERR "hfs: request for non-existent node "
                                "%u in B*Tree\n", cnid);
                return NULL;
        }

        sb = tree->inode->i_sb;
        size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
                sizeof(struct page *);
        node = kzalloc(size, GFP_KERNEL);
        if (!node)
                return NULL;
        node->tree = tree;
        node->this = cnid;
        set_bit(HFS_BNODE_NEW, &node->flags);
        atomic_set(&node->refcnt, 1);
        dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
               node->tree->cnid, node->this);
        init_waitqueue_head(&node->lock_wq);
        spin_lock(&tree->hash_lock);
        node2 = hfs_bnode_findhash(tree, cnid);
        if (!node2) {
                hash = hfs_bnode_hash(cnid);
                node->next_hash = tree->node_hash[hash];
                tree->node_hash[hash] = node;
                tree->node_hash_cnt++;
        } else {
                /* Lost the race: wait for the winner to finish reading */
                spin_unlock(&tree->hash_lock);
                kfree(node);
                wait_event(node2->lock_wq,
                        !test_bit(HFS_BNODE_NEW, &node2->flags));
                return node2;
        }
        spin_unlock(&tree->hash_lock);

        mapping = tree->inode->i_mapping;
        off = (loff_t)cnid << tree->node_size_shift;
        block = off >> PAGE_CACHE_SHIFT;
        node->page_offset = off & ~PAGE_CACHE_MASK;
        for (i = 0; i < tree->pages_per_bnode; block++, i++) {
                page = read_mapping_page(mapping, block, NULL);
                if (IS_ERR(page))
                        goto fail;
                if (PageError(page)) {
                        page_cache_release(page);
                        goto fail;
                }
                /*
                 * The page reference is dropped here; node->page[]
                 * keeps bare pointers into the page cache (see the
                 * disabled release loop in hfs_bnode_free()).
                 */
                page_cache_release(page);
                node->page[i] = page;
        }

        return node;
fail:
        set_bit(HFS_BNODE_ERROR, &node->flags);
        return node;
}

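/* Remove a node from the hash table; called with tree->hash_lock held */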
void hfs_bnode_unhash(struct hfs_bnode *node)
{
        struct hfs_bnode **p;

        dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
                node->tree->cnid, node->this, atomic_read(&node->refcnt));
        for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
             *p && *p != node; p = &(*p)->next_hash)
                ;
        BUG_ON(!*p);
        *p = node->next_hash;
        node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
        struct hfs_bnode *node;
        struct hfs_bnode_desc *desc;
        int i, rec_off, off, next_off;
        int entry_size, key_size;

        spin_lock(&tree->hash_lock);
        node = hfs_bnode_findhash(tree, num);
        if (node) {
                hfs_bnode_get(node);
                spin_unlock(&tree->hash_lock);
                wait_event(node->lock_wq,
                        !test_bit(HFS_BNODE_NEW, &node->flags));
                if (test_bit(HFS_BNODE_ERROR, &node->flags))
                        goto node_error;
                return node;
        }
        spin_unlock(&tree->hash_lock);
        node = __hfs_bnode_create(tree, num);
        if (!node)
                return ERR_PTR(-ENOMEM);
        if (test_bit(HFS_BNODE_ERROR, &node->flags))
                goto node_error;
        if (!test_bit(HFS_BNODE_NEW, &node->flags))
                return node;

        desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
                        node->page_offset);
        node->prev = be32_to_cpu(desc->prev);
        node->next = be32_to_cpu(desc->next);
        node->num_recs = be16_to_cpu(desc->num_recs);
        node->type = desc->type;
        node->height = desc->height;
        kunmap(node->page[0]);

        /* The node height must be consistent with the node type */
        switch (node->type) {
        case HFS_NODE_HEADER:
        case HFS_NODE_MAP:
                if (node->height != 0)
                        goto node_error;
                break;
        case HFS_NODE_LEAF:
                if (node->height != 1)
                        goto node_error;
                break;
        case HFS_NODE_INDEX:
                if (node->height <= 1 || node->height > tree->depth)
                        goto node_error;
                break;
        default:
                goto node_error;
        }

        /*
         * Sanity-check the record offset table at the end of the node:
         * offsets must be even, strictly increasing and within bounds,
         * and each index/leaf record must be larger than its key.
         */
        rec_off = tree->node_size - 2;
        off = hfs_bnode_read_u16(node, rec_off);
        if (off != sizeof(struct hfs_bnode_desc))
                goto node_error;
        for (i = 1; i <= node->num_recs; off = next_off, i++) {
                rec_off -= 2;
                next_off = hfs_bnode_read_u16(node, rec_off);
                if (next_off <= off ||
                    next_off > tree->node_size ||
                    next_off & 1)
                        goto node_error;
                entry_size = next_off - off;
                if (node->type != HFS_NODE_INDEX &&
                    node->type != HFS_NODE_LEAF)
                        continue;
                key_size = hfs_bnode_read_u16(node, off) + 2;
                if (key_size >= entry_size || key_size & 1)
                        goto node_error;
        }
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);
        return node;

node_error:
        set_bit(HFS_BNODE_ERROR, &node->flags);
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);
        hfs_bnode_put(node);
        return ERR_PTR(-EIO);
}

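/*
 * Free the in-memory node.  The page release loop is compiled out
 * since __hfs_bnode_create() does not keep references on the pages.
 */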
void hfs_bnode_free(struct hfs_bnode *node)
{
#if 0
        int i;

        for (i = 0; i < node->tree->pages_per_bnode; i++)
                if (node->page[i])
                        page_cache_release(node->page[i]);
#endif
        kfree(node);
}

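/*
 * Create a node that is about to be (re)used: its contents are
 * zeroed rather than read and validated, and it must not already be
 * present in the hash table.
 */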
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
        struct hfs_bnode *node;
        struct page **pagep;
        int i;

        spin_lock(&tree->hash_lock);
        node = hfs_bnode_findhash(tree, num);
        spin_unlock(&tree->hash_lock);
        if (node) {
                printk(KERN_CRIT "new node %u already hashed?\n", num);
                WARN_ON(1);
                return node;
        }
        node = __hfs_bnode_create(tree, num);
        if (!node)
                return ERR_PTR(-ENOMEM);
        if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
                hfs_bnode_put(node);
                return ERR_PTR(-EIO);
        }

        pagep = node->page;
        memset(kmap(*pagep) + node->page_offset, 0,
               min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
        set_page_dirty(*pagep);
        kunmap(*pagep);
        for (i = 1; i < tree->pages_per_bnode; i++) {
                memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);

        return node;
}

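/* Grab a reference on a node */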
void hfs_bnode_get(struct hfs_bnode *node)
{
        if (node) {
                atomic_inc(&node->refcnt);
                dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
                        node->tree->cnid, node->this,
                        atomic_read(&node->refcnt));
        }
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
        if (node) {
                struct hfs_btree *tree = node->tree;
                int i;

                dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
                        node->tree->cnid, node->this,
                        atomic_read(&node->refcnt));
                BUG_ON(!atomic_read(&node->refcnt));
                if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
                        return;
                for (i = 0; i < tree->pages_per_bnode; i++) {
                        if (!node->page[i])
                                continue;
                        mark_page_accessed(node->page[i]);
                }

                if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
                        /*
                         * Last reference to a deleted node: unhash it,
                         * return its space to the bmap and free it.
                         */
                        hfs_bnode_unhash(node);
                        spin_unlock(&tree->hash_lock);
                        hfs_bmap_free(node);
                        hfs_bnode_free(node);
                        return;
                }
                spin_unlock(&tree->hash_lock);
        }
}