linux/fs/gfs2/glock.c
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/sched.h>
  11#include <linux/slab.h>
  12#include <linux/spinlock.h>
  13#include <linux/completion.h>
  14#include <linux/buffer_head.h>
  15#include <linux/delay.h>
  16#include <linux/sort.h>
  17#include <linux/jhash.h>
  18#include <linux/kallsyms.h>
  19#include <linux/gfs2_ondisk.h>
  20#include <linux/list.h>
  21#include <linux/lm_interface.h>
  22#include <linux/wait.h>
  23#include <linux/module.h>
  24#include <linux/rwsem.h>
  25#include <asm/uaccess.h>
  26#include <linux/seq_file.h>
  27#include <linux/debugfs.h>
  28#include <linux/kthread.h>
  29#include <linux/freezer.h>
  30#include <linux/workqueue.h>
  31#include <linux/jiffies.h>
  32
  33#include "gfs2.h"
  34#include "incore.h"
  35#include "glock.h"
  36#include "glops.h"
  37#include "inode.h"
  38#include "lops.h"
  39#include "meta_io.h"
  40#include "quota.h"
  41#include "super.h"
  42#include "util.h"
  43
  44struct gfs2_gl_hash_bucket {
  45        struct hlist_head hb_list;
  46};
  47
  48struct gfs2_glock_iter {
  49        int hash;                       /* hash bucket index         */
  50        struct gfs2_sbd *sdp;           /* incore superblock         */
  51        struct gfs2_glock *gl;          /* current glock struct      */
  52        char string[512];               /* scratch space             */
  53};
  54
  55typedef void (*glock_examiner) (struct gfs2_glock * gl);
  56
  57static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
  58static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
  59#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
  60static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
  61
  62static DECLARE_RWSEM(gfs2_umount_flush_sem);
  63static struct dentry *gfs2_root;
  64static struct task_struct *scand_process;
  65static unsigned int scand_secs = 5;
  66static struct workqueue_struct *glock_workqueue;
  67
  68#define GFS2_GL_HASH_SHIFT      15
  69#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
  70#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
  71
  72static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
   73
  74
  75/*
  76 * Despite what you might think, the numbers below are not arbitrary :-)
  77 * They are taken from the ipv4 routing hash code, which is well tested
   78 * and thus should be nearly optimal. Later on we might tweak the numbers
  79 * but for now this should be fine.
  80 *
  81 * The reason for putting the locks in a separate array from the list heads
  82 * is that we can have fewer locks than list heads and save memory. We use
  83 * the same hash function for both, but with a different hash mask.
  84 */
  85#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
  86        defined(CONFIG_PROVE_LOCKING)
  87
  88#ifdef CONFIG_LOCKDEP
  89# define GL_HASH_LOCK_SZ        256
  90#else
  91# if NR_CPUS >= 32
  92#  define GL_HASH_LOCK_SZ       4096
  93# elif NR_CPUS >= 16
  94#  define GL_HASH_LOCK_SZ       2048
  95# elif NR_CPUS >= 8
  96#  define GL_HASH_LOCK_SZ       1024
  97# elif NR_CPUS >= 4
  98#  define GL_HASH_LOCK_SZ       512
  99# else
 100#  define GL_HASH_LOCK_SZ       256
 101# endif
 102#endif
 103
 104/* We never want more locks than chains */
 105#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
 106# undef GL_HASH_LOCK_SZ
 107# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
 108#endif
 109
 110static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
 111
 112static inline rwlock_t *gl_lock_addr(unsigned int x)
 113{
 114        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
 115}
 116#else /* not SMP, so no spinlocks required */
 117static inline rwlock_t *gl_lock_addr(unsigned int x)
 118{
 119        return NULL;
 120}
 121#endif
 122
 123/**
 124 * gl_hash() - Turn glock number into hash bucket number
  125 * @sdp: The GFS2 superblock
  126 * @name: The lock name
 127 * Returns: The number of the corresponding hash bucket
 128 */
 129
 130static unsigned int gl_hash(const struct gfs2_sbd *sdp,
 131                            const struct lm_lockname *name)
 132{
 133        unsigned int h;
 134
 135        h = jhash(&name->ln_number, sizeof(u64), 0);
 136        h = jhash(&name->ln_type, sizeof(unsigned int), h);
 137        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
 138        h &= GFS2_GL_HASH_MASK;
 139
 140        return h;
 141}
 142
 143/**
 144 * glock_free() - Perform a few checks and then release struct gfs2_glock
 145 * @gl: The glock to release
 146 *
 147 * Also calls lock module to release its internal structure for this glock.
 148 *
 149 */
 150
 151static void glock_free(struct gfs2_glock *gl)
 152{
 153        struct gfs2_sbd *sdp = gl->gl_sbd;
 154        struct inode *aspace = gl->gl_aspace;
 155
 156        if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
 157                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
 158
 159        if (aspace)
 160                gfs2_aspace_put(aspace);
 161
 162        kmem_cache_free(gfs2_glock_cachep, gl);
 163}
 164
 165/**
 166 * gfs2_glock_hold() - increment reference count on glock
 167 * @gl: The glock to hold
 168 *
 169 */
 170
 171static void gfs2_glock_hold(struct gfs2_glock *gl)
 172{
 173        atomic_inc(&gl->gl_ref);
 174}
 175
 176/**
 177 * gfs2_glock_put() - Decrement reference count on glock
 178 * @gl: The glock to put
 179 *
 180 */
 181
 182int gfs2_glock_put(struct gfs2_glock *gl)
 183{
 184        int rv = 0;
 185
 186        write_lock(gl_lock_addr(gl->gl_hash));
 187        if (atomic_dec_and_test(&gl->gl_ref)) {
 188                hlist_del(&gl->gl_list);
 189                write_unlock(gl_lock_addr(gl->gl_hash));
 190                GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
 191                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
 192                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 193                glock_free(gl);
 194                rv = 1;
 195                goto out;
 196        }
 197        write_unlock(gl_lock_addr(gl->gl_hash));
 198out:
 199        return rv;
 200}
 201
 202/**
 203 * search_bucket() - Find struct gfs2_glock by lock number
  204 * @hash: the hash bucket number
  205 * @sdp: the filesystem
  206 * @name: The lock name
 207 * Returns: NULL, or the struct gfs2_glock with the requested number
 208 */
 209
 210static struct gfs2_glock *search_bucket(unsigned int hash,
 211                                        const struct gfs2_sbd *sdp,
 212                                        const struct lm_lockname *name)
 213{
 214        struct gfs2_glock *gl;
 215        struct hlist_node *h;
 216
 217        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
 218                if (!lm_name_equal(&gl->gl_name, name))
 219                        continue;
 220                if (gl->gl_sbd != sdp)
 221                        continue;
 222
 223                atomic_inc(&gl->gl_ref);
 224
 225                return gl;
 226        }
 227
 228        return NULL;
 229}
 230
 231/**
 232 * gfs2_glock_find() - Find glock by lock number
 233 * @sdp: The GFS2 superblock
 234 * @name: The lock name
 235 *
 236 * Returns: NULL, or the struct gfs2_glock with the requested number
 237 */
 238
 239static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
 240                                          const struct lm_lockname *name)
 241{
 242        unsigned int hash = gl_hash(sdp, name);
 243        struct gfs2_glock *gl;
 244
 245        read_lock(gl_lock_addr(hash));
 246        gl = search_bucket(hash, sdp, name);
 247        read_unlock(gl_lock_addr(hash));
 248
 249        return gl;
 250}
 251
 252/**
  253 * may_grant - check if it's ok to grant a new lock
 254 * @gl: The glock
 255 * @gh: The lock request which we wish to grant
 256 *
  257 * Returns: true if it's ok to grant the lock
 258 */
 259
 260static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 261{
 262        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
 263        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
 264             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
 265                return 0;
 266        if (gl->gl_state == gh->gh_state)
 267                return 1;
 268        if (gh->gh_flags & GL_EXACT)
 269                return 0;
 270        if (gl->gl_state == LM_ST_EXCLUSIVE) {
 271                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
 272                        return 1;
 273                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
 274                        return 1;
 275        }
 276        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
 277                return 1;
 278        return 0;
 279}
 280
 281static void gfs2_holder_wake(struct gfs2_holder *gh)
 282{
 283        clear_bit(HIF_WAIT, &gh->gh_iflags);
 284        smp_mb__after_clear_bit();
 285        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 286}
 287
 288/**
 289 * do_promote - promote as many requests as possible on the current queue
 290 * @gl: The glock
 291 * 
 292 * Returns: true if there is a blocked holder at the head of the list
 293 */
 294
 295static int do_promote(struct gfs2_glock *gl)
 296{
 297        const struct gfs2_glock_operations *glops = gl->gl_ops;
 298        struct gfs2_holder *gh, *tmp;
 299        int ret;
 300
 301restart:
 302        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 303                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 304                        continue;
 305                if (may_grant(gl, gh)) {
 306                        if (gh->gh_list.prev == &gl->gl_holders &&
 307                            glops->go_lock) {
 308                                spin_unlock(&gl->gl_spin);
 309                                /* FIXME: eliminate this eventually */
 310                                ret = glops->go_lock(gh);
 311                                spin_lock(&gl->gl_spin);
 312                                if (ret) {
 313                                        gh->gh_error = ret;
 314                                        list_del_init(&gh->gh_list);
 315                                        gfs2_holder_wake(gh);
 316                                        goto restart;
 317                                }
 318                                set_bit(HIF_HOLDER, &gh->gh_iflags);
 319                                gfs2_holder_wake(gh);
 320                                goto restart;
 321                        }
 322                        set_bit(HIF_HOLDER, &gh->gh_iflags);
 323                        gfs2_holder_wake(gh);
 324                        continue;
 325                }
 326                if (gh->gh_list.prev == &gl->gl_holders)
 327                        return 1;
 328                break;
 329        }
 330        return 0;
 331}
 332
 333/**
 334 * do_error - Something unexpected has happened during a lock request
 335 *
 336 */
 337
 338static inline void do_error(struct gfs2_glock *gl, const int ret)
 339{
 340        struct gfs2_holder *gh, *tmp;
 341
 342        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 343                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 344                        continue;
 345                if (ret & LM_OUT_ERROR)
 346                        gh->gh_error = -EIO;
 347                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 348                        gh->gh_error = GLR_TRYFAILED;
 349                else
 350                        continue;
 351                list_del_init(&gh->gh_list);
 352                gfs2_holder_wake(gh);
 353        }
 354}
 355
 356/**
 357 * find_first_waiter - find the first gh that's waiting for the glock
 358 * @gl: the glock
 359 */
 360
 361static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
 362{
 363        struct gfs2_holder *gh;
 364
 365        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 366                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
 367                        return gh;
 368        }
 369        return NULL;
 370}
 371
 372/**
 373 * state_change - record that the glock is now in a different state
 374 * @gl: the glock
  375 * @new_state: the new state
 376 *
 377 */
 378
 379static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 380{
 381        int held1, held2;
 382
 383        held1 = (gl->gl_state != LM_ST_UNLOCKED);
 384        held2 = (new_state != LM_ST_UNLOCKED);
 385
 386        if (held1 != held2) {
 387                if (held2)
 388                        gfs2_glock_hold(gl);
 389                else
 390                        gfs2_glock_put(gl);
 391        }
 392
 393        gl->gl_state = new_state;
 394        gl->gl_tchange = jiffies;
 395}
 396
 397static void gfs2_demote_wake(struct gfs2_glock *gl)
 398{
 399        gl->gl_demote_state = LM_ST_EXCLUSIVE;
 400        clear_bit(GLF_DEMOTE, &gl->gl_flags);
 401        smp_mb__after_clear_bit();
 402        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
 403}
 404
 405/**
 406 * finish_xmote - The DLM has replied to one of our lock requests
 407 * @gl: The glock
 408 * @ret: The status from the DLM
 409 *
 410 */
 411
 412static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 413{
 414        const struct gfs2_glock_operations *glops = gl->gl_ops;
 415        struct gfs2_holder *gh;
 416        unsigned state = ret & LM_OUT_ST_MASK;
 417
 418        spin_lock(&gl->gl_spin);
 419        state_change(gl, state);
 420        gh = find_first_waiter(gl);
 421
 422        /* Demote to UN request arrived during demote to SH or DF */
 423        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
 424            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
 425                gl->gl_target = LM_ST_UNLOCKED;
 426
 427        /* Check for state != intended state */
 428        if (unlikely(state != gl->gl_target)) {
 429                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 430                        /* move to back of queue and try next entry */
 431                        if (ret & LM_OUT_CANCELED) {
 432                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
 433                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
 434                                gh = find_first_waiter(gl);
 435                                gl->gl_target = gh->gh_state;
 436                                goto retry;
 437                        }
 438                        /* Some error or failed "try lock" - report it */
 439                        if ((ret & LM_OUT_ERROR) ||
 440                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 441                                gl->gl_target = gl->gl_state;
 442                                do_error(gl, ret);
 443                                goto out;
 444                        }
 445                }
 446                switch(state) {
 447                /* Unlocked due to conversion deadlock, try again */
 448                case LM_ST_UNLOCKED:
 449retry:
 450                        do_xmote(gl, gh, gl->gl_target);
 451                        break;
 452                /* Conversion fails, unlock and try again */
 453                case LM_ST_SHARED:
 454                case LM_ST_DEFERRED:
 455                        do_xmote(gl, gh, LM_ST_UNLOCKED);
 456                        break;
 457                default: /* Everything else */
 458                        printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
 459                        GLOCK_BUG_ON(gl, 1);
 460                }
 461                spin_unlock(&gl->gl_spin);
 462                gfs2_glock_put(gl);
 463                return;
 464        }
 465
 466        /* Fast path - we got what we asked for */
 467        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
 468                gfs2_demote_wake(gl);
 469        if (state != LM_ST_UNLOCKED) {
 470                if (glops->go_xmote_bh) {
 471                        int rv;
 472                        spin_unlock(&gl->gl_spin);
 473                        rv = glops->go_xmote_bh(gl, gh);
 474                        if (rv == -EAGAIN)
 475                                return;
 476                        spin_lock(&gl->gl_spin);
 477                        if (rv) {
 478                                do_error(gl, rv);
 479                                goto out;
 480                        }
 481                }
 482                do_promote(gl);
 483        }
 484out:
 485        clear_bit(GLF_LOCK, &gl->gl_flags);
 486        spin_unlock(&gl->gl_spin);
 487        gfs2_glock_put(gl);
 488}
 489
 490static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
 491                                 unsigned int cur_state, unsigned int req_state,
 492                                 unsigned int flags)
 493{
 494        int ret = LM_OUT_ERROR;
 495
 496        if (!sdp->sd_lockstruct.ls_ops->lm_lock)
 497                return req_state == LM_ST_UNLOCKED ? 0 : req_state;
 498
 499        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 500                ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
 501                                                         req_state, flags);
 502        return ret;
 503}
 504
 505/**
 506 * do_xmote - Calls the DLM to change the state of a lock
 507 * @gl: The lock state
 508 * @gh: The holder (only for promotes)
 509 * @target: The target lock state
 510 *
 511 */
 512
 513static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
 514{
 515        const struct gfs2_glock_operations *glops = gl->gl_ops;
 516        struct gfs2_sbd *sdp = gl->gl_sbd;
 517        unsigned int lck_flags = gh ? gh->gh_flags : 0;
 518        int ret;
 519
 520        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 521                      LM_FLAG_PRIORITY);
 522        BUG_ON(gl->gl_state == target);
 523        BUG_ON(gl->gl_state == gl->gl_target);
 524        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 525            glops->go_inval) {
 526                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 527                do_error(gl, 0); /* Fail queued try locks */
 528        }
 529        spin_unlock(&gl->gl_spin);
 530        if (glops->go_xmote_th)
 531                glops->go_xmote_th(gl);
 532        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 533                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 534        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 535
 536        gfs2_glock_hold(gl);
 537        if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
 538            gl->gl_state == LM_ST_DEFERRED) &&
 539            !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 540                lck_flags |= LM_FLAG_TRY_1CB;
 541        ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
 542
 543        if (!(ret & LM_OUT_ASYNC)) {
 544                finish_xmote(gl, ret);
 545                gfs2_glock_hold(gl);
 546                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 547                        gfs2_glock_put(gl);
 548        } else {
 549                GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
 550        }
 551        spin_lock(&gl->gl_spin);
 552}
 553
 554/**
 555 * find_first_holder - find the first "holder" gh
 556 * @gl: the glock
 557 */
 558
 559static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 560{
 561        struct gfs2_holder *gh;
 562
 563        if (!list_empty(&gl->gl_holders)) {
 564                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 565                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 566                        return gh;
 567        }
 568        return NULL;
 569}
 570
 571/**
 572 * run_queue - do all outstanding tasks related to a glock
 573 * @gl: The glock in question
 574 * @nonblock: True if we must not block in run_queue
 575 *
 576 */
 577
 578static void run_queue(struct gfs2_glock *gl, const int nonblock)
 579{
 580        struct gfs2_holder *gh = NULL;
 581
 582        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 583                return;
 584
 585        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 586
 587        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 588            gl->gl_demote_state != gl->gl_state) {
 589                if (find_first_holder(gl))
 590                        goto out;
 591                if (nonblock)
 592                        goto out_sched;
 593                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 594                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
 595                gl->gl_target = gl->gl_demote_state;
 596        } else {
 597                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 598                        gfs2_demote_wake(gl);
 599                if (do_promote(gl) == 0)
 600                        goto out;
 601                gh = find_first_waiter(gl);
 602                gl->gl_target = gh->gh_state;
 603                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 604                        do_error(gl, 0); /* Fail queued try locks */
 605        }
 606        do_xmote(gl, gh, gl->gl_target);
 607        return;
 608
 609out_sched:
 610        gfs2_glock_hold(gl);
 611        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 612                gfs2_glock_put(gl);
 613out:
 614        clear_bit(GLF_LOCK, &gl->gl_flags);
 615}
 616
 617static void glock_work_func(struct work_struct *work)
 618{
 619        unsigned long delay = 0;
 620        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 621
 622        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
 623                finish_xmote(gl, gl->gl_reply);
 624        spin_lock(&gl->gl_spin);
 625        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 626            gl->gl_state != LM_ST_UNLOCKED &&
 627            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
 628                unsigned long holdtime, now = jiffies;
 629                holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
 630                if (time_before(now, holdtime))
 631                        delay = holdtime - now;
 632                set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
 633        }
 634        run_queue(gl, 0);
 635        spin_unlock(&gl->gl_spin);
 636        if (!delay ||
 637            queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 638                gfs2_glock_put(gl);
 639}
 640
 641static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
 642                     void **lockp)
 643{
 644        int error = -EIO;
 645        if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
 646                return 0;
 647        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 648                error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
 649                                sdp->sd_lockstruct.ls_lockspace, name, lockp);
 650        return error;
 651}
 652
 653/**
 654 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 655 * @sdp: The GFS2 superblock
 656 * @number: the lock number
 657 * @glops: The glock_operations to use
 658 * @create: If 0, don't create the glock if it doesn't exist
 659 * @glp: the glock is returned here
 660 *
 661 * This does not lock a glock, just finds/creates structures for one.
 662 *
 663 * Returns: errno
 664 */
 665
 666int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 667                   const struct gfs2_glock_operations *glops, int create,
 668                   struct gfs2_glock **glp)
 669{
 670        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
 671        struct gfs2_glock *gl, *tmp;
 672        unsigned int hash = gl_hash(sdp, &name);
 673        int error;
 674
 675        read_lock(gl_lock_addr(hash));
 676        gl = search_bucket(hash, sdp, &name);
 677        read_unlock(gl_lock_addr(hash));
 678
 679        if (gl || !create) {
 680                *glp = gl;
 681                return 0;
 682        }
 683
 684        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
 685        if (!gl)
 686                return -ENOMEM;
 687
 688        gl->gl_flags = 0;
 689        gl->gl_name = name;
 690        atomic_set(&gl->gl_ref, 1);
 691        gl->gl_state = LM_ST_UNLOCKED;
 692        gl->gl_target = LM_ST_UNLOCKED;
 693        gl->gl_demote_state = LM_ST_EXCLUSIVE;
 694        gl->gl_hash = hash;
 695        gl->gl_ops = glops;
 696        gl->gl_stamp = jiffies;
 697        gl->gl_tchange = jiffies;
 698        gl->gl_object = NULL;
 699        gl->gl_sbd = sdp;
 700        gl->gl_aspace = NULL;
 701        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 702
 703        /* If this glock protects actual on-disk data or metadata blocks,
 704           create a VFS inode to manage the pages/buffers holding them. */
 705        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
 706                gl->gl_aspace = gfs2_aspace_get(sdp);
 707                if (!gl->gl_aspace) {
 708                        error = -ENOMEM;
 709                        goto fail;
 710                }
 711        }
 712
 713        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
 714        if (error)
 715                goto fail_aspace;
 716
 717        write_lock(gl_lock_addr(hash));
 718        tmp = search_bucket(hash, sdp, &name);
 719        if (tmp) {
 720                write_unlock(gl_lock_addr(hash));
 721                glock_free(gl);
 722                gl = tmp;
 723        } else {
 724                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
 725                write_unlock(gl_lock_addr(hash));
 726        }
 727
 728        *glp = gl;
 729
 730        return 0;
 731
 732fail_aspace:
 733        if (gl->gl_aspace)
 734                gfs2_aspace_put(gl->gl_aspace);
 735fail:
 736        kmem_cache_free(gfs2_glock_cachep, gl);
 737        return error;
 738}
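
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the usual pattern built on top of gfs2_glock_get(). A caller finds or
 * creates the glock, queues a holder with gfs2_glock_nq_init() (a helper
 * from glock.h), and drops its temporary reference, since the holder now
 * pins the glock. The function name and block number are hypothetical.
 */
static int example_lock_block(struct gfs2_sbd *sdp, u64 blkno)
{
        struct gfs2_glock *gl;
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_get(sdp, blkno, &gfs2_inode_glops, CREATE, &gl);
        if (error)
                return error;

        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
        gfs2_glock_put(gl);     /* the holder keeps its own reference */
        if (error)
                return error;

        /* ... access the data protected by this glock ... */

        gfs2_glock_dq_uninit(&gh);
        return 0;
}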
 739
 740/**
 741 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 742 * @gl: the glock
 743 * @state: the state we're requesting
 744 * @flags: the modifier flags
 745 * @gh: the holder structure
 746 *
 747 */
 748
 749void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 750                      struct gfs2_holder *gh)
 751{
 752        INIT_LIST_HEAD(&gh->gh_list);
 753        gh->gh_gl = gl;
 754        gh->gh_ip = (unsigned long)__builtin_return_address(0);
 755        gh->gh_owner_pid = get_pid(task_pid(current));
 756        gh->gh_state = state;
 757        gh->gh_flags = flags;
 758        gh->gh_error = 0;
 759        gh->gh_iflags = 0;
 760        gfs2_glock_hold(gl);
 761}
 762
 763/**
 764 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 765 * @state: the state we're requesting
 766 * @flags: the modifier flags
 767 * @gh: the holder structure
 768 *
 769 * Don't mess with the glock.
 770 *
 771 */
 772
 773void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
 774{
 775        gh->gh_state = state;
 776        gh->gh_flags = flags;
 777        gh->gh_iflags = 0;
 778        gh->gh_ip = (unsigned long)__builtin_return_address(0);
 779}
 780
 781/**
 782 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 783 * @gh: the holder structure
 784 *
 785 */
 786
 787void gfs2_holder_uninit(struct gfs2_holder *gh)
 788{
 789        put_pid(gh->gh_owner_pid);
 790        gfs2_glock_put(gh->gh_gl);
 791        gh->gh_gl = NULL;
 792        gh->gh_ip = 0;
 793}
 794
 795static int just_schedule(void *word)
 796{
 797        schedule();
 798        return 0;
 799}
 800
 801static void wait_on_holder(struct gfs2_holder *gh)
 802{
 803        might_sleep();
 804        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
 805}
 806
 807static void wait_on_demote(struct gfs2_glock *gl)
 808{
 809        might_sleep();
 810        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
 811}
 812
 813/**
 814 * handle_callback - process a demote request
 815 * @gl: the glock
 816 * @state: the state the caller wants us to change to
 817 *
 818 * There are only two requests that we are going to see in actual
  819 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 820 */
 821
 822static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 823                            int remote, unsigned long delay)
 824{
 825        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 826
 827        set_bit(bit, &gl->gl_flags);
 828        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 829                gl->gl_demote_state = state;
 830                gl->gl_demote_time = jiffies;
 831                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
 832                    gl->gl_object)
 833                        gfs2_glock_schedule_for_reclaim(gl);
 834        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 835                        gl->gl_demote_state != state) {
 836                gl->gl_demote_state = LM_ST_UNLOCKED;
 837        }
 838}
 839
 840/**
 841 * gfs2_glock_wait - wait on a glock acquisition
 842 * @gh: the glock holder
 843 *
 844 * Returns: 0 on success
 845 */
 846
 847int gfs2_glock_wait(struct gfs2_holder *gh)
 848{
 849        wait_on_holder(gh);
 850        return gh->gh_error;
 851}
 852
 853void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 854{
 855        va_list args;
 856
 857        va_start(args, fmt);
 858        if (seq) {
 859                struct gfs2_glock_iter *gi = seq->private;
  860                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
  861                seq_printf(seq, "%s", gi->string);
 862        } else {
 863                printk(KERN_ERR " ");
 864                vprintk(fmt, args);
 865        }
 866        va_end(args);
 867}
 868
 869/**
 870 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 871 * @gh: the holder structure to add
 872 *
 873 * Eventually we should move the recursive locking trap to a
 874 * debugging option or something like that. This is the fast
 875 * path and needs to have the minimum number of distractions.
 876 * 
 877 */
 878
 879static inline void add_to_queue(struct gfs2_holder *gh)
 880{
 881        struct gfs2_glock *gl = gh->gh_gl;
 882        struct gfs2_sbd *sdp = gl->gl_sbd;
 883        struct list_head *insert_pt = NULL;
 884        struct gfs2_holder *gh2;
 885        int try_lock = 0;
 886
 887        BUG_ON(gh->gh_owner_pid == NULL);
 888        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 889                BUG();
 890
 891        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 892                if (test_bit(GLF_LOCK, &gl->gl_flags))
 893                        try_lock = 1;
 894                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 895                        goto fail;
 896        }
 897
 898        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
 899                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
 900                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
 901                        goto trap_recursive;
 902                if (try_lock &&
 903                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
 904                    !may_grant(gl, gh)) {
 905fail:
 906                        gh->gh_error = GLR_TRYFAILED;
 907                        gfs2_holder_wake(gh);
 908                        return;
 909                }
 910                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
 911                        continue;
 912                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
 913                        insert_pt = &gh2->gh_list;
 914        }
 915        if (likely(insert_pt == NULL)) {
 916                list_add_tail(&gh->gh_list, &gl->gl_holders);
 917                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
 918                        goto do_cancel;
 919                return;
 920        }
 921        list_add_tail(&gh->gh_list, insert_pt);
 922do_cancel:
 923        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
 924        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
 925                spin_unlock(&gl->gl_spin);
 926                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
 927                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
 928                spin_lock(&gl->gl_spin);
 929        }
 930        return;
 931
 932trap_recursive:
 933        print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
 934        printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
 935        printk(KERN_ERR "lock type: %d req lock state : %d\n",
 936               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
 937        print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
 938        printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
 939        printk(KERN_ERR "lock type: %d req lock state : %d\n",
 940               gh->gh_gl->gl_name.ln_type, gh->gh_state);
 941        __dump_glock(NULL, gl);
 942        BUG();
 943}
 944
 945/**
 946 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 947 * @gh: the holder structure
 948 *
 949 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 950 *
 951 * Returns: 0, GLR_TRYFAILED, or errno on failure
 952 */
 953
 954int gfs2_glock_nq(struct gfs2_holder *gh)
 955{
 956        struct gfs2_glock *gl = gh->gh_gl;
 957        struct gfs2_sbd *sdp = gl->gl_sbd;
 958        int error = 0;
 959
 960        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 961                return -EIO;
 962
 963        spin_lock(&gl->gl_spin);
 964        add_to_queue(gh);
 965        run_queue(gl, 1);
 966        spin_unlock(&gl->gl_spin);
 967
 968        if (!(gh->gh_flags & GL_ASYNC))
 969                error = gfs2_glock_wait(gh);
 970
 971        return error;
 972}
 973
 974/**
 975 * gfs2_glock_poll - poll to see if an async request has been completed
 976 * @gh: the holder
 977 *
 978 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 979 */
 980
 981int gfs2_glock_poll(struct gfs2_holder *gh)
 982{
 983        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
 984}
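
/*
 * Illustrative sketch (editor's addition): the asynchronous request
 * pattern using GL_ASYNC. gfs2_glock_nq() queues the holder and returns
 * without waiting; the caller checks progress with gfs2_glock_poll() and
 * collects the result with gfs2_glock_wait(). The polling loop here is a
 * stand-in for doing other useful work in the meantime.
 */
static int example_async_grab(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        int error;

        gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, gh);
        error = gfs2_glock_nq(gh);
        if (error)
                goto out_uninit;

        while (!gfs2_glock_poll(gh))
                msleep(10);             /* or do other work */

        error = gfs2_glock_wait(gh);    /* returns gh->gh_error */
        if (!error)
                return 0;               /* caller must gfs2_glock_dq_uninit(gh) later */
out_uninit:
        gfs2_holder_uninit(gh);
        return error;
}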
 985
 986/**
 987 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 988 * @gh: the glock holder
 989 *
 990 */
 991
 992void gfs2_glock_dq(struct gfs2_holder *gh)
 993{
 994        struct gfs2_glock *gl = gh->gh_gl;
 995        const struct gfs2_glock_operations *glops = gl->gl_ops;
 996        unsigned delay = 0;
 997        int fast_path = 0;
 998
 999        spin_lock(&gl->gl_spin);
1000        if (gh->gh_flags & GL_NOCACHE)
1001                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1002
1003        list_del_init(&gh->gh_list);
1004        if (find_first_holder(gl) == NULL) {
1005                if (glops->go_unlock) {
1006                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1007                        spin_unlock(&gl->gl_spin);
1008                        glops->go_unlock(gh);
1009                        spin_lock(&gl->gl_spin);
1010                        clear_bit(GLF_LOCK, &gl->gl_flags);
1011                }
1012                gl->gl_stamp = jiffies;
1013                if (list_empty(&gl->gl_holders) &&
1014                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1015                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1016                        fast_path = 1;
1017        }
1018        spin_unlock(&gl->gl_spin);
1019        if (likely(fast_path))
1020                return;
1021
1022        gfs2_glock_hold(gl);
1023        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1024            !test_bit(GLF_DEMOTE, &gl->gl_flags))
1025                delay = gl->gl_ops->go_min_hold_time;
1026        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1027                gfs2_glock_put(gl);
1028}
1029
1030void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1031{
1032        struct gfs2_glock *gl = gh->gh_gl;
1033        gfs2_glock_dq(gh);
1034        wait_on_demote(gl);
1035}
1036
1037/**
1038 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1039 * @gh: the holder structure
1040 *
1041 */
1042
1043void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1044{
1045        gfs2_glock_dq(gh);
1046        gfs2_holder_uninit(gh);
1047}
1048
1049/**
1050 * gfs2_glock_nq_num - acquire a glock based on lock number
1051 * @sdp: the filesystem
1052 * @number: the lock number
1053 * @glops: the glock operations for the type of glock
1054 * @state: the state to acquire the glock in
 1055 * @flags: modifier flags for the acquisition
1056 * @gh: the struct gfs2_holder
1057 *
1058 * Returns: errno
1059 */
1060
1061int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1062                      const struct gfs2_glock_operations *glops,
1063                      unsigned int state, int flags, struct gfs2_holder *gh)
1064{
1065        struct gfs2_glock *gl;
1066        int error;
1067
1068        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1069        if (!error) {
1070                error = gfs2_glock_nq_init(gl, state, flags, gh);
1071                gfs2_glock_put(gl);
1072        }
1073
1074        return error;
1075}
1076
1077/**
1078 * glock_compare - Compare two struct gfs2_glock structures for sorting
1079 * @arg_a: the first structure
1080 * @arg_b: the second structure
1081 *
1082 */
1083
1084static int glock_compare(const void *arg_a, const void *arg_b)
1085{
1086        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1087        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1088        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1089        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1090
1091        if (a->ln_number > b->ln_number)
1092                return 1;
1093        if (a->ln_number < b->ln_number)
1094                return -1;
1095        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1096        return 0;
1097}
1098
1099/**
 1100 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1101 * @num_gh: the number of structures
1102 * @ghs: an array of struct gfs2_holder structures
1103 *
1104 * Returns: 0 on success (all glocks acquired),
1105 *          errno on failure (no glocks acquired)
1106 */
1107
1108static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1109                     struct gfs2_holder **p)
1110{
1111        unsigned int x;
1112        int error = 0;
1113
1114        for (x = 0; x < num_gh; x++)
1115                p[x] = &ghs[x];
1116
1117        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1118
1119        for (x = 0; x < num_gh; x++) {
1120                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1121
1122                error = gfs2_glock_nq(p[x]);
1123                if (error) {
1124                        while (x--)
1125                                gfs2_glock_dq(p[x]);
1126                        break;
1127                }
1128        }
1129
1130        return error;
1131}
1132
1133/**
1134 * gfs2_glock_nq_m - acquire multiple glocks
1135 * @num_gh: the number of structures
1136 * @ghs: an array of struct gfs2_holder structures
1137 *
1138 *
1139 * Returns: 0 on success (all glocks acquired),
1140 *          errno on failure (no glocks acquired)
1141 */
1142
1143int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1144{
1145        struct gfs2_holder *tmp[4];
1146        struct gfs2_holder **pph = tmp;
1147        int error = 0;
1148
1149        switch(num_gh) {
1150        case 0:
1151                return 0;
1152        case 1:
1153                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1154                return gfs2_glock_nq(ghs);
1155        default:
1156                if (num_gh <= 4)
1157                        break;
1158                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1159                if (!pph)
1160                        return -ENOMEM;
1161        }
1162
1163        error = nq_m_sync(num_gh, ghs, pph);
1164
1165        if (pph != tmp)
1166                kfree(pph);
1167
1168        return error;
1169}
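
/*
 * Illustrative sketch (editor's addition): taking two glocks at once.
 * gfs2_glock_nq_m() sorts the holders into a deadlock-free order before
 * queueing them, and either all or none of the locks are acquired. Both
 * glock pointers are assumed to be referenced by the caller already.
 */
static int example_with_two_glocks(struct gfs2_glock *gl_a,
                                   struct gfs2_glock *gl_b)
{
        struct gfs2_holder ghs[2];
        int error;

        gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
        gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);

        error = gfs2_glock_nq_m(2, ghs);
        if (!error) {
                /* ... both locks are held here ... */
                gfs2_glock_dq_m(2, ghs);
        }

        gfs2_holder_uninit(&ghs[0]);
        gfs2_holder_uninit(&ghs[1]);
        return error;
}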
1170
1171/**
1172 * gfs2_glock_dq_m - release multiple glocks
1173 * @num_gh: the number of structures
1174 * @ghs: an array of struct gfs2_holder structures
1175 *
1176 */
1177
1178void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1179{
1180        unsigned int x;
1181
1182        for (x = 0; x < num_gh; x++)
1183                gfs2_glock_dq(&ghs[x]);
1184}
1185
1186/**
1187 * gfs2_glock_dq_uninit_m - release multiple glocks
1188 * @num_gh: the number of structures
1189 * @ghs: an array of struct gfs2_holder structures
1190 *
1191 */
1192
1193void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1194{
1195        unsigned int x;
1196
1197        for (x = 0; x < num_gh; x++)
1198                gfs2_glock_dq_uninit(&ghs[x]);
1199}
1200
1201static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
1202{
1203        int error = -EIO;
1204        if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
1205                return 0;
1206        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1207                error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
1208        return error;
1209}
1210
1211/**
 1212 * gfs2_lvb_hold - attach a LVB to a glock
1213 * @gl: The glock in question
1214 *
1215 */
1216
1217int gfs2_lvb_hold(struct gfs2_glock *gl)
1218{
1219        int error;
1220
1221        if (!atomic_read(&gl->gl_lvb_count)) {
1222                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1223                if (error) 
1224                        return error;
1225                gfs2_glock_hold(gl);
1226        }
1227        atomic_inc(&gl->gl_lvb_count);
1228
1229        return 0;
1230}
1231
1232/**
1233 * gfs2_lvb_unhold - detach a LVB from a glock
1234 * @gl: The glock in question
1235 *
1236 */
1237
1238void gfs2_lvb_unhold(struct gfs2_glock *gl)
1239{
1240        struct gfs2_sbd *sdp = gl->gl_sbd;
1241
1242        gfs2_glock_hold(gl);
1243        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1244        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1245                if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
1246                        sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
1247                gl->gl_lvb = NULL;
1248                gfs2_glock_put(gl);
1249        }
1250        gfs2_glock_put(gl);
1251}
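
/*
 * Illustrative sketch (editor's addition): gfs2_lvb_hold() and
 * gfs2_lvb_unhold() must be paired. Between the two calls gl->gl_lvb
 * points at the lock value block provided by the lock module; @len is
 * assumed not to exceed the LVB size.
 */
static int example_peek_lvb(struct gfs2_glock *gl, char *buf, unsigned int len)
{
        int error;

        error = gfs2_lvb_hold(gl);
        if (error)
                return error;

        memcpy(buf, gl->gl_lvb, len);
        gfs2_lvb_unhold(gl);
        return 0;
}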
1252
1253static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1254                        unsigned int state)
1255{
1256        struct gfs2_glock *gl;
1257        unsigned long delay = 0;
1258        unsigned long holdtime;
1259        unsigned long now = jiffies;
1260
1261        gl = gfs2_glock_find(sdp, name);
1262        if (!gl)
1263                return;
1264
1265        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1266        if (time_before(now, holdtime))
1267                delay = holdtime - now;
1268
1269        spin_lock(&gl->gl_spin);
1270        handle_callback(gl, state, 1, delay);
1271        spin_unlock(&gl->gl_spin);
1272        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1273                gfs2_glock_put(gl);
1274}
1275
1276/**
1277 * gfs2_glock_cb - Callback used by locking module
1278 * @sdp: Pointer to the superblock
1279 * @type: Type of callback
1280 * @data: Type dependent data pointer
1281 *
1282 * Called by the locking module when it wants to tell us something.
1283 * Either we need to drop a lock, one of our ASYNC requests completed, or
1284 * a journal from another client needs to be recovered.
1285 */
1286
1287void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1288{
1289        struct gfs2_sbd *sdp = cb_data;
1290
1291        switch (type) {
1292        case LM_CB_NEED_E:
1293                blocking_cb(sdp, data, LM_ST_UNLOCKED);
1294                return;
1295
1296        case LM_CB_NEED_D:
1297                blocking_cb(sdp, data, LM_ST_DEFERRED);
1298                return;
1299
1300        case LM_CB_NEED_S:
1301                blocking_cb(sdp, data, LM_ST_SHARED);
1302                return;
1303
1304        case LM_CB_ASYNC: {
1305                struct lm_async_cb *async = data;
1306                struct gfs2_glock *gl;
1307
1308                down_read(&gfs2_umount_flush_sem);
1309                gl = gfs2_glock_find(sdp, &async->lc_name);
1310                if (gfs2_assert_warn(sdp, gl))
1311                        return;
1312                gl->gl_reply = async->lc_ret;
1313                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1314                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1315                        gfs2_glock_put(gl);
1316                up_read(&gfs2_umount_flush_sem);
1317                return;
1318        }
1319
1320        case LM_CB_NEED_RECOVERY:
1321                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1322                if (sdp->sd_recoverd_process)
1323                        wake_up_process(sdp->sd_recoverd_process);
1324                return;
1325
1326        default:
1327                gfs2_assert_warn(sdp, 0);
1328                return;
1329        }
1330}
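
/*
 * Illustrative sketch (editor's addition): how a lock module might hand
 * an asynchronous completion back through gfs2_glock_cb(). The lm_async_cb
 * usage matches the LM_CB_ASYNC case above; @cb_data is the superblock
 * pointer the module was given at mount time, and the values are
 * hypothetical.
 */
static void example_deliver_async_reply(void *cb_data,
                                        const struct lm_lockname *name, int ret)
{
        struct lm_async_cb async;

        async.lc_name = *name;  /* which lock this reply refers to */
        async.lc_ret = ret;     /* status bits, e.g. LM_OUT_ST_MASK state */

        gfs2_glock_cb(cb_data, LM_CB_ASYNC, &async);
}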
1331
1332/**
1333 * demote_ok - Check to see if it's ok to unlock a glock
1334 * @gl: the glock
1335 *
1336 * Returns: 1 if it's ok
1337 */
1338
1339static int demote_ok(struct gfs2_glock *gl)
1340{
1341        const struct gfs2_glock_operations *glops = gl->gl_ops;
1342        int demote = 1;
1343
1344        if (test_bit(GLF_STICKY, &gl->gl_flags))
1345                demote = 0;
1346        else if (glops->go_demote_ok)
1347                demote = glops->go_demote_ok(gl);
1348
1349        return demote;
1350}
1351
1352/**
1353 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1354 * @gl: the glock
1355 *
1356 */
1357
1358void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1359{
1360        struct gfs2_sbd *sdp = gl->gl_sbd;
1361
1362        spin_lock(&sdp->sd_reclaim_lock);
1363        if (list_empty(&gl->gl_reclaim)) {
1364                gfs2_glock_hold(gl);
1365                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1366                atomic_inc(&sdp->sd_reclaim_count);
1367                spin_unlock(&sdp->sd_reclaim_lock);
1368                wake_up(&sdp->sd_reclaim_wq);
1369        } else
1370                spin_unlock(&sdp->sd_reclaim_lock);
1371}
1372
1373/**
1374 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1375 * @sdp: the filesystem
1376 *
1377 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1378 * different glock and we notice that there are a lot of glocks in the
1379 * reclaim list.
1380 *
1381 */
1382
1383void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1384{
1385        struct gfs2_glock *gl;
1386        int done_callback = 0;
1387
1388        spin_lock(&sdp->sd_reclaim_lock);
1389        if (list_empty(&sdp->sd_reclaim_list)) {
1390                spin_unlock(&sdp->sd_reclaim_lock);
1391                return;
1392        }
1393        gl = list_entry(sdp->sd_reclaim_list.next,
1394                        struct gfs2_glock, gl_reclaim);
1395        list_del_init(&gl->gl_reclaim);
1396        spin_unlock(&sdp->sd_reclaim_lock);
1397
1398        atomic_dec(&sdp->sd_reclaim_count);
1399        atomic_inc(&sdp->sd_reclaimed);
1400
1401        spin_lock(&gl->gl_spin);
1402        if (find_first_holder(gl) == NULL &&
1403            gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
1404                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1405                done_callback = 1;
1406        }
1407        spin_unlock(&gl->gl_spin);
1408        if (!done_callback ||
1409            queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1410                gfs2_glock_put(gl);
1411}
1412
1413/**
 1414 * examine_bucket - Call a function for each glock in a hash bucket
1415 * @examiner: the function
1416 * @sdp: the filesystem
1417 * @bucket: the bucket
1418 *
1419 * Returns: 1 if the bucket has entries
1420 */
1421
1422static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1423                          unsigned int hash)
1424{
1425        struct gfs2_glock *gl, *prev = NULL;
1426        int has_entries = 0;
1427        struct hlist_head *head = &gl_hash_table[hash].hb_list;
1428
1429        read_lock(gl_lock_addr(hash));
1430        /* Can't use hlist_for_each_entry - don't want prefetch here */
1431        if (hlist_empty(head))
1432                goto out;
1433        gl = list_entry(head->first, struct gfs2_glock, gl_list);
1434        while(1) {
1435                if (!sdp || gl->gl_sbd == sdp) {
1436                        gfs2_glock_hold(gl);
1437                        read_unlock(gl_lock_addr(hash));
1438                        if (prev)
1439                                gfs2_glock_put(prev);
1440                        prev = gl;
1441                        examiner(gl);
1442                        has_entries = 1;
1443                        read_lock(gl_lock_addr(hash));
1444                }
1445                if (gl->gl_list.next == NULL)
1446                        break;
1447                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1448        }
1449out:
1450        read_unlock(gl_lock_addr(hash));
1451        if (prev)
1452                gfs2_glock_put(prev);
1453        cond_resched();
1454        return has_entries;
1455}
1456
1457/**
1458 * scan_glock - look at a glock and see if we can reclaim it
1459 * @gl: the glock to look at
1460 *
1461 */
1462
1463static void scan_glock(struct gfs2_glock *gl)
1464{
1465        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1466                return;
1467        if (test_bit(GLF_LOCK, &gl->gl_flags))
1468                return;
1469
1470        spin_lock(&gl->gl_spin);
1471        if (find_first_holder(gl) == NULL &&
1472            gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1473                gfs2_glock_schedule_for_reclaim(gl);
1474        spin_unlock(&gl->gl_spin);
1475}
1476
1477/**
1478 * clear_glock - look at a glock and see if we can free it from glock cache
1479 * @gl: the glock to look at
1480 *
1481 */
1482
1483static void clear_glock(struct gfs2_glock *gl)
1484{
1485        struct gfs2_sbd *sdp = gl->gl_sbd;
1486        int released;
1487
1488        spin_lock(&sdp->sd_reclaim_lock);
1489        if (!list_empty(&gl->gl_reclaim)) {
1490                list_del_init(&gl->gl_reclaim);
1491                atomic_dec(&sdp->sd_reclaim_count);
1492                spin_unlock(&sdp->sd_reclaim_lock);
1493                released = gfs2_glock_put(gl);
1494                gfs2_assert(sdp, !released);
1495        } else {
1496                spin_unlock(&sdp->sd_reclaim_lock);
1497        }
1498
1499        spin_lock(&gl->gl_spin);
1500        if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
1501                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1502        spin_unlock(&gl->gl_spin);
1503        gfs2_glock_hold(gl);
1504        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1505                gfs2_glock_put(gl);
1506}
1507
1508/**
1509 * gfs2_gl_hash_clear - Empty out the glock hash table
1510 * @sdp: the filesystem
 1511 *
1512 *
1513 * Called when unmounting the filesystem.
1514 */
1515
1516void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1517{
1518        unsigned long t;
1519        unsigned int x;
1520        int cont;
1521
1522        t = jiffies;
1523
1524        for (;;) {
1525                cont = 0;
1526                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1527                        if (examine_bucket(clear_glock, sdp, x))
1528                                cont = 1;
1529                }
1530
1531                if (!cont)
1532                        break;
1533
1534                if (time_after_eq(jiffies,
1535                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1536                        fs_warn(sdp, "Unmount seems to be stalled. "
1537                                     "Dumping lock state...\n");
1538                        gfs2_dump_lockstate(sdp);
1539                        t = jiffies;
1540                }
1541
1542                down_write(&gfs2_umount_flush_sem);
1543                invalidate_inodes(sdp->sd_vfs);
1544                up_write(&gfs2_umount_flush_sem);
1545                msleep(10);
1546        }
1547}
1548
1549static const char *state2str(unsigned state)
1550{
1551        switch(state) {
1552        case LM_ST_UNLOCKED:
1553                return "UN";
1554        case LM_ST_SHARED:
1555                return "SH";
1556        case LM_ST_DEFERRED:
1557                return "DF";
1558        case LM_ST_EXCLUSIVE:
1559                return "EX";
1560        }
1561        return "??";
1562}
1563
1564static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1565{
1566        char *p = buf;
1567        if (flags & LM_FLAG_TRY)
1568                *p++ = 't';
1569        if (flags & LM_FLAG_TRY_1CB)
1570                *p++ = 'T';
1571        if (flags & LM_FLAG_NOEXP)
1572                *p++ = 'e';
1573        if (flags & LM_FLAG_ANY)
1574                *p++ = 'a';
1575        if (flags & LM_FLAG_PRIORITY)
1576                *p++ = 'p';
1577        if (flags & GL_ASYNC)
1578                *p++ = 'a';
1579        if (flags & GL_EXACT)
1580                *p++ = 'E';
1581        if (flags & GL_ATIME)
1582                *p++ = 'a';
1583        if (flags & GL_NOCACHE)
1584                *p++ = 'c';
1585        if (test_bit(HIF_HOLDER, &iflags))
1586                *p++ = 'H';
1587        if (test_bit(HIF_WAIT, &iflags))
1588                *p++ = 'W';
1589        if (test_bit(HIF_FIRST, &iflags))
1590                *p++ = 'F';
1591        *p = 0;
1592        return buf;
1593}
1594
1595/**
1596 * dump_holder - print information about a glock holder
1597 * @seq: the seq_file struct
1598 * @gh: the glock holder
1599 *
1600 * Returns: 0 on success, -ENOBUFS when we run out of space
1601 */
1602
1603static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1604{
1605        struct task_struct *gh_owner = NULL;
1606        char buffer[KSYM_SYMBOL_LEN];
1607        char flags_buf[32];
1608
1609        sprint_symbol(buffer, gh->gh_ip);
1610        if (gh->gh_owner_pid)
1611                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1612        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
1613                  state2str(gh->gh_state),
1614                  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1615                  gh->gh_error, 
1616                  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1617                  gh_owner ? gh_owner->comm : "(ended)", buffer);
1618        return 0;
1619}
1620
1621static const char *gflags2str(char *buf, const unsigned long *gflags)
1622{
1623        char *p = buf;
1624        if (test_bit(GLF_LOCK, gflags))
1625                *p++ = 'l';
1626        if (test_bit(GLF_STICKY, gflags))
1627                *p++ = 's';
1628        if (test_bit(GLF_DEMOTE, gflags))
1629                *p++ = 'D';
1630        if (test_bit(GLF_PENDING_DEMOTE, gflags))
1631                *p++ = 'd';
1632        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1633                *p++ = 'p';
1634        if (test_bit(GLF_DIRTY, gflags))
1635                *p++ = 'y';
1636        if (test_bit(GLF_LFLUSH, gflags))
1637                *p++ = 'f';
1638        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1639                *p++ = 'i';
1640        if (test_bit(GLF_REPLY_PENDING, gflags))
1641                *p++ = 'r';
1642        *p = 0;
1643        return buf;
1644}
1645
1646/**
1647 * __dump_glock - print information about a glock
1648 * @seq: The seq_file struct
1649 * @gl: the glock
1650 *
1651 * The file format is as follows:
1652 * One line per object, capital letters are used to indicate objects
1653 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1654 * other objects are indented by a single space and follow the glock to
1655 * which they are related. Fields are indicated by lower case letters
1656 * followed by a colon and the field value, except for strings which are in
1657 * [] so that it is possible to see if they are composed of spaces, for
1658 * example. The fields are n = number (id of the object), f = flags,
1659 * t = type, s = state, r = refcount, e = error, p = pid.
1660 *
1661 * Returns: 0 on success, -ENOBUFS when we run out of space
1662 */
1663
1664static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1665{
1666        const struct gfs2_glock_operations *glops = gl->gl_ops;
1667        unsigned long long dtime;
1668        const struct gfs2_holder *gh;
1669        char gflags_buf[32];
1670        int error = 0;
1671
1672        dtime = jiffies - gl->gl_demote_time;
1673        dtime *= 1000000/HZ; /* demote time in uSec */
1674        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1675                dtime = 0;
1676        gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
1677                  state2str(gl->gl_state),
1678                  gl->gl_name.ln_type,
1679                  (unsigned long long)gl->gl_name.ln_number,
1680                  gflags2str(gflags_buf, &gl->gl_flags),
1681                  state2str(gl->gl_target),
1682                  state2str(gl->gl_demote_state), dtime,
1683                  atomic_read(&gl->gl_lvb_count),
1684                  atomic_read(&gl->gl_ail_count),
1685                  atomic_read(&gl->gl_ref));
1686
1687        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1688                error = dump_holder(seq, gh);
1689                if (error)
1690                        goto out;
1691        }
1692        if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1693                error = glops->go_dump(seq, gl);
1694out:
1695        return error;
1696}
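
/*
 * As a hypothetical example (values invented for illustration; the flag
 * letters are those produced by gflags2str() and hflags2str() above), a
 * glock of type 2 held shared by a single process might be dumped as:
 *
 * G:  s:SH n:2/5817 f:y t:SH d:UN/0 l:0 a:0 r:4
 *  H: s:SH f:H e:0 p:2584 [cat] some_caller+0x42/0x1a0
 *
 * i.e. state and target are both SH, no demote is pending (demote time 0),
 * GLF_DIRTY is set, there are no LVB or AIL references, and the reference
 * count is 4. If the glock is not unlocked and provides a ->go_dump()
 * method, an indented I: (inode) or R: (rgrp) line follows the holders.
 */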
1697
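/**
 * dump_glock - print information about a glock, under its spinlock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * Wrapper around __dump_glock() which takes gl_spin so that the state of
 * the glock cannot change while it is being dumped.
 */
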
1698static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1699{
1700        int ret;
1701        spin_lock(&gl->gl_spin);
1702        ret = __dump_glock(seq, gl);
1703        spin_unlock(&gl->gl_spin);
1704        return ret;
1705}
1706
1707/**
1708 * gfs2_dump_lockstate - print out the current lockstate
1709 * @sdp: the filesystem
1710 *
1711 * Walks every glock hash bucket and dumps the state of each glock that
1712 * belongs to @sdp to the console.
1713 *
1714 */
1715
1716static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1717{
1718        struct gfs2_glock *gl;
1719        struct hlist_node *h;
1720        unsigned int x;
1721        int error = 0;
1722
1723        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1724
1725                read_lock(gl_lock_addr(x));
1726
1727                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1728                        if (gl->gl_sbd != sdp)
1729                                continue;
1730
1731                        error = dump_glock(NULL, gl);
1732                        if (error)
1733                                break;
1734                }
1735
1736                read_unlock(gl_lock_addr(x));
1737
1738                if (error)
1739                        break;
1740        }
1741
1742
1743        return error;
1744}
1745
1746/**
1747 * gfs2_scand - Look for cached glocks and inodes to toss from memory
1748 * @sdp: Pointer to GFS2 superblock
1749 *
1750 * One of these daemons runs per module; every scand_secs seconds it passes
1751 * scan_glock() over each glock hash bucket to find such candidates.
1752 */
1753
1754static int gfs2_scand(void *data)
1755{
1756        unsigned x;
1757        unsigned delay;
1758
1759        while (!kthread_should_stop()) {
1760                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1761                        examine_bucket(scan_glock, NULL, x);
1762                if (freezing(current))
1763                        refrigerator();
1764                delay = scand_secs;
1765                if (delay < 1)
1766                        delay = 1;
1767                schedule_timeout_interruptible(delay * HZ);
1768        }
1769
1770        return 0;
1771}
1772
1773
1774
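/**
 * gfs2_glock_init - initialise the glock module state
 *
 * Sets up the empty glock hash table (and, where GL_HASH_LOCK_SZ is defined,
 * the rwlocks protecting its chains), starts the gfs2_scand kernel thread
 * and creates the workqueue used for deferred glock work.
 *
 * Returns: 0 on success, or a negative errno if the scand thread or the
 * workqueue could not be created.
 */
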
1775int __init gfs2_glock_init(void)
1776{
1777        unsigned i;
1778        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1779                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1780        }
1781#ifdef GL_HASH_LOCK_SZ
1782        for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1783                rwlock_init(&gl_hash_locks[i]);
1784        }
1785#endif
1786
1787        scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
1788        if (IS_ERR(scand_process))
1789                return PTR_ERR(scand_process);
1790
1791        glock_workqueue = create_workqueue("glock_workqueue");
1792        if (!glock_workqueue) {
1793                kthread_stop(scand_process);
1794                return -ENOMEM;
1795        }
1796
1797        return 0;
1798}
1799
1800void gfs2_glock_exit(void)
1801{
1802        destroy_workqueue(glock_workqueue);
1803        kthread_stop(scand_process);
1804}
1805
1806module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
1807MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
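
/*
 * Note that, besides being set at load time (e.g. "modprobe gfs2
 * scand_secs=10" for a modular build), scand_secs is root-writable and so
 * can also be changed at runtime via
 * /sys/module/gfs2/parameters/scand_secs.
 */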
1808
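/**
 * gfs2_glock_iter_next - advance the debugfs iterator to the next glock
 * @gi: the iterator
 *
 * A reference is taken on the next glock in the current hash chain while
 * the chain's read lock is still held, and only afterwards is the reference
 * on the previous glock dropped, so the glock being examined cannot be
 * freed underneath us. When a chain is exhausted the iterator moves on to
 * the next hash bucket, and glocks that belong to a superblock other than
 * gi->sdp are skipped.
 *
 * Returns: 0 if gi->gl now points to a glock, 1 once the whole table has
 * been walked.
 */
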
1809static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1810{
1811        struct gfs2_glock *gl;
1812
1813restart:
1814        read_lock(gl_lock_addr(gi->hash));
1815        gl = gi->gl;
1816        if (gl) {
1817                gi->gl = hlist_entry(gl->gl_list.next,
1818                                     struct gfs2_glock, gl_list);
1819                if (gi->gl)
1820                        gfs2_glock_hold(gi->gl);
1821        }
1822        read_unlock(gl_lock_addr(gi->hash));
1823        if (gl)
1824                gfs2_glock_put(gl);
1825        if (gl && gi->gl == NULL)
1826                gi->hash++;
1827        while (gi->gl == NULL) {
1828                if (gi->hash >= GFS2_GL_HASH_SIZE)
1829                        return 1;
1830                read_lock(gl_lock_addr(gi->hash));
1831                gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1832                                     struct gfs2_glock, gl_list);
1833                if (gi->gl)
1834                        gfs2_glock_hold(gi->gl);
1835                read_unlock(gl_lock_addr(gi->hash));
1836                gi->hash++;
1837        }
1838
1839        if (gi->sdp != gi->gl->gl_sbd)
1840                goto restart;
1841
1842        return 0;
1843}
1844
1845static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1846{
1847        if (gi->gl)
1848                gfs2_glock_put(gi->gl);
1849        gi->gl = NULL;
1850}
1851
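/*
 * seq_file callbacks for the debugfs "glocks" file. Note that start()
 * always rewinds to hash bucket 0 and then advances the iterator *pos + 1
 * times, so seeking far into a very large table re-walks everything before
 * that point; stop() simply drops the reference held on the current glock.
 */
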
1852static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1853{
1854        struct gfs2_glock_iter *gi = seq->private;
1855        loff_t n = *pos;
1856
1857        gi->hash = 0;
1858
1859        do {
1860                if (gfs2_glock_iter_next(gi)) {
1861                        gfs2_glock_iter_free(gi);
1862                        return NULL;
1863                }
1864        } while (n--);
1865
1866        return gi->gl;
1867}
1868
1869static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1870                                 loff_t *pos)
1871{
1872        struct gfs2_glock_iter *gi = seq->private;
1873
1874        (*pos)++;
1875
1876        if (gfs2_glock_iter_next(gi)) {
1877                gfs2_glock_iter_free(gi);
1878                return NULL;
1879        }
1880
1881        return gi->gl;
1882}
1883
1884static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1885{
1886        struct gfs2_glock_iter *gi = seq->private;
1887        gfs2_glock_iter_free(gi);
1888}
1889
1890static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1891{
1892        return dump_glock(seq, iter_ptr);
1893}
1894
1895static const struct seq_operations gfs2_glock_seq_ops = {
1896        .start = gfs2_glock_seq_start,
1897        .next  = gfs2_glock_seq_next,
1898        .stop  = gfs2_glock_seq_stop,
1899        .show  = gfs2_glock_seq_show,
1900};
1901
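/*
 * Hook the seq_file operations up to the per-filesystem "glocks" debugfs
 * file: seq_open_private() allocates the gfs2_glock_iter and we record
 * which superblock it should report on.
 */
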
1902static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1903{
1904        int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1905                                   sizeof(struct gfs2_glock_iter));
1906        if (ret == 0) {
1907                struct seq_file *seq = file->private_data;
1908                struct gfs2_glock_iter *gi = seq->private;
1909                gi->sdp = inode->i_private;
1910        }
1911        return ret;
1912}
1913
1914static const struct file_operations gfs2_debug_fops = {
1915        .owner   = THIS_MODULE,
1916        .open    = gfs2_debugfs_open,
1917        .read    = seq_read,
1918        .llseek  = seq_lseek,
1919        .release = seq_release_private,
1920};
1921
1922int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1923{
1924        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1925        if (!sdp->debugfs_dir)
1926                return -ENOMEM;
1927        sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1928                                                         S_IFREG | S_IRUGO,
1929                                                         sdp->debugfs_dir, sdp,
1930                                                         &gfs2_debug_fops);
1931        if (!sdp->debugfs_dentry_glocks)
1932                return -ENOMEM;
1933
1934        return 0;
1935}
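
/*
 * The glock state of a mounted filesystem can then be read from userspace,
 * e.g. (assuming the conventional debugfs mount point and a filesystem
 * whose sd_table_name is "testfs"):
 *
 *   # mount -t debugfs none /sys/kernel/debug
 *   # cat /sys/kernel/debug/gfs2/testfs/glocks
 */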
1936
1937void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1938{
1939        if (sdp && sdp->debugfs_dir) {
1940                if (sdp->debugfs_dentry_glocks) {
1941                        debugfs_remove(sdp->debugfs_dentry_glocks);
1942                        sdp->debugfs_dentry_glocks = NULL;
1943                }
1944                debugfs_remove(sdp->debugfs_dir);
1945                sdp->debugfs_dir = NULL;
1946        }
1947}
1948
1949int gfs2_register_debugfs(void)
1950{
1951        gfs2_root = debugfs_create_dir("gfs2", NULL);
1952        return gfs2_root ? 0 : -ENOMEM;
1953}
1954
1955void gfs2_unregister_debugfs(void)
1956{
1957        debugfs_remove(gfs2_root);
1958        gfs2_root = NULL;
1959}
1960