linux/fs/ceph/caps.c
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of a *_EXCL (exclusive) or FILE_WR capability, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * from at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
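
/*
 * Illustrative examples of the cap encoding (see ceph_cap_string()
 * below for the exact format): an inode cached read-only might show
 * caps "pAsLsXsFs" (the PIN cap plus SHARED caps on the auth, link,
 * xattr, and file sections), while a client buffering writes could
 * show something like "pAsxLsxXsxFsxcrwb", adding EXCL and the FILE
 * cache/rd/wr/buffer bits.
 */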


/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
        if (c & CEPH_CAP_GSHARED)
                *s++ = 's';
        if (c & CEPH_CAP_GEXCL)
                *s++ = 'x';
        if (c & CEPH_CAP_GCACHE)
                *s++ = 'c';
        if (c & CEPH_CAP_GRD)
                *s++ = 'r';
        if (c & CEPH_CAP_GWR)
                *s++ = 'w';
        if (c & CEPH_CAP_GBUFFER)
                *s++ = 'b';
        if (c & CEPH_CAP_GLAZYIO)
                *s++ = 'l';
        return s;
}

const char *ceph_cap_string(int caps)
{
        int i;
        char *s;
        int c;

        spin_lock(&cap_str_lock);
        i = last_cap_str++;
        if (last_cap_str == MAX_CAP_STR)
                last_cap_str = 0;
        spin_unlock(&cap_str_lock);

        s = cap_str[i];

        if (caps & CEPH_CAP_PIN)
                *s++ = 'p';

        c = (caps >> CEPH_CAP_SAUTH) & 3;
        if (c) {
                *s++ = 'A';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SLINK) & 3;
        if (c) {
                *s++ = 'L';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SXATTR) & 3;
        if (c) {
                *s++ = 'X';
                s = gcap_string(s, c);
        }

        c = caps >> CEPH_CAP_SFILE;
        if (c) {
                *s++ = 'F';
                s = gcap_string(s, c);
        }

        if (s == cap_str[i])
                *s++ = '-';
        *s = 0;
        return cap_str[i];
}
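
/*
 * Minimal (illustrative) use of the helper above:
 *
 *      int caps = CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD;
 *      dout("caps = %s\n", ceph_cap_string(caps));   -> prints "pFsr"
 *
 * The returned pointer comes from a small rotating buffer of
 * MAX_CAP_STR entries, so it is only suitable for transient debug
 * output, not for long-lived storage.
 */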

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
        INIT_LIST_HEAD(&mdsc->caps_list);
        spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
        struct ceph_cap *cap;

        spin_lock(&mdsc->caps_list_lock);
        while (!list_empty(&mdsc->caps_list)) {
                cap = list_first_entry(&mdsc->caps_list,
                                       struct ceph_cap, caps_item);
                list_del(&cap->caps_item);
                kmem_cache_free(ceph_cap_cachep, cap);
        }
        mdsc->caps_total_count = 0;
        mdsc->caps_avail_count = 0;
        mdsc->caps_use_count = 0;
        mdsc->caps_reserve_count = 0;
        mdsc->caps_min_count = 0;
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_min_count += delta;
        BUG_ON(mdsc->caps_min_count < 0);
        spin_unlock(&mdsc->caps_list_lock);
}

int ceph_reserve_caps(struct ceph_mds_client *mdsc,
                      struct ceph_cap_reservation *ctx, int need)
{
        int i;
        struct ceph_cap *cap;
        int have;
        int alloc = 0;
        LIST_HEAD(newcaps);
        int ret = 0;

        dout("reserve caps ctx=%p need=%d\n", ctx, need);

        /* first reserve any caps that are already allocated */
        spin_lock(&mdsc->caps_list_lock);
        if (mdsc->caps_avail_count >= need)
                have = need;
        else
                have = mdsc->caps_avail_count;
        mdsc->caps_avail_count -= have;
        mdsc->caps_reserve_count += have;
        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        for (i = have; i < need; i++) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (!cap) {
                        ret = -ENOMEM;
                        goto out_alloc_count;
                }
                list_add(&cap->caps_item, &newcaps);
                alloc++;
        }
        BUG_ON(have + alloc != need);

        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_total_count += alloc;
        mdsc->caps_reserve_count += alloc;
        list_splice(&newcaps, &mdsc->caps_list);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        ctx->count = need;
        dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
             ctx, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        return 0;

out_alloc_count:
        /* we didn't manage to reserve as much as we needed */
        pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
                   ctx, need, have);
        return ret;
}

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                        struct ceph_cap_reservation *ctx)
{
        dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
        if (ctx->count) {
                spin_lock(&mdsc->caps_list_lock);
                BUG_ON(mdsc->caps_reserve_count < ctx->count);
                mdsc->caps_reserve_count -= ctx->count;
                mdsc->caps_avail_count += ctx->count;
                ctx->count = 0;
                dout("unreserve caps %d = %d used + %d resv + %d avail\n",
                     mdsc->caps_total_count, mdsc->caps_use_count,
                     mdsc->caps_reserve_count, mdsc->caps_avail_count);
                BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                                 mdsc->caps_reserve_count +
                                                 mdsc->caps_avail_count);
                spin_unlock(&mdsc->caps_list_lock);
        }
        return 0;
}
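
/*
 * Typical (illustrative) reservation lifecycle, assuming a caller
 * that knows in advance how many caps an operation may consume:
 *
 *      struct ceph_cap_reservation rsvd;
 *
 *      err = ceph_reserve_caps(mdsc, &rsvd, need);    preallocate
 *      ...
 *      cap = get_cap(mdsc, &rsvd);        consume one reserved cap
 *      ...
 *      ceph_unreserve_caps(mdsc, &rsvd);  return any unused caps
 *
 * This keeps cap allocation from failing at awkward points, such as
 * in the middle of processing an MDS reply.
 */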

static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
                                struct ceph_cap_reservation *ctx)
{
        struct ceph_cap *cap = NULL;

        /* temporary, until we do something about cap import/export */
        if (!ctx) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (cap) {
                        mdsc->caps_use_count++;
                        mdsc->caps_total_count++;
                }
                return cap;
        }

        spin_lock(&mdsc->caps_list_lock);
        dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
             ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        BUG_ON(!ctx->count);
        BUG_ON(ctx->count > mdsc->caps_reserve_count);
        BUG_ON(list_empty(&mdsc->caps_list));

        ctx->count--;
        mdsc->caps_reserve_count--;
        mdsc->caps_use_count++;

        cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
        list_del(&cap->caps_item);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
        return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
        spin_lock(&mdsc->caps_list_lock);
        dout("put_cap %p %d = %d used + %d resv + %d avail\n",
             cap, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        mdsc->caps_use_count--;
        /*
         * Keep some preallocated caps around (ceph_min_count), to
         * avoid lots of free/alloc churn.
         */
        if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
                                      mdsc->caps_min_count) {
                mdsc->caps_total_count--;
                kmem_cache_free(ceph_cap_cachep, cap);
        } else {
                mdsc->caps_avail_count++;
                list_add(&cap->caps_item, &mdsc->caps_list);
        }

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
                             int *total, int *avail, int *used, int *reserved,
                             int *min)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;

        if (total)
                *total = mdsc->caps_total_count;
        if (avail)
                *avail = mdsc->caps_avail_count;
        if (used)
                *used = mdsc->caps_use_count;
        if (reserved)
                *reserved = mdsc->caps_reserve_count;
        if (min)
                *min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;
        struct rb_node *n = ci->i_caps.rb_node;

        while (n) {
                cap = rb_entry(n, struct ceph_cap, ci_node);
                if (mds < cap->mds)
                        n = n->rb_left;
                else if (mds > cap->mds)
                        n = n->rb_right;
                else
                        return cap;
        }
        return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;

        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        spin_unlock(&ci->i_ceph_lock);
        return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        int mds = -1;
        struct rb_node *p;

        /* prefer mds with WR|BUFFER|EXCL caps */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                mds = cap->mds;
                if (cap->issued & (CEPH_CAP_FILE_WR |
                                   CEPH_CAP_FILE_BUFFER |
                                   CEPH_CAP_FILE_EXCL))
                        break;
        }
        return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;
        spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
        spin_unlock(&ci->i_ceph_lock);
        return mds;
}

/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
{
        struct rb_node **p = &ci->i_caps.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap *cap = NULL;

        while (*p) {
                parent = *p;
                cap = rb_entry(parent, struct ceph_cap, ci_node);
                if (new->mds < cap->mds)
                        p = &(*p)->rb_left;
                else if (new->mds > cap->mds)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->ci_node, parent, p);
        rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        struct ceph_mount_options *ma = mdsc->fsc->mount_options;

        ci->i_hold_caps_min = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_min * HZ);
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_max * HZ);
        dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
             ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
{
        __cap_set_timeouts(mdsc, ci);
        dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
             ci->i_ceph_flags, ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
                if (!list_empty(&ci->i_cap_delay_list)) {
                        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                                goto no_change;
                        list_del_init(&ci->i_cap_delay_list);
                }
                list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
                spin_unlock(&mdsc->cap_delay_lock);
        }
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
{
        dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
                list_del_init(&ci->i_cap_delay_list);
        list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
        list_del_init(&ci->i_cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                              unsigned issued)
{
        unsigned had = __ceph_caps_issued(ci, NULL);

        /*
         * Each time we receive FILE_CACHE anew, we increment
         * i_rdcache_gen.
         */
        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
            (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
                ci->i_rdcache_gen++;

        /*
         * if we are newly issued FILE_SHARED, clear D_COMPLETE; we
         * don't know what happened to this directory while we didn't
         * have the cap.
         */
        if ((issued & CEPH_CAP_FILE_SHARED) &&
            (had & CEPH_CAP_FILE_SHARED) == 0) {
                ci->i_shared_gen++;
                if (S_ISDIR(ci->vfs_inode.i_mode))
                        ceph_dir_clear_complete(&ci->vfs_inode);
        }
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
                 struct ceph_mds_session *session, u64 cap_id,
                 int fmode, unsigned issued, unsigned wanted,
                 unsigned seq, unsigned mseq, u64 realmino, int flags,
                 struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *new_cap = NULL;
        struct ceph_cap *cap;
        int mds = session->s_mds;
        int actual_wanted;

        dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
             session->s_mds, cap_id, ceph_cap_string(issued), seq);

        /*
         * If we are opening the file, include file mode wanted bits
         * in wanted.
         */
        if (fmode >= 0)
                wanted |= ceph_caps_for_mode(fmode);

retry:
        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
                        spin_unlock(&ci->i_ceph_lock);
                        new_cap = get_cap(mdsc, caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
                        goto retry;
                }

                cap->issued = 0;
                cap->implemented = 0;
                cap->mds = mds;
                cap->mds_wanted = 0;

                cap->ci = ci;
                __insert_cap_node(ci, cap);

                /* clear out old exporting info?  (i.e. on cap import) */
                if (ci->i_cap_exporting_mds == mds) {
                        ci->i_cap_exporting_issued = 0;
                        ci->i_cap_exporting_mseq = 0;
                        ci->i_cap_exporting_mds = -1;
                }

                /* add to session cap list */
                cap->session = session;
                spin_lock(&session->s_cap_lock);
                list_add_tail(&cap->session_caps, &session->s_caps);
                session->s_nr_caps++;
                spin_unlock(&session->s_cap_lock);
        } else if (new_cap)
                ceph_put_cap(mdsc, new_cap);

        if (!ci->i_snap_realm) {
                /*
                 * add this inode to the appropriate snap realm
                 */
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                               realmino);
                if (realm) {
                        ceph_get_snap_realm(mdsc, realm);
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
                                 &realm->inodes_with_caps);
                        spin_unlock(&realm->inodes_with_caps_lock);
                } else {
                        pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
                               realmino);
                        WARN_ON(!realm);
                }
        }

        __check_cap_issue(ci, cap, issued);

        /*
         * If we are issued caps we don't want, or the mds' wanted
         * value appears to be off, queue a check so we'll release
         * later and/or update the mds wanted value.
         */
        actual_wanted = __ceph_caps_wanted(ci);
        if ((wanted & ~actual_wanted) ||
            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
                dout(" issued %s, mds wanted %s, actual %s, queueing\n",
                     ceph_cap_string(issued), ceph_cap_string(wanted),
                     ceph_cap_string(actual_wanted));
                __cap_delay_requeue(mdsc, ci);
        }

        if (flags & CEPH_CAP_FLAG_AUTH)
                ci->i_auth_cap = cap;
        else if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
             ceph_cap_string(issued|cap->issued), seq, mds);
        cap->cap_id = cap_id;
        cap->issued = issued;
        cap->implemented |= issued;
        cap->mds_wanted |= wanted;
        cap->seq = seq;
        cap->issue_seq = seq;
        cap->mseq = mseq;
        cap->cap_gen = session->s_cap_gen;

        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
        spin_unlock(&ci->i_ceph_lock);
        wake_up_all(&ci->i_cap_wq);
        return 0;
}
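
/*
 * Illustrative use of @fmode above: a caller processing an MDS open
 * reply can pass the file's open mode (e.g. CEPH_FILE_MODE_RD) so
 * that the cap and the open-file reference (__ceph_get_fmode) are
 * taken atomically under i_ceph_lock, while callers that are not
 * opening a file simply pass fmode = -1.
 */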

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
        unsigned long ttl;
        u32 gen;

        spin_lock(&cap->session->s_gen_ttl_lock);
        gen = cap->session->s_cap_gen;
        ttl = cap->session->s_cap_ttl;
        spin_unlock(&cap->session->s_gen_ttl_lock);

        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
                     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }

        return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
        int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
        struct ceph_cap *cap;
        struct rb_node *p;

        if (implemented)
                *implemented = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                dout("__ceph_caps_issued %p cap %p issued %s\n",
                     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
        }
        return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap == ocap)
                        continue;
                if (!__cap_is_valid(cap))
                        continue;
                have |= cap->issued;
        }
        return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *s = cap->session;

        spin_lock(&s->s_cap_lock);
        if (s->s_cap_iterator == NULL) {
                dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
                     s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
                     &cap->ci->vfs_inode, cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, touch the cap(s), moving
 * them to the (recently used) end of their respective LRUs.  (This
 * is the preferred way for callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int have = ci->i_snap_caps;

        if ((have & mask) == mask) {
                dout("__ceph_caps_issued_mask %p snap issued %s"
                     " (mask %s)\n", &ci->vfs_inode,
                     ceph_cap_string(have),
                     ceph_cap_string(mask));
                return 1;
        }

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if ((cap->issued & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p cap %p issued %s"
                             " (mask %s)\n", &ci->vfs_inode, cap,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch)
                                __touch_cap(cap);
                        return 1;
                }

                /* does a combination of caps satisfy mask? */
                have |= cap->issued;
                if ((have & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p combo issued %s"
                             " (mask %s)\n", &ci->vfs_inode,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch) {
                                struct rb_node *q;

                                /* touch this + preceding caps */
                                __touch_cap(cap);
                                for (q = rb_first(&ci->i_caps); q != p;
                                     q = rb_next(q)) {
                                        cap = rb_entry(q, struct ceph_cap,
                                                       ci_node);
                                        if (!__cap_is_valid(cap))
                                                continue;
                                        __touch_cap(cap);
                                }
                        }
                        return 1;
                }
        }

        return 0;
}
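
/*
 * Illustrative combo case for the loop above: if mds0 has issued Fs
 * and mds1 has issued Fr, a mask of FILE_SHARED|FILE_RD is satisfied
 * by the union of the two caps even though neither cap alone covers
 * the whole mask; with @touch set, both contributing caps get
 * touched.
 */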

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
        struct rb_node *p;
        int ret = 0;

        spin_lock(&ci->i_ceph_lock);
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (__cap_is_valid(cap) &&
                    (cap->implemented & ~cap->issued & mask)) {
                        ret = 1;
                        break;
                }
        }
        spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
        int used = 0;
        if (ci->i_pin_ref)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
        if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
        if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
        int want = 0;
        int mode;
        for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
                if (ci->i_nr_by_mode[mode])
                        want |= ceph_caps_for_mode(mode);
        return want;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int mds_wanted = 0;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                mds_wanted |= cap->mds_wanted;
        }
        return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
        return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        int removed = 0;

        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

        /* remove from session list */
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
                     cap, cap->session);
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
                cap->session = NULL;
                removed = 1;
        }
        /* protect backpointer with s_cap_lock: see iterate_session_caps */
        cap->ci = NULL;
        spin_unlock(&session->s_cap_lock);

        /* remove from inode list */
        rb_erase(&cap->ci_node, &ci->i_caps);
        if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        if (removed)
                ceph_put_cap(mdsc, cap);

        if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
                struct ceph_snap_realm *realm = ci->i_snap_realm;
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                ci->i_snap_realm_counter++;
                ci->i_snap_realm = NULL;
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }
        if (!__ceph_is_any_real_caps(ci))
                __cap_delay_cancel(mdsc, ci);
}

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
                        u64 ino, u64 cid, int op,
                        int caps, int wanted, int dirty,
                        u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
                        u64 size, u64 max_size,
                        struct timespec *mtime, struct timespec *atime,
                        u64 time_warp_seq,
                        uid_t uid, gid_t gid, umode_t mode,
                        u64 xattr_version,
                        struct ceph_buffer *xattrs_buf,
                        u64 follows)
{
        struct ceph_mds_caps *fc;
        struct ceph_msg *msg;

        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
             " seq %u/%u mseq %u follows %lld size %llu/%llu"
             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
             ceph_cap_string(dirty),
             seq, issue_seq, mseq, follows, size, max_size,
             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
        if (!msg)
                return -ENOMEM;

        msg->hdr.tid = cpu_to_le64(flush_tid);

        fc = msg->front.iov_base;
        memset(fc, 0, sizeof(*fc));

        fc->cap_id = cpu_to_le64(cid);
        fc->op = cpu_to_le32(op);
        fc->seq = cpu_to_le32(seq);
        fc->issue_seq = cpu_to_le32(issue_seq);
        fc->migrate_seq = cpu_to_le32(mseq);
        fc->caps = cpu_to_le32(caps);
        fc->wanted = cpu_to_le32(wanted);
        fc->dirty = cpu_to_le32(dirty);
        fc->ino = cpu_to_le64(ino);
        fc->snap_follows = cpu_to_le64(follows);

        fc->size = cpu_to_le64(size);
        fc->max_size = cpu_to_le64(max_size);
        if (mtime)
                ceph_encode_timespec(&fc->mtime, mtime);
        if (atime)
                ceph_encode_timespec(&fc->atime, atime);
        fc->time_warp_seq = cpu_to_le32(time_warp_seq);

        fc->uid = cpu_to_le32(uid);
        fc->gid = cpu_to_le32(gid);
        fc->mode = cpu_to_le32(mode);

        fc->xattr_version = cpu_to_le64(xattr_version);
        if (xattrs_buf) {
                msg->middle = ceph_buffer_get(xattrs_buf);
                fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
                msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
        }

        ceph_con_send(&session->s_con, msg);
        return 0;
}
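
/*
 * Wire-layout summary of send_cap_msg(): the fixed-size struct
 * ceph_mds_caps travels in msg->front, any xattr blob rides along as
 * msg->middle, and the flush tid is carried in the message header
 * (hdr.tid) so that a subsequent ack from the MDS can be matched to
 * a specific flush.
 */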

static void __queue_cap_release(struct ceph_mds_session *session,
                                u64 ino, u64 cap_id, u32 migrate_seq,
                                u32 issue_seq)
{
        struct ceph_msg *msg;
        struct ceph_mds_cap_release *head;
        struct ceph_mds_cap_item *item;

        spin_lock(&session->s_cap_lock);
        BUG_ON(!session->s_num_cap_releases);
        msg = list_first_entry(&session->s_cap_releases,
                               struct ceph_msg, list_head);

        dout(" adding %llx release to mds%d msg %p (%d left)\n",
             ino, session->s_mds, msg, session->s_num_cap_releases);

        BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
        head = msg->front.iov_base;
        le32_add_cpu(&head->num, 1);
        item = msg->front.iov_base + msg->front.iov_len;
        item->ino = cpu_to_le64(ino);
        item->cap_id = cpu_to_le64(cap_id);
        item->migrate_seq = cpu_to_le32(migrate_seq);
        item->seq = cpu_to_le32(issue_seq);

        session->s_num_cap_releases--;

        msg->front.iov_len += sizeof(*item);
        if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
                dout(" release msg %p full\n", msg);
                list_move_tail(&msg->list_head, &session->s_cap_releases_done);
        } else {
                dout(" release msg %p at %d/%d (%d)\n", msg,
                     (int)le32_to_cpu(head->num),
                     (int)CEPH_CAPS_PER_RELEASE,
                     (int)msg->front.iov_len);
        }
        spin_unlock(&session->s_cap_lock);
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;

        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                struct ceph_mds_session *session = cap->session;

                __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
                                    cap->mseq, cap->issue_seq);
                p = rb_next(p);
                __ceph_remove_cap(cap);
        }
}

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, and of revoked
 * caps that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate the page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
        __releases(cap->ci->i_ceph_lock)
{
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
        u64 cap_id = cap->cap_id;
        int held, revoking, dropping, keep;
        u64 seq, issue_seq, mseq, time_warp_seq, follows;
        u64 size, max_size;
        struct timespec mtime, atime;
        int wake = 0;
        umode_t mode;
        uid_t uid;
        gid_t gid;
        struct ceph_mds_session *session;
        u64 xattr_version = 0;
        struct ceph_buffer *xattr_blob = NULL;
        int delayed = 0;
        u64 flush_tid = 0;
        int i;
        int ret;

        held = cap->issued | cap->implemented;
        revoking = cap->implemented & ~cap->issued;
        retain &= ~revoking;
        dropping = cap->issued & ~retain;

        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
             inode, cap, cap->session,
             ceph_cap_string(held), ceph_cap_string(held & retain),
             ceph_cap_string(revoking));
        BUG_ON((retain & CEPH_CAP_PIN) == 0);

        session = cap->session;

        /* don't release wanted unless we've waited a bit. */
        if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
            time_before(jiffies, ci->i_hold_caps_min)) {
                dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
                     ceph_cap_string(cap->issued),
                     ceph_cap_string(cap->issued & retain),
                     ceph_cap_string(cap->mds_wanted),
                     ceph_cap_string(want));
                want |= cap->mds_wanted;
                retain |= cap->issued;
                delayed = 1;
        }
        ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

        cap->issued &= retain;  /* drop bits we don't want */
        if (cap->implemented & ~cap->issued) {
                /*
                 * Wake up any waiters on wanted -> needed transition.
                 * This is due to the weird transition from buffered
                 * to sync IO... we need to flush dirty pages _before_
                 * allowing sync writes to avoid reordering.
                 */
                wake = 1;
        }
        cap->implemented &= cap->issued | used;
        cap->mds_wanted = want;

        if (flushing) {
                /*
                 * assign a tid for flush operations so we can avoid
                 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
                 * clean type races.  track latest tid for every bit
                 * so we can handle flush AxFw, flush Fw, and have the
                 * first ack clean Ax.
                 */
                flush_tid = ++ci->i_cap_flush_last_tid;
                if (pflush_tid)
                        *pflush_tid = flush_tid;
                dout(" cap_flush_tid %d\n", (int)flush_tid);
                for (i = 0; i < CEPH_CAP_BITS; i++)
                        if (flushing & (1 << i))
                                ci->i_cap_flush_tid[i] = flush_tid;

                follows = ci->i_head_snapc->seq;
        } else {
                follows = 0;
        }

        keep = cap->implemented;
        seq = cap->seq;
        issue_seq = cap->issue_seq;
        mseq = cap->mseq;
        size = inode->i_size;
        ci->i_reported_size = size;
        max_size = ci->i_wanted_max_size;
        ci->i_requested_max_size = max_size;
        mtime = inode->i_mtime;
        atime = inode->i_atime;
        time_warp_seq = ci->i_time_warp_seq;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mode = inode->i_mode;

        if (flushing & CEPH_CAP_XATTR_EXCL) {
                __ceph_build_xattrs_blob(ci);
                xattr_blob = ci->i_xattrs.blob;
                xattr_version = ci->i_xattrs.version;
        }

        spin_unlock(&ci->i_ceph_lock);

        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
                size, max_size, &mtime, &atime, time_warp_seq,
                uid, gid, mode, xattr_version, xattr_blob,
                follows);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
                delayed = 1;
        }

        if (wake)
                wake_up_all(&ci->i_cap_wq);

        return delayed;
}
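
/*
 * Worked example of the per-bit flush tid logic above (illustrative):
 *
 *      flush #1 carries Ax+Fw  ->  i_cap_flush_tid[Ax] = 1,
 *                                  i_cap_flush_tid[Fw] = 1
 *      flush #2 carries Fw     ->  i_cap_flush_tid[Fw] = 2
 *
 * When the ack for tid 1 arrives, Ax (whose latest tid is still 1)
 * can be marked clean, but Fw cannot, since a newer flush (tid 2) is
 * still outstanding for that bit.
 */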

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Unless @again is true, skip cap_snaps that were already sent to
 * the MDS (i.e., during this session).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int again)
                __releases(ci->i_ceph_lock)
                __acquires(ci->i_ceph_lock)
{
        struct inode *inode = &ci->vfs_inode;
        int mds;
        struct ceph_cap_snap *capsnap;
        u32 mseq;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
                                                    session->s_mutex */
        u64 next_follows = 0;  /* keep track of how far we've gotten through the
                             i_cap_snaps list, and skip these entries next time
                             around to avoid an infinite loop */

        if (psession)
                session = *psession;

        dout("__flush_snaps %p\n", inode);
retry:
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                /* avoid an infinite loop after retry */
                if (capsnap->follows < next_follows)
                        continue;
                /*
                 * we need to wait for sync writes to complete and for dirty
                 * pages to be written out.
                 */
                if (capsnap->dirty_pages || capsnap->writing)
                        break;

                /*
                 * if cap writeback already occurred, we should have dropped
                 * the capsnap in ceph_put_wrbuffer_cap_refs.
                 */
                BUG_ON(capsnap->dirty == 0);

                /* pick mds, take s_mutex */
                if (ci->i_auth_cap == NULL) {
                        dout("no auth cap (migrating?), doing nothing\n");
                        goto out;
                }

                /* only flush each capsnap once */
                if (!again && !list_empty(&capsnap->flushing_item)) {
                        dout("already flushed %p, skipping\n", capsnap);
                        continue;
                }

                mds = ci->i_auth_cap->session->s_mds;
                mseq = ci->i_auth_cap->mseq;

                if (session && session->s_mds != mds) {
                        dout("oops, wrong session %p mutex\n", session);
                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        session = NULL;
                }
                if (!session) {
                        spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
                        if (session) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
                                mutex_lock(&session->s_mutex);
                        }
                        /*
                         * if session == NULL, we raced against a cap
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
                        spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }

                capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
                atomic_inc(&capsnap->nref);
                if (!list_empty(&capsnap->flushing_item))
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
                spin_unlock(&ci->i_ceph_lock);

                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
                             capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
                             capsnap->size, 0,
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
                             capsnap->xattr_version, capsnap->xattr_blob,
                             capsnap->follows);

                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);

                spin_lock(&ci->i_ceph_lock);
                goto retry;
        }

        /* we flushed them all; remove this inode from the queue */
        spin_lock(&mdsc->snap_flush_lock);
        list_del_init(&ci->i_snap_flush_item);
        spin_unlock(&mdsc->snap_flush_lock);

out:
        if (psession)
                *psession = session;
        else if (session) {
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
        }
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
        spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
        spin_unlock(&ci->i_ceph_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        struct inode *inode = &ci->vfs_inode;
        int was = ci->i_dirty_caps;
        int dirty = 0;

        dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
             ceph_cap_string(mask), ceph_cap_string(was),
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
                if (!ci->i_head_snapc)
                        ci->i_head_snapc = ceph_get_snap_context(
                                ci->i_snap_realm->cached_context);
                dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
                        ci->i_head_snapc);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
                spin_unlock(&mdsc->cap_dirty_lock);
                if (ci->i_flushing_caps == 0) {
                        ihold(inode);
                        dirty |= I_DIRTY_SYNC;
                }
        }
        BUG_ON(list_empty(&ci->i_dirty_item));
        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
            (mask & CEPH_CAP_FILE_BUFFER))
                dirty |= I_DIRTY_DATASYNC;
        __cap_delay_requeue(mdsc, ci);
        return dirty;
}
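
/*
 * Illustrative caller pattern for the contract described above
 * (setattr/write-style paths):
 *
 *      spin_lock(&ci->i_ceph_lock);
 *      dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
 *      spin_unlock(&ci->i_ceph_lock);
 *      if (dirty)
 *              __mark_inode_dirty(inode, dirty);
 */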

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
                                 struct ceph_mds_session *session)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int flushing;

        BUG_ON(ci->i_dirty_caps == 0);
        BUG_ON(list_empty(&ci->i_dirty_item));

        flushing = ci->i_dirty_caps;
        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
             ceph_cap_string(flushing),
             ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(ci->i_flushing_caps | flushing));
        ci->i_flushing_caps |= flushing;
        ci->i_dirty_caps = 0;
        dout(" inode %p now !dirty\n", inode);

        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);

        ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
        if (list_empty(&ci->i_flushing_item)) {
                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                mdsc->num_cap_flushing++;
                dout(" inode %p now flushing seq %lld\n", inode,
                     ci->i_cap_flush_seq);
        } else {
                list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                dout(" inode %p now flushing (more) seq %lld\n", inode,
                     ci->i_cap_flush_seq);
        }
        spin_unlock(&mdsc->cap_dirty_lock);

        return flushing;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;

        spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
        spin_lock(&ci->i_ceph_lock);

        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
                /* success. */
                dout("try_nonblocking_invalidate %p success\n", inode);
                /* save any racing async invalidate some trouble */
                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
                return 0;
        }
        dout("try_nonblocking_invalidate %p failed\n", inode);
        return -1;
}
1438
1439/*
1440 * Swiss army knife function to examine currently used and wanted
1441 * versus held caps.  Release, flush, ack revoked caps to mds as
1442 * appropriate.
1443 *
1444 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1445 *    cap release further.
1446 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1447 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1448 *    further delay.
1449 */
1450void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1451                     struct ceph_mds_session *session)
1452{
1453        struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1454        struct ceph_mds_client *mdsc = fsc->mdsc;
1455        struct inode *inode = &ci->vfs_inode;
1456        struct ceph_cap *cap;
1457        int file_wanted, used;
1458        int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1459        int issued, implemented, want, retain, revoking, flushing = 0;
1460        int mds = -1;   /* keep track of how far we've gone through i_caps list
1461                           to avoid an infinite loop on retry */
1462        struct rb_node *p;
1463        int tried_invalidate = 0;
1464        int delayed = 0, sent = 0, force_requeue = 0, num;
1465        int queue_invalidate = 0;
1466        int is_delayed = flags & CHECK_CAPS_NODELAY;
1467
1468        /* if we are unmounting, flush any unused caps immediately. */
1469        if (mdsc->stopping)
1470                is_delayed = 1;
1471
1472        spin_lock(&ci->i_ceph_lock);
1473
1474        if (ci->i_ceph_flags & CEPH_I_FLUSH)
1475                flags |= CHECK_CAPS_FLUSH;
1476
1477        /* flush snaps first time around only */
1478        if (!list_empty(&ci->i_cap_snaps))
1479                __ceph_flush_snaps(ci, &session, 0);
1480        goto retry_locked;
1481retry:
1482        spin_lock(&ci->i_ceph_lock);
1483retry_locked:
1484        file_wanted = __ceph_caps_file_wanted(ci);
1485        used = __ceph_caps_used(ci);
1486        want = file_wanted | used;
1487        issued = __ceph_caps_issued(ci, &implemented);
1488        revoking = implemented & ~issued;
1489
1490        retain = want | CEPH_CAP_PIN;
1491        if (!mdsc->stopping && inode->i_nlink > 0) {
1492                if (want) {
1493                        retain |= CEPH_CAP_ANY;       /* be greedy */
1494                } else {
1495                        retain |= CEPH_CAP_ANY_SHARED;
1496                        /*
1497                         * keep RD only if we didn't have the file open RW,
1498                         * because then the mds would revoke it anyway to
1499                         * journal max_size=0.
1500                         */
1501                        if (ci->i_max_size == 0)
1502                                retain |= CEPH_CAP_ANY_RD;
1503                }
1504        }
1505
1506        dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1507             " issued %s revoking %s retain %s %s%s%s\n", inode,
1508             ceph_cap_string(file_wanted),
1509             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1510             ceph_cap_string(ci->i_flushing_caps),
1511             ceph_cap_string(issued), ceph_cap_string(revoking),
1512             ceph_cap_string(retain),
1513             (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1514             (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1515             (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1516
1517        /*
1518         * If we no longer need to hold onto our old caps, and we may
1519         * have cached pages, but don't want them, then try to invalidate.
1520         * If we fail, it's because pages are locked...  try again later.
1521         */
1522        if ((!is_delayed || mdsc->stopping) &&
1523            ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
1524            inode->i_data.nrpages &&                 /* have cached pages */
1525            (file_wanted == 0 ||                     /* no open files */
1526             (revoking & (CEPH_CAP_FILE_CACHE|
1527                          CEPH_CAP_FILE_LAZYIO))) && /*  or revoking cache */
1528            !tried_invalidate) {
1529                dout("check_caps trying to invalidate on %p\n", inode);
1530                if (try_nonblocking_invalidate(inode) < 0) {
1531                        if (revoking & (CEPH_CAP_FILE_CACHE|
1532                                        CEPH_CAP_FILE_LAZYIO)) {
1533                                dout("check_caps queuing invalidate\n");
1534                                queue_invalidate = 1;
1535                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
1536                        } else {
1537                                dout("check_caps failed to invalidate pages\n");
1538                                /* we failed to invalidate pages.  check these
1539                                   caps again later. */
1540                                force_requeue = 1;
1541                                __cap_set_timeouts(mdsc, ci);
1542                        }
1543                }
1544                tried_invalidate = 1;
1545                goto retry_locked;
1546        }
1547
1548        num = 0;
1549        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1550                cap = rb_entry(p, struct ceph_cap, ci_node);
1551                num++;
1552
1553                /* avoid looping forever */
1554                if (mds >= cap->mds ||
1555                    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1556                        continue;
1557
1558                /* NOTE: no side-effects allowed, until we take s_mutex */
1559
1560                revoking = cap->implemented & ~cap->issued;
1561                dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
1562                     cap->mds, cap, ceph_cap_string(cap->issued),
1563                     ceph_cap_string(cap->implemented),
1564                     ceph_cap_string(revoking));
1565
1566                if (cap == ci->i_auth_cap &&
1567                    (cap->issued & CEPH_CAP_FILE_WR)) {
1568                        /* request larger max_size from MDS? */
1569                        if (ci->i_wanted_max_size > ci->i_max_size &&
1570                            ci->i_wanted_max_size > ci->i_requested_max_size) {
1571                                dout("requesting new max_size\n");
1572                                goto ack;
1573                        }
1574
1575                        /* approaching file_max? */
1576                        if ((inode->i_size << 1) >= ci->i_max_size &&
1577                            (ci->i_reported_size << 1) < ci->i_max_size) {
1578                                dout("i_size approaching max_size\n");
1579                                goto ack;
1580                        }
1581                }
1582                /* flush anything dirty? */
1583                if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1584                    ci->i_dirty_caps) {
1585                        dout("flushing dirty caps\n");
1586                        goto ack;
1587                }
1588
1589                /* completed revocation? ack if nothing being revoked is still in use */
1590                if (revoking && (revoking & used) == 0) {
1591                        dout("completed revocation of %s\n",
1592                             ceph_cap_string(cap->implemented & ~cap->issued));
1593                        goto ack;
1594                }
1595
1596                /* want more caps from mds? */
1597                if (want & ~(cap->mds_wanted | cap->issued))
1598                        goto ack;
1599
1600                /* things we might delay */
1601                if ((cap->issued & ~retain) == 0 &&
1602                    cap->mds_wanted == want)
1603                        continue;     /* nope, all good */
1604
1605                if (is_delayed)
1606                        goto ack;
1607
1608                /* delay? */
1609                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1610                    time_before(jiffies, ci->i_hold_caps_max)) {
1611                        dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1612                             ceph_cap_string(cap->issued),
1613                             ceph_cap_string(cap->issued & retain),
1614                             ceph_cap_string(cap->mds_wanted),
1615                             ceph_cap_string(want));
1616                        delayed++;
1617                        continue;
1618                }
1619
1620ack:
1621                if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1622                        dout(" skipping %p I_NOFLUSH set\n", inode);
1623                        continue;
1624                }
1625
1626                if (session && session != cap->session) {
1627                        dout("oops, wrong session %p mutex\n", session);
1628                        mutex_unlock(&session->s_mutex);
1629                        session = NULL;
1630                }
1631                if (!session) {
1632                        session = cap->session;
1633                        if (mutex_trylock(&session->s_mutex) == 0) {
1634                                dout("inverting session/ino locks on %p\n",
1635                                     session);
1636                                spin_unlock(&ci->i_ceph_lock);
1637                                if (took_snap_rwsem) {
1638                                        up_read(&mdsc->snap_rwsem);
1639                                        took_snap_rwsem = 0;
1640                                }
1641                                mutex_lock(&session->s_mutex);
1642                                goto retry;
1643                        }
1644                }
1645                /* take snap_rwsem after session mutex */
1646                if (!took_snap_rwsem) {
1647                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1648                                dout("inverting snap/in locks on %p\n",
1649                                     inode);
1650                                spin_unlock(&ci->i_ceph_lock);
1651                                down_read(&mdsc->snap_rwsem);
1652                                took_snap_rwsem = 1;
1653                                goto retry;
1654                        }
1655                        took_snap_rwsem = 1;
1656                }
1657
1658                if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1659                        flushing = __mark_caps_flushing(inode, session);
1660                else
1661                        flushing = 0;
1662
1663                mds = cap->mds;  /* remember mds, so we don't repeat */
1664                sent++;
1665
1666                /* __send_cap drops i_ceph_lock */
1667                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1668                                      retain, flushing, NULL);
1669                goto retry; /* retake i_ceph_lock and restart our cap scan. */
1670        }
1671
1672        /*
1673         * Reschedule delayed caps release if we delayed anything,
1674         * otherwise cancel.
1675         */
1676        if (delayed && is_delayed)
1677                force_requeue = 1;   /* __send_cap delayed release; requeue */
1678        if (!delayed && !is_delayed)
1679                __cap_delay_cancel(mdsc, ci);
1680        else if (!is_delayed || force_requeue)
1681                __cap_delay_requeue(mdsc, ci);
1682
1683        spin_unlock(&ci->i_ceph_lock);
1684
1685        if (queue_invalidate)
1686                ceph_queue_invalidate(inode);
1687
1688        if (session)
1689                mutex_unlock(&session->s_mutex);
1690        if (took_snap_rwsem)
1691                up_read(&mdsc->snap_rwsem);
1692}
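
/*
 * For reference, the flags above are combined at call sites later in
 * this file roughly as follows (illustrative, not exhaustive):
 *
 *   ceph_check_caps(ci, 0, NULL);                    after a ref drop
 *   ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);  max_size request
 *   ceph_check_caps(ci, CHECK_CAPS_NODELAY |
 *                       CHECK_CAPS_AUTHONLY, session);  ack a grant
 */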
1693
1694/*
1695 * Try to flush dirty caps back to the auth mds.
1696 */
1697static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1698                          unsigned *flush_tid)
1699{
1700        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1701        struct ceph_inode_info *ci = ceph_inode(inode);
1702        int unlock_session = session ? 0 : 1;
1703        int flushing = 0;
1704
1705retry:
1706        spin_lock(&ci->i_ceph_lock);
1707        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1708                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1709                goto out;
1710        }
1711        if (ci->i_dirty_caps && ci->i_auth_cap) {
1712                struct ceph_cap *cap = ci->i_auth_cap;
1713                int used = __ceph_caps_used(ci);
1714                int want = __ceph_caps_wanted(ci);
1715                int delayed;
1716
1717                if (!session) {
1718                        spin_unlock(&ci->i_ceph_lock);
1719                        session = cap->session;
1720                        mutex_lock(&session->s_mutex);
1721                        goto retry;
1722                }
1723                BUG_ON(session != cap->session);
1724                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1725                        goto out;
1726
1727                flushing = __mark_caps_flushing(inode, session);
1728
1729                /* __send_cap drops i_ceph_lock */
1730                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1731                                     cap->issued | cap->implemented, flushing,
1732                                     flush_tid);
1733                if (!delayed)
1734                        goto out_unlocked;
1735
1736                spin_lock(&ci->i_ceph_lock);
1737                __cap_delay_requeue(mdsc, ci);
1738        }
1739out:
1740        spin_unlock(&ci->i_ceph_lock);
1741out_unlocked:
1742        if (session && unlock_session)
1743                mutex_unlock(&session->s_mutex);
1744        return flushing;
1745}
1746
1747/*
1748 * Return true if we've flushed caps through the given flush_tid.
1749 */
1750static int caps_are_flushed(struct inode *inode, unsigned tid)
1751{
1752        struct ceph_inode_info *ci = ceph_inode(inode);
1753        int i, ret = 1;
1754
1755        spin_lock(&ci->i_ceph_lock);
1756        for (i = 0; i < CEPH_CAP_BITS; i++)
1757                if ((ci->i_flushing_caps & (1 << i)) &&
1758                    ci->i_cap_flush_tid[i] <= tid) {
1759                        /* still flushing this bit */
1760                        ret = 0;
1761                        break;
1762                }
1763        spin_unlock(&ci->i_ceph_lock);
1764        return ret;
1765}
1766
1767/*
1768 * Wait on any unsafe replies for the given inode.  First wait on the
1769 * newest request, and make that the upper bound.  Then, if there are
1770 * more requests, keep waiting on the oldest as long as it is still older
1771 * than the original request.
1772 */
1773static void sync_write_wait(struct inode *inode)
1774{
1775        struct ceph_inode_info *ci = ceph_inode(inode);
1776        struct list_head *head = &ci->i_unsafe_writes;
1777        struct ceph_osd_request *req;
1778        u64 last_tid;
1779
1780        spin_lock(&ci->i_unsafe_lock);
1781        if (list_empty(head))
1782                goto out;
1783
1784        /* set upper bound as _last_ entry in chain */
1785        req = list_entry(head->prev, struct ceph_osd_request,
1786                         r_unsafe_item);
1787        last_tid = req->r_tid;
1788
1789        do {
1790                ceph_osdc_get_request(req);
1791                spin_unlock(&ci->i_unsafe_lock);
1792                dout("sync_write_wait on tid %llu (until %llu)\n",
1793                     req->r_tid, last_tid);
1794                wait_for_completion(&req->r_safe_completion);
1795                spin_lock(&ci->i_unsafe_lock);
1796                ceph_osdc_put_request(req);
1797
1798                /*
1799                 * from here on look at first entry in chain, since we
1800                 * only want to wait for anything older than last_tid
1801                 */
1802                if (list_empty(head))
1803                        break;
1804                req = list_entry(head->next, struct ceph_osd_request,
1805                                 r_unsafe_item);
1806        } while (req->r_tid < last_tid);
1807out:
1808        spin_unlock(&ci->i_unsafe_lock);
1809}
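
/*
 * Worked example (illustrative): with unsafe writes tid 5, 9, 12 on
 * the list, we wait on 12 first (the newest entry, our upper bound),
 * then walk from the head -- waiting on 5, then 9 -- and stop as soon
 * as the head's tid is no longer older than 12.
 */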
1810
1811int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1812{
1813        struct inode *inode = file->f_mapping->host;
1814        struct ceph_inode_info *ci = ceph_inode(inode);
1815        unsigned flush_tid;
1816        int ret;
1817        int dirty;
1818
1819        dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1820        sync_write_wait(inode);
1821
1822        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1823        if (ret < 0)
1824                return ret;
1825        mutex_lock(&inode->i_mutex);
1826
1827        dirty = try_flush_caps(inode, NULL, &flush_tid);
1828        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1829
1830        /*
1831         * only wait on non-file metadata writeback (the mds
1832         * can recover size and mtime, so we don't need to
1833         * wait for that)
1834         */
1835        if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1836                dout("fsync waiting for flush_tid %u\n", flush_tid);
1837                ret = wait_event_interruptible(ci->i_cap_wq,
1838                                       caps_are_flushed(inode, flush_tid));
1839        }
1840
1841        dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1842        mutex_unlock(&inode->i_mutex);
1843        return ret;
1844}
1845
1846/*
1847 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
1848 * queue inode for flush but don't do so immediately, because we can
1849 * get by with fewer MDS messages if we wait for data writeback to
1850 * complete first.
1851 */
1852int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1853{
1854        struct ceph_inode_info *ci = ceph_inode(inode);
1855        unsigned flush_tid;
1856        int err = 0;
1857        int dirty;
1858        int wait = wbc->sync_mode == WB_SYNC_ALL;
1859
1860        dout("write_inode %p wait=%d\n", inode, wait);
1861        if (wait) {
1862                dirty = try_flush_caps(inode, NULL, &flush_tid);
1863                if (dirty)
1864                        err = wait_event_interruptible(ci->i_cap_wq,
1865                                       caps_are_flushed(inode, flush_tid));
1866        } else {
1867                struct ceph_mds_client *mdsc =
1868                        ceph_sb_to_client(inode->i_sb)->mdsc;
1869
1870                spin_lock(&ci->i_ceph_lock);
1871                if (__ceph_caps_dirty(ci))
1872                        __cap_delay_requeue_front(mdsc, ci);
1873                spin_unlock(&ci->i_ceph_lock);
1874        }
1875        return err;
1876}
1877
1878/*
1879 * After a recovering MDS goes active, we need to resend any caps
1880 * we were flushing.
1881 *
1882 * Caller holds session->s_mutex.
1883 */
1884static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1885                                   struct ceph_mds_session *session)
1886{
1887        struct ceph_cap_snap *capsnap;
1888
1889        dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1890        list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1891                            flushing_item) {
1892                struct ceph_inode_info *ci = capsnap->ci;
1893                struct inode *inode = &ci->vfs_inode;
1894                struct ceph_cap *cap;
1895
1896                spin_lock(&ci->i_ceph_lock);
1897                cap = ci->i_auth_cap;
1898                if (cap && cap->session == session) {
1899                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1900                             cap, capsnap);
1901                        __ceph_flush_snaps(ci, &session, 1);
1902                } else {
1903                        pr_err("%p auth cap %p not mds%d ???\n", inode,
1904                               cap, session->s_mds);
1905                }
1906                spin_unlock(&ci->i_ceph_lock);
1907        }
1908}
1909
1910void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1911                             struct ceph_mds_session *session)
1912{
1913        struct ceph_inode_info *ci;
1914
1915        kick_flushing_capsnaps(mdsc, session);
1916
1917        dout("kick_flushing_caps mds%d\n", session->s_mds);
1918        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1919                struct inode *inode = &ci->vfs_inode;
1920                struct ceph_cap *cap;
1921                int delayed = 0;
1922
1923                spin_lock(&ci->i_ceph_lock);
1924                cap = ci->i_auth_cap;
1925                if (cap && cap->session == session) {
1926                        dout("kick_flushing_caps %p cap %p %s\n", inode,
1927                             cap, ceph_cap_string(ci->i_flushing_caps));
1928                        delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1929                                             __ceph_caps_used(ci),
1930                                             __ceph_caps_wanted(ci),
1931                                             cap->issued | cap->implemented,
1932                                             ci->i_flushing_caps, NULL);
1933                        if (delayed) {
1934                                spin_lock(&ci->i_ceph_lock);
1935                                __cap_delay_requeue(mdsc, ci);
1936                                spin_unlock(&ci->i_ceph_lock);
1937                        }
1938                } else {
1939                        pr_err("%p auth cap %p not mds%d ???\n", inode,
1940                               cap, session->s_mds);
1941                        spin_unlock(&ci->i_ceph_lock);
1942                }
1943        }
1944}
1945
1946static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1947                                     struct ceph_mds_session *session,
1948                                     struct inode *inode)
1949{
1950        struct ceph_inode_info *ci = ceph_inode(inode);
1951        struct ceph_cap *cap;
1952        int delayed = 0;
1953
1954        spin_lock(&ci->i_ceph_lock);
1955        cap = ci->i_auth_cap;
1956        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1957             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
1958        __ceph_flush_snaps(ci, &session, 1);
1959        if (ci->i_flushing_caps) {
1960                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1961                                     __ceph_caps_used(ci),
1962                                     __ceph_caps_wanted(ci),
1963                                     cap->issued | cap->implemented,
1964                                     ci->i_flushing_caps, NULL);
1965                if (delayed) {
1966                        spin_lock(&ci->i_ceph_lock);
1967                        __cap_delay_requeue(mdsc, ci);
1968                        spin_unlock(&ci->i_ceph_lock);
1969                }
1970        } else {
1971                spin_unlock(&ci->i_ceph_lock);
1972        }
1973}
1974
1975
1976/*
1977 * Take references to capabilities we hold, so that we don't release
1978 * them to the MDS prematurely.
1979 *
1980 * Protected by i_ceph_lock.
1981 */
1982static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1983{
1984        if (got & CEPH_CAP_PIN)
1985                ci->i_pin_ref++;
1986        if (got & CEPH_CAP_FILE_RD)
1987                ci->i_rd_ref++;
1988        if (got & CEPH_CAP_FILE_CACHE)
1989                ci->i_rdcache_ref++;
1990        if (got & CEPH_CAP_FILE_WR)
1991                ci->i_wr_ref++;
1992        if (got & CEPH_CAP_FILE_BUFFER) {
1993                if (ci->i_wb_ref == 0)
1994                        ihold(&ci->vfs_inode);
1995                ci->i_wb_ref++;
1996                dout("__take_cap_refs %p wb %d -> %d (?)\n",
1997                     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
1998        }
1999}
2000
2001/*
2002 * Try to grab cap references.  Specify those refs we @want, and the
2003 * minimal set we @need.  Also include the larger offset we are writing
2004 * to (when applicable), and check against max_size here as well.
2005 * Note that caller is responsible for ensuring max_size increases are
2006 * requested from the MDS.
2007 */
2008static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2009                            int *got, loff_t endoff, int *check_max, int *err)
2010{
2011        struct inode *inode = &ci->vfs_inode;
2012        int ret = 0;
2013        int have, implemented;
2014        int file_wanted;
2015
2016        dout("get_cap_refs %p need %s want %s\n", inode,
2017             ceph_cap_string(need), ceph_cap_string(want));
2018        spin_lock(&ci->i_ceph_lock);
2019
2020        /* make sure file is actually open */
2021        file_wanted = __ceph_caps_file_wanted(ci);
2022        if ((file_wanted & need) == 0) {
2023                dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2024                     ceph_cap_string(need), ceph_cap_string(file_wanted));
2025                *err = -EBADF;
2026                ret = 1;
2027                goto out;
2028        }
2029
2030        if (need & CEPH_CAP_FILE_WR) {
2031                if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2032                        dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2033                             inode, endoff, ci->i_max_size);
2034                        if (endoff > ci->i_wanted_max_size) {
2035                                *check_max = 1;
2036                                ret = 1;
2037                        }
2038                        goto out;
2039                }
2040                /*
2041                 * If a sync write is in progress, we must wait, so that we
2042                 * can get a final snapshot value for size+mtime.
2043                 */
2044                if (__ceph_have_pending_cap_snap(ci)) {
2045                        dout("get_cap_refs %p cap_snap_pending\n", inode);
2046                        goto out;
2047                }
2048        }
2049        have = __ceph_caps_issued(ci, &implemented);
2050
2051        /*
2052         * disallow writes while a truncate is pending
2053         */
2054        if (ci->i_truncate_pending)
2055                have &= ~CEPH_CAP_FILE_WR;
2056
2057        if ((have & need) == need) {
2058                /*
2059                 * Look at (implemented & ~have & not) so that we keep waiting
2060                 * on transition from wanted -> needed caps.  This is needed
2061                 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2062                 * going before a prior buffered writeback happens.
2063                 */
2064                int not = want & ~(have & need);
2065                int revoking = implemented & ~have;
2066                dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2067                     inode, ceph_cap_string(have), ceph_cap_string(not),
2068                     ceph_cap_string(revoking));
2069                if ((revoking & not) == 0) {
2070                        *got = need | (have & want);
2071                        __take_cap_refs(ci, *got);
2072                        ret = 1;
2073                }
2074        } else {
2075                dout("get_cap_refs %p have %s needed %s\n", inode,
2076                     ceph_cap_string(have), ceph_cap_string(need));
2077        }
2078out:
2079        spin_unlock(&ci->i_ceph_lock);
2080        dout("get_cap_refs %p ret %d got %s\n", inode,
2081             ret, ceph_cap_string(*got));
2082        return ret;
2083}
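
/*
 * Worked example (illustrative, loosely mirroring the write path):
 * suppose FILE_BUFFER is being revoked while FILE_WR is retained, and
 * a caller asks for need = FILE_WR, want = FILE_BUFFER.  Then:
 *
 *   have     = ... | FILE_WR               (FILE_BUFFER already gone)
 *   not      = want & ~(have & need)     = FILE_BUFFER
 *   revoking = implemented & ~have      includes FILE_BUFFER
 *
 * so (revoking & not) != 0 and we return 0 without taking refs: the
 * new sync write waits for the prior buffered writeback, as the
 * comment above intends.
 */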
2084
2085/*
2086 * Check the offset we are writing up to against our current
2087 * max_size.  If necessary, tell the MDS we want to write to
2088 * a larger offset.
2089 */
2090static void check_max_size(struct inode *inode, loff_t endoff)
2091{
2092        struct ceph_inode_info *ci = ceph_inode(inode);
2093        int check = 0;
2094
2095        /* do we need to explicitly request a larger max_size? */
2096        spin_lock(&ci->i_ceph_lock);
2097        if ((endoff >= ci->i_max_size ||
2098             endoff > (inode->i_size << 1)) &&
2099            endoff > ci->i_wanted_max_size) {
2100                dout("write %p at large endoff %llu, req max_size\n",
2101                     inode, endoff);
2102                ci->i_wanted_max_size = endoff;
2103                check = 1;
2104        }
2105        spin_unlock(&ci->i_ceph_lock);
2106        if (check)
2107                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2108}
2109
2110/*
2111 * Wait for caps, and take cap references.  If we can't get a WR cap
2112 * due to a small max_size, make sure we check_max_size (and possibly
2113 * ask the mds) so we don't get hung up indefinitely.
2114 */
2115int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2116                  loff_t endoff)
2117{
2118        int check_max, ret, err;
2119
2120retry:
2121        if (endoff > 0)
2122                check_max_size(&ci->vfs_inode, endoff);
2123        check_max = 0;
2124        err = 0;
2125        ret = wait_event_interruptible(ci->i_cap_wq,
2126                                       try_get_cap_refs(ci, need, want,
2127                                                        got, endoff,
2128                                                        &check_max, &err));
2129        if (err)
2130                ret = err;
2131        if (check_max)
2132                goto retry;
2133        return ret;
2134}
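
/*
 * A minimal caller sketch, modeled loosely on the fs/ceph read path
 * (details vary by caller; endoff only matters for writes):
 *
 *   int got = 0, ret;
 *
 *   ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD,
 *                       CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
 *                       &got, -1);
 *   if (ret < 0)
 *           return ret;
 *   ... perform the read, consulting 'got' for cached vs. sync I/O ...
 *   ceph_put_cap_refs(ci, got);
 */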
2135
2136/*
2137 * Take cap refs.  The caller must already know that at least one ref
2138 * is held on the caps in question, or we can't know this is safe.
2139 */
2140void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2141{
2142        spin_lock(&ci->i_ceph_lock);
2143        __take_cap_refs(ci, caps);
2144        spin_unlock(&ci->i_ceph_lock);
2145}
2146
2147/*
2148 * Release cap refs.
2149 *
2150 * If we released the last ref on any given cap, call ceph_check_caps
2151 * to release (or schedule a release).
2152 *
2153 * If we are releasing a WR cap (from a sync write), finalize any affected
2154 * cap_snap, and wake up any waiters.
2155 */
2156void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2157{
2158        struct inode *inode = &ci->vfs_inode;
2159        int last = 0, put = 0, flushsnaps = 0, wake = 0;
2160        struct ceph_cap_snap *capsnap;
2161
2162        spin_lock(&ci->i_ceph_lock);
2163        if (had & CEPH_CAP_PIN)
2164                --ci->i_pin_ref;
2165        if (had & CEPH_CAP_FILE_RD)
2166                if (--ci->i_rd_ref == 0)
2167                        last++;
2168        if (had & CEPH_CAP_FILE_CACHE)
2169                if (--ci->i_rdcache_ref == 0)
2170                        last++;
2171        if (had & CEPH_CAP_FILE_BUFFER) {
2172                if (--ci->i_wb_ref == 0) {
2173                        last++;
2174                        put++;
2175                }
2176                dout("put_cap_refs %p wb %d -> %d (?)\n",
2177                     inode, ci->i_wb_ref+1, ci->i_wb_ref);
2178        }
2179        if (had & CEPH_CAP_FILE_WR)
2180                if (--ci->i_wr_ref == 0) {
2181                        last++;
2182                        if (!list_empty(&ci->i_cap_snaps)) {
2183                                capsnap = list_first_entry(&ci->i_cap_snaps,
2184                                                     struct ceph_cap_snap,
2185                                                     ci_item);
2186                                if (capsnap->writing) {
2187                                        capsnap->writing = 0;
2188                                        flushsnaps =
2189                                                __ceph_finish_cap_snap(ci,
2190                                                                       capsnap);
2191                                        wake = 1;
2192                                }
2193                        }
2194                }
2195        spin_unlock(&ci->i_ceph_lock);
2196
2197        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2198             last ? " last" : "", put ? " put" : "");
2199
2200        if (last && !flushsnaps)
2201                ceph_check_caps(ci, 0, NULL);
2202        else if (flushsnaps)
2203                ceph_flush_snaps(ci);
2204        if (wake)
2205                wake_up_all(&ci->i_cap_wq);
2206        if (put)
2207                iput(inode);
2208}
2209
2210/*
2211 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2212 * context.  Adjust per-snap dirty page accounting as appropriate.
2213 * Once all dirty data for a cap_snap is flushed, flush snapped file
2214 * metadata back to the MDS.  If we dropped the last ref, call
2215 * ceph_check_caps.
2216 */
2217void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2218                                struct ceph_snap_context *snapc)
2219{
2220        struct inode *inode = &ci->vfs_inode;
2221        int last = 0;
2222        int complete_capsnap = 0;
2223        int drop_capsnap = 0;
2224        int found = 0;
2225        struct ceph_cap_snap *capsnap = NULL;
2226
2227        spin_lock(&ci->i_ceph_lock);
2228        ci->i_wrbuffer_ref -= nr;
2229        last = !ci->i_wrbuffer_ref;
2230
2231        if (ci->i_head_snapc == snapc) {
2232                ci->i_wrbuffer_ref_head -= nr;
2233                if (ci->i_wrbuffer_ref_head == 0 &&
2234                    ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
2235                        BUG_ON(!ci->i_head_snapc);
2236                        ceph_put_snap_context(ci->i_head_snapc);
2237                        ci->i_head_snapc = NULL;
2238                }
2239                dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2240                     inode,
2241                     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2242                     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2243                     last ? " LAST" : "");
2244        } else {
2245                list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2246                        if (capsnap->context == snapc) {
2247                                found = 1;
2248                                break;
2249                        }
2250                }
2251                BUG_ON(!found);
2252                capsnap->dirty_pages -= nr;
2253                if (capsnap->dirty_pages == 0) {
2254                        complete_capsnap = 1;
2255                        if (capsnap->dirty == 0)
2256                                /* cap writeback completed before we created
2257                                 * the cap_snap; no FLUSHSNAP is needed */
2258                                drop_capsnap = 1;
2259                }
2260                dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2261                     " snap %lld %d/%d -> %d/%d %s%s%s\n",
2262                     inode, capsnap, capsnap->context->seq,
2263                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2264                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2265                     last ? " (wrbuffer last)" : "",
2266                     complete_capsnap ? " (complete capsnap)" : "",
2267                     drop_capsnap ? " (drop capsnap)" : "");
2268                if (drop_capsnap) {
2269                        ceph_put_snap_context(capsnap->context);
2270                        list_del(&capsnap->ci_item);
2271                        list_del(&capsnap->flushing_item);
2272                        ceph_put_cap_snap(capsnap);
2273                }
2274        }
2275
2276        spin_unlock(&ci->i_ceph_lock);
2277
2278        if (last) {
2279                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2280                iput(inode);
2281        } else if (complete_capsnap) {
2282                ceph_flush_snaps(ci);
2283                wake_up_all(&ci->i_cap_wq);
2284        }
2285        if (drop_capsnap)
2286                iput(inode);
2287}
2288
2289/*
2290 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2291 * actually be a revocation if it specifies a smaller cap set.)
2292 *
2293 * caller holds s_mutex and i_ceph_lock, we drop both.
2294 *
2295 * Before returning we may call ceph_check_caps, either on the auth
2296 * cap only or on all caps, so that a revocation gets acked; if
2297 * writeback or invalidation must complete first, we queue that work
2298 * and just drop s_mutex instead.
2299 */
2300static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2301                             struct ceph_mds_session *session,
2302                             struct ceph_cap *cap,
2303                             struct ceph_buffer *xattr_buf)
2304                __releases(ci->i_ceph_lock)
2305{
2306        struct ceph_inode_info *ci = ceph_inode(inode);
2307        int mds = session->s_mds;
2308        int seq = le32_to_cpu(grant->seq);
2309        int newcaps = le32_to_cpu(grant->caps);
2310        int issued, implemented, used, wanted, dirty;
2311        u64 size = le64_to_cpu(grant->size);
2312        u64 max_size = le64_to_cpu(grant->max_size);
2313        struct timespec mtime, atime, ctime;
2314        int check_caps = 0;
2315        int wake = 0;
2316        int writeback = 0;
2317        int revoked_rdcache = 0;
2318        int queue_invalidate = 0;
2319
2320        dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2321             inode, cap, mds, seq, ceph_cap_string(newcaps));
2322        dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2323                inode->i_size);
2324
2325        /*
2326         * If CACHE is being revoked, and we have no dirty buffers,
2327         * try to invalidate (once).  (If there are dirty buffers, we
2328         * will invalidate _after_ writeback.)
2329         */
2330        if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2331            (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2332            !ci->i_wrbuffer_ref) {
2333                if (try_nonblocking_invalidate(inode) == 0) {
2334                        revoked_rdcache = 1;
2335                } else {
2336                        /* there were locked pages.. invalidate later
2337                           in a separate thread. */
2338                        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2339                                queue_invalidate = 1;
2340                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
2341                        }
2342                }
2343        }
2344
2345        /* side effects now are allowed */
2346
2347        issued = __ceph_caps_issued(ci, &implemented);
2348        issued |= implemented | __ceph_caps_dirty(ci);
2349
2350        cap->cap_gen = session->s_cap_gen;
2351
2352        __check_cap_issue(ci, cap, newcaps);
2353
2354        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2355                inode->i_mode = le32_to_cpu(grant->mode);
2356                inode->i_uid = le32_to_cpu(grant->uid);
2357                inode->i_gid = le32_to_cpu(grant->gid);
2358                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2359                     inode->i_uid, inode->i_gid);
2360        }
2361
2362        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2363                set_nlink(inode, le32_to_cpu(grant->nlink));
2364
2365        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2366                int len = le32_to_cpu(grant->xattr_len);
2367                u64 version = le64_to_cpu(grant->xattr_version);
2368
2369                if (version > ci->i_xattrs.version) {
2370                        dout(" got new xattrs v%llu on %p len %d\n",
2371                             version, inode, len);
2372                        if (ci->i_xattrs.blob)
2373                                ceph_buffer_put(ci->i_xattrs.blob);
2374                        ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2375                        ci->i_xattrs.version = version;
2376                }
2377        }
2378
2379        /* size/ctime/mtime/atime? */
2380        ceph_fill_file_size(inode, issued,
2381                            le32_to_cpu(grant->truncate_seq),
2382                            le64_to_cpu(grant->truncate_size), size);
2383        ceph_decode_timespec(&mtime, &grant->mtime);
2384        ceph_decode_timespec(&atime, &grant->atime);
2385        ceph_decode_timespec(&ctime, &grant->ctime);
2386        ceph_fill_file_time(inode, issued,
2387                            le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2388                            &atime);
2389
2390        /* max size increase? */
2391        if (max_size != ci->i_max_size) {
2392                dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2393                ci->i_max_size = max_size;
2394                if (max_size >= ci->i_wanted_max_size) {
2395                        ci->i_wanted_max_size = 0;  /* reset */
2396                        ci->i_requested_max_size = 0;
2397                }
2398                wake = 1;
2399        }
2400
2401        /* check cap bits */
2402        wanted = __ceph_caps_wanted(ci);
2403        used = __ceph_caps_used(ci);
2404        dirty = __ceph_caps_dirty(ci);
2405        dout(" my wanted = %s, used = %s, dirty %s\n",
2406             ceph_cap_string(wanted),
2407             ceph_cap_string(used),
2408             ceph_cap_string(dirty));
2409        if (wanted != le32_to_cpu(grant->wanted)) {
2410                dout("mds wanted %s -> %s\n",
2411                     ceph_cap_string(le32_to_cpu(grant->wanted)),
2412                     ceph_cap_string(wanted));
2413                grant->wanted = cpu_to_le32(wanted);
2414        }
2415
2416        cap->seq = seq;
2417
2418        /* file layout may have changed */
2419        ci->i_layout = grant->layout;
2420
2421        /* revocation, grant, or no-op? */
2422        if (cap->issued & ~newcaps) {
2423                int revoking = cap->issued & ~newcaps;
2424
2425                dout("revocation: %s -> %s (revoking %s)\n",
2426                     ceph_cap_string(cap->issued),
2427                     ceph_cap_string(newcaps),
2428                     ceph_cap_string(revoking));
2429                if (revoking & used & CEPH_CAP_FILE_BUFFER)
2430                        writeback = 1;  /* initiate writeback; will delay ack */
2431                else if (revoking == CEPH_CAP_FILE_CACHE &&
2432                         (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2433                         queue_invalidate)
2434                        ; /* do nothing yet, invalidation will be queued */
2435                else if (cap == ci->i_auth_cap)
2436                        check_caps = 1; /* check auth cap only */
2437                else
2438                        check_caps = 2; /* check all caps */
2439                cap->issued = newcaps;
2440                cap->implemented |= newcaps;
2441        } else if (cap->issued == newcaps) {
2442                dout("caps unchanged: %s -> %s\n",
2443                     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2444        } else {
2445                dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2446                     ceph_cap_string(newcaps));
2447                cap->issued = newcaps;
2448                cap->implemented |= newcaps; /* add bits only, to
2449                                              * avoid stepping on a
2450                                              * pending revocation */
2451                wake = 1;
2452        }
2453        BUG_ON(cap->issued & ~cap->implemented);
2454
2455        spin_unlock(&ci->i_ceph_lock);
2456        if (writeback)
2457                /*
2458                 * queue inode for writeback: we can't actually call
2459                 * filemap_write_and_wait, etc. from message handler
2460                 * context.
2461                 */
2462                ceph_queue_writeback(inode);
2463        if (queue_invalidate)
2464                ceph_queue_invalidate(inode);
2465        if (wake)
2466                wake_up_all(&ci->i_cap_wq);
2467
2468        if (check_caps == 1)
2469                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2470                                session);
2471        else if (check_caps == 2)
2472                ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2473        else
2474                mutex_unlock(&session->s_mutex);
2475}
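
/*
 * In summary, the grant handler above distinguishes three cases:
 *
 *   cap->issued & ~newcaps    revocation: start writeback, queue an
 *                             invalidate, or ack via ceph_check_caps
 *   cap->issued == newcaps    no change: just record the new seq
 *   otherwise                 grant: widen issued/implemented and wake
 *                             any waiters
 */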
2476
2477/*
2478 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2479 * MDS has been safely committed.
2480 */
2481static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2482                                 struct ceph_mds_caps *m,
2483                                 struct ceph_mds_session *session,
2484                                 struct ceph_cap *cap)
2485        __releases(ci->i_ceph_lock)
2486{
2487        struct ceph_inode_info *ci = ceph_inode(inode);
2488        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2489        unsigned seq = le32_to_cpu(m->seq);
2490        int dirty = le32_to_cpu(m->dirty);
2491        int cleaned = 0;
2492        int drop = 0;
2493        int i;
2494
2495        for (i = 0; i < CEPH_CAP_BITS; i++)
2496                if ((dirty & (1 << i)) &&
2497                    flush_tid == ci->i_cap_flush_tid[i])
2498                        cleaned |= 1 << i;
2499
2500        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2501             " flushing %s -> %s\n",
2502             inode, session->s_mds, seq, ceph_cap_string(dirty),
2503             ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2504             ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2505
2506        if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2507                goto out;
2508
2509        ci->i_flushing_caps &= ~cleaned;
2510
2511        spin_lock(&mdsc->cap_dirty_lock);
2512        if (ci->i_flushing_caps == 0) {
2513                list_del_init(&ci->i_flushing_item);
2514                if (!list_empty(&session->s_cap_flushing))
2515                        dout(" mds%d still flushing cap on %p\n",
2516                             session->s_mds,
2517                             &list_entry(session->s_cap_flushing.next,
2518                                         struct ceph_inode_info,
2519                                         i_flushing_item)->vfs_inode);
2520                mdsc->num_cap_flushing--;
2521                wake_up_all(&mdsc->cap_flushing_wq);
2522                dout(" inode %p now !flushing\n", inode);
2523
2524                if (ci->i_dirty_caps == 0) {
2525                        dout(" inode %p now clean\n", inode);
2526                        BUG_ON(!list_empty(&ci->i_dirty_item));
2527                        drop = 1;
2528                        if (ci->i_wrbuffer_ref_head == 0) {
2529                                BUG_ON(!ci->i_head_snapc);
2530                                ceph_put_snap_context(ci->i_head_snapc);
2531                                ci->i_head_snapc = NULL;
2532                        }
2533                } else {
2534                        BUG_ON(list_empty(&ci->i_dirty_item));
2535                }
2536        }
2537        spin_unlock(&mdsc->cap_dirty_lock);
2538        wake_up_all(&ci->i_cap_wq);
2539
2540out:
2541        spin_unlock(&ci->i_ceph_lock);
2542        if (drop)
2543                iput(inode);
2544}
2545
2546/*
2547 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
2548 * throw away our cap_snap.
2549 *
2550 * Caller holds s_mutex.
2551 */
2552static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2553                                     struct ceph_mds_caps *m,
2554                                     struct ceph_mds_session *session)
2555{
2556        struct ceph_inode_info *ci = ceph_inode(inode);
2557        u64 follows = le64_to_cpu(m->snap_follows);
2558        struct ceph_cap_snap *capsnap;
2559        int drop = 0;
2560
2561        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2562             inode, ci, session->s_mds, follows);
2563
2564        spin_lock(&ci->i_ceph_lock);
2565        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2566                if (capsnap->follows == follows) {
2567                        if (capsnap->flush_tid != flush_tid) {
2568                                dout(" cap_snap %p follows %lld tid %lld !="
2569                                     " %lld\n", capsnap, follows,
2570                                     flush_tid, capsnap->flush_tid);
2571                                break;
2572                        }
2573                        WARN_ON(capsnap->dirty_pages || capsnap->writing);
2574                        dout(" removing %p cap_snap %p follows %lld\n",
2575                             inode, capsnap, follows);
2576                        ceph_put_snap_context(capsnap->context);
2577                        list_del(&capsnap->ci_item);
2578                        list_del(&capsnap->flushing_item);
2579                        ceph_put_cap_snap(capsnap);
2580                        drop = 1;
2581                        break;
2582                } else {
2583                        dout(" skipping cap_snap %p follows %lld\n",
2584                             capsnap, capsnap->follows);
2585                }
2586        }
2587        spin_unlock(&ci->i_ceph_lock);
2588        if (drop)
2589                iput(inode);
2590}
2591
2592/*
2593 * Handle TRUNC from MDS, indicating file truncation.
2594 *
2595 * caller holds s_mutex.
2596 */
2597static void handle_cap_trunc(struct inode *inode,
2598                             struct ceph_mds_caps *trunc,
2599                             struct ceph_mds_session *session)
2600        __releases(ci->i_ceph_lock)
2601{
2602        struct ceph_inode_info *ci = ceph_inode(inode);
2603        int mds = session->s_mds;
2604        int seq = le32_to_cpu(trunc->seq);
2605        u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2606        u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2607        u64 size = le64_to_cpu(trunc->size);
2608        int implemented = 0;
2609        int dirty = __ceph_caps_dirty(ci);
2610        int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
2611        int queue_trunc = 0;
2612
2613        issued |= implemented | dirty;
2614
2615        dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2616             inode, mds, seq, truncate_size, truncate_seq);
2617        queue_trunc = ceph_fill_file_size(inode, issued,
2618                                          truncate_seq, truncate_size, size);
2619        spin_unlock(&ci->i_ceph_lock);
2620
2621        if (queue_trunc)
2622                ceph_queue_vmtruncate(inode);
2623}
2624
2625/*
2626 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
2627 * different one.  If we are the most recent migration we've seen (as
2628 * indicated by mseq), make note of the migrating cap bits for the
2629 * duration (until we see the corresponding IMPORT).
2630 *
2631 * caller holds s_mutex
2632 */
2633static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2634                              struct ceph_mds_session *session,
2635                              int *open_target_sessions)
2636{
2637        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2638        struct ceph_inode_info *ci = ceph_inode(inode);
2639        int mds = session->s_mds;
2640        unsigned mseq = le32_to_cpu(ex->migrate_seq);
2641        struct ceph_cap *cap = NULL, *t;
2642        struct rb_node *p;
2643        int remember = 1;
2644
2645        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2646             inode, ci, mds, mseq);
2647
2648        spin_lock(&ci->i_ceph_lock);
2649
2650        /* make sure we haven't seen a higher mseq */
2651        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2652                t = rb_entry(p, struct ceph_cap, ci_node);
2653                if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2654                        dout(" higher mseq on cap from mds%d\n",
2655                             t->session->s_mds);
2656                        remember = 0;
2657                }
2658                if (t->session->s_mds == mds)
2659                        cap = t;
2660        }
2661
2662        if (cap) {
2663                if (remember) {
2664                        /* make note */
2665                        ci->i_cap_exporting_mds = mds;
2666                        ci->i_cap_exporting_mseq = mseq;
2667                        ci->i_cap_exporting_issued = cap->issued;
2668
2669                        /*
2670                         * make sure we have open sessions with all possible
2671                         * export targets, so that we get the matching IMPORT
2672                         */
2673                        *open_target_sessions = 1;
2674
2675                        /*
2676                         * we can't flush dirty caps that we've seen the
2677                         * EXPORT but no IMPORT for
2678                         */
2679                        spin_lock(&mdsc->cap_dirty_lock);
2680                        if (!list_empty(&ci->i_dirty_item)) {
2681                                dout(" moving %p to cap_dirty_migrating\n",
2682                                     inode);
2683                                list_move(&ci->i_dirty_item,
2684                                          &mdsc->cap_dirty_migrating);
2685                        }
2686                        spin_unlock(&mdsc->cap_dirty_lock);
2687                }
2688                __ceph_remove_cap(cap);
2689        }
2690        /* else, we already released it */
2691
2692        spin_unlock(&ci->i_ceph_lock);
2693}
2694
2695/*
2696 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
2697 * clean them up.
2698 *
2699 * caller holds s_mutex.
2700 */
2701static void handle_cap_import(struct ceph_mds_client *mdsc,
2702                              struct inode *inode, struct ceph_mds_caps *im,
2703                              struct ceph_mds_session *session,
2704                              void *snaptrace, int snaptrace_len)
2705{
2706        struct ceph_inode_info *ci = ceph_inode(inode);
2707        int mds = session->s_mds;
2708        unsigned issued = le32_to_cpu(im->caps);
2709        unsigned wanted = le32_to_cpu(im->wanted);
2710        unsigned seq = le32_to_cpu(im->seq);
2711        unsigned mseq = le32_to_cpu(im->migrate_seq);
2712        u64 realmino = le64_to_cpu(im->realm);
2713        u64 cap_id = le64_to_cpu(im->cap_id);
2714
2715        if (ci->i_cap_exporting_mds >= 0 &&
2716            ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2717                dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2718                     " - cleared exporting from mds%d\n",
2719                     inode, ci, mds, mseq,
2720                     ci->i_cap_exporting_mds);
2721                ci->i_cap_exporting_issued = 0;
2722                ci->i_cap_exporting_mseq = 0;
2723                ci->i_cap_exporting_mds = -1;
2724
2725                spin_lock(&mdsc->cap_dirty_lock);
2726                if (!list_empty(&ci->i_dirty_item)) {
2727                        dout(" moving %p back to cap_dirty\n", inode);
2728                        list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
2729                }
2730                spin_unlock(&mdsc->cap_dirty_lock);
2731        } else {
2732                dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2733                     inode, ci, mds, mseq);
2734        }
2735
2736        down_write(&mdsc->snap_rwsem);
2737        ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2738                               false);
2739        downgrade_write(&mdsc->snap_rwsem);
2740        ceph_add_cap(inode, session, cap_id, -1,
2741                     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2742                     NULL /* no caps context */);
2743        kick_flushing_inode_caps(mdsc, session, inode);
2744        up_read(&mdsc->snap_rwsem);
2745
2746        /* make sure we re-request max_size, if necessary */
2747        spin_lock(&ci->i_ceph_lock);
2748        ci->i_requested_max_size = 0;
2749        spin_unlock(&ci->i_ceph_lock);
2750}
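
/*
 * Note that the import path above holds snap_rwsem for write only
 * while the snap trace is applied, then downgrades so that
 * ceph_add_cap() and kick_flushing_inode_caps() run under the read
 * lock, as the cap-sending paths elsewhere in this file do.
 */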
2751
2752/*
2753 * Handle a caps message from the MDS.
2754 *
2755 * Identify the appropriate session, inode, and call the right handler
2756 * based on the cap op.
2757 */
2758void ceph_handle_caps(struct ceph_mds_session *session,
2759                      struct ceph_msg *msg)
2760{
2761        struct ceph_mds_client *mdsc = session->s_mdsc;
2762        struct super_block *sb = mdsc->fsc->sb;
2763        struct inode *inode;
2764        struct ceph_inode_info *ci;
2765        struct ceph_cap *cap;
2766        struct ceph_mds_caps *h;
2767        int mds = session->s_mds;
2768        int op;
2769        u32 seq, mseq;
2770        struct ceph_vino vino;
2771        u64 cap_id;
2772        u64 size, max_size;
2773        u64 tid;
2774        void *snaptrace;
2775        size_t snaptrace_len;
2776        void *flock;
2777        u32 flock_len;
2778        int open_target_sessions = 0;
2779
2780        dout("handle_caps from mds%d\n", mds);
2781
2782        /* decode */
2783        tid = le64_to_cpu(msg->hdr.tid);
2784        if (msg->front.iov_len < sizeof(*h))
2785                goto bad;
2786        h = msg->front.iov_base;
2787        op = le32_to_cpu(h->op);
2788        vino.ino = le64_to_cpu(h->ino);
2789        vino.snap = CEPH_NOSNAP;
2790        cap_id = le64_to_cpu(h->cap_id);
2791        seq = le32_to_cpu(h->seq);
2792        mseq = le32_to_cpu(h->migrate_seq);
2793        size = le64_to_cpu(h->size);
2794        max_size = le64_to_cpu(h->max_size);
2795
2796        snaptrace = h + 1;
2797        snaptrace_len = le32_to_cpu(h->snap_trace_len);
2798
2799        if (le16_to_cpu(msg->hdr.version) >= 2) {
2800                void *p, *end;
2801
2802                p = snaptrace + snaptrace_len;
2803                end = msg->front.iov_base + msg->front.iov_len;
2804                ceph_decode_32_safe(&p, end, flock_len, bad);
2805                flock = p;
2806        } else {
2807                flock = NULL;
2808                flock_len = 0;
2809        }
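
        /*
         * Wire layout being decoded here, as a sketch (v2+ messages;
         * v1 stops after the snap trace):
         *
         *   struct ceph_mds_caps h;
         *   u8     snaptrace[h.snap_trace_len];
         *   __le32 flock_len;
         *   u8     flock[flock_len];
         */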
2810
2811        mutex_lock(&session->s_mutex);
2812        session->s_seq++;
2813        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2814             (unsigned)seq);
2815
2816        /* lookup ino */
2817        inode = ceph_find_inode(sb, vino);
2818        ci = ceph_inode(inode);
2819        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2820             vino.snap, inode);
2821        if (!inode) {
2822                dout(" i don't have ino %llx\n", vino.ino);
2823
2824                if (op == CEPH_CAP_OP_IMPORT)
2825                        __queue_cap_release(session, vino.ino, cap_id,
2826                                            mseq, seq);
2827                goto flush_cap_releases;
2828        }
2829
2830        /* these will work even if we don't have a cap yet */
2831        switch (op) {
2832        case CEPH_CAP_OP_FLUSHSNAP_ACK:
2833                handle_cap_flushsnap_ack(inode, tid, h, session);
2834                goto done;
2835
2836        case CEPH_CAP_OP_EXPORT:
2837                handle_cap_export(inode, h, session, &open_target_sessions);
2838                goto done;
2839
2840        case CEPH_CAP_OP_IMPORT:
2841                handle_cap_import(mdsc, inode, h, session,
2842                                  snaptrace, snaptrace_len);
2843                ceph_check_caps(ceph_inode(inode), 0, session);
2844                goto done_unlocked;
2845        }
2846
2847        /* the rest require a cap */
2848        spin_lock(&ci->i_ceph_lock);
2849        cap = __get_cap_for_mds(ceph_inode(inode), mds);
2850        if (!cap) {
2851                dout(" no cap on %p ino %llx.%llx from mds%d\n",
2852                     inode, ceph_ino(inode), ceph_snap(inode), mds);
2853                spin_unlock(&ci->i_ceph_lock);
2854                goto flush_cap_releases;
2855        }
2856
2857        /* note that each of these drops i_ceph_lock for us */
2858        switch (op) {
2859        case CEPH_CAP_OP_REVOKE:
2860        case CEPH_CAP_OP_GRANT:
2861                handle_cap_grant(inode, h, session, cap, msg->middle);
2862                goto done_unlocked;
2863
2864        case CEPH_CAP_OP_FLUSH_ACK:
2865                handle_cap_flush_ack(inode, tid, h, session, cap);
2866                break;
2867
2868        case CEPH_CAP_OP_TRUNC:
2869                handle_cap_trunc(inode, h, session);
2870                break;
2871
2872        default:
2873                spin_unlock(&ci->i_ceph_lock);
2874                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2875                       ceph_cap_op_name(op));
2876        }
2877
2878        goto done;
2879
2880flush_cap_releases:
2881        /*
2882         * send any full release messages to try to move things
2883         * along for the mds (which clearly thinks we still have this
2884         * cap).
2885         */
2886        ceph_add_cap_releases(mdsc, session);
2887        ceph_send_cap_releases(mdsc, session);
2888
2889done:
2890        mutex_unlock(&session->s_mutex);
2891done_unlocked:
2892        if (inode)
2893                iput(inode);
2894        if (open_target_sessions)
2895                ceph_mdsc_open_export_target_sessions(mdsc, session);
2896        return;
2897
2898bad:
2899        pr_err("ceph_handle_caps: corrupt message\n");
2900        ceph_msg_dump(msg);
2901        return;
2902}
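
/*
 * The decode step above walks a fixed struct ceph_mds_caps header
 * followed by variable-length payloads, bounds-checking each read
 * against the end of the front buffer (ceph_decode_32_safe() jumps to
 * the 'bad' label on a short message).  A self-contained sketch of
 * that pattern, with the hypothetical example_decode_blob() standing
 * in for the real decode macros:
 */
static inline int example_decode_blob(void **p, void *end,
                                      void **blob, u32 *blob_len)
{
        __le32 len;

        /* the 4-byte length field itself must fit... */
        if (end - *p < (ptrdiff_t)sizeof(len))
                return -ERANGE;
        memcpy(&len, *p, sizeof(len));
        *p += sizeof(len);
        *blob_len = le32_to_cpu(len);

        /* ...and so must the payload it describes */
        if ((size_t)(end - *p) < *blob_len)
                return -ERANGE;
        *blob = *p;
        *p += *blob_len;
        return 0;
}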
2903
2904/*
2905 * Delayed work handler to process the end of the delayed cap release LRU list.
2906 */
2907void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
2908{
2909        struct ceph_inode_info *ci;
2910        int flags = CHECK_CAPS_NODELAY;
2911
2912        dout("check_delayed_caps\n");
2913        while (1) {
2914                spin_lock(&mdsc->cap_delay_lock);
2915                if (list_empty(&mdsc->cap_delay_list))
2916                        break;
2917                ci = list_first_entry(&mdsc->cap_delay_list,
2918                                      struct ceph_inode_info,
2919                                      i_cap_delay_list);
2920                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
2921                    time_before(jiffies, ci->i_hold_caps_max))
2922                        break;
2923                list_del_init(&ci->i_cap_delay_list);
2924                spin_unlock(&mdsc->cap_delay_lock);
2925                dout("check_delayed_caps on %p\n", &ci->vfs_inode);
2926                ceph_check_caps(ci, flags, NULL);
2927        }
2928        spin_unlock(&mdsc->cap_delay_lock);
2929}
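
/*
 * The lock dance above is a standard drain pattern: cap_delay_lock is
 * re-taken and the list head re-read on every pass, because
 * ceph_check_caps() can sleep and other tasks may queue or remove
 * entries in the meantime.  A generic sketch of the idiom (struct
 * example_item and example_pop() are hypothetical):
 */
struct example_item {
        struct list_head node;
};

static inline struct example_item *example_pop(spinlock_t *lock,
                                               struct list_head *head)
{
        struct example_item *it = NULL;

        spin_lock(lock);
        if (!list_empty(head)) {
                it = list_first_entry(head, struct example_item, node);
                /* detach it so we can work on it after unlocking */
                list_del_init(&it->node);
        }
        spin_unlock(lock);
        return it;
}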
2930
2931/*
2932 * Flush all dirty caps to the mds
2933 */
2934void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2935{
2936        struct ceph_inode_info *ci;
2937        struct inode *inode;
2938
2939        dout("flush_dirty_caps\n");
2940        spin_lock(&mdsc->cap_dirty_lock);
2941        while (!list_empty(&mdsc->cap_dirty)) {
2942                ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2943                                      i_dirty_item);
2944                inode = &ci->vfs_inode;
2945                ihold(inode);
2946                dout("flush_dirty_caps %p\n", inode);
2947                spin_unlock(&mdsc->cap_dirty_lock);
2948                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
2949                iput(inode);
2950                spin_lock(&mdsc->cap_dirty_lock);
2951        }
2952        spin_unlock(&mdsc->cap_dirty_lock);
2953        dout("flush_dirty_caps done\n");
2954}
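
/*
 * Note the ihold() before cap_dirty_lock is dropped above:
 * ceph_check_caps() can sleep, and without the extra reference the
 * inode could be evicted in the meantime.  The matching iput() also
 * happens outside the spinlock, since it may itself sleep.  Minimal
 * sketch of the idiom (example_pin_and_work() is hypothetical):
 */
static inline void example_pin_and_work(struct inode *inode,
                                        spinlock_t *lock)
{
        ihold(inode);           /* caller must already hold a reference */
        spin_unlock(lock);
        /* ... do work that may sleep ... */
        iput(inode);            /* may drop the final reference */
        spin_lock(lock);
}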
2955
2956/*
2957 * Drop an open file reference.  If this was the last open file
2958 * reference, we may need to release capabilities to the MDS (or
2959 * schedule their delayed release).
2960 */
2961void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2962{
2963        struct inode *inode = &ci->vfs_inode;
2964        int last = 0;
2965
2966        spin_lock(&ci->i_ceph_lock);
2967        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2968             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2969        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2970        if (--ci->i_nr_by_mode[fmode] == 0)
2971                last++;
2972        spin_unlock(&ci->i_ceph_lock);
2973
2974        if (last && ci->i_vino.snap == CEPH_NOSNAP)
2975                ceph_check_caps(ci, 0, NULL);
2976}
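
/*
 * ceph_put_fmode() balances the ceph_get_fmode() taken when a file is
 * opened; i_nr_by_mode[] counts opens per access mode so the client
 * knows which caps it still wants.  A hedged usage sketch (the pairing
 * below is simplified; the real callers are the open and release
 * paths):
 */
static inline void example_open_then_release(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int fmode = CEPH_FILE_MODE_RD;

        ceph_get_fmode(ci, fmode);      /* at open time */
        /* ... file is in use ... */
        ceph_put_fmode(ci, fmode);      /* at release time */
}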
2977
2978/*
2979 * Helpers for embedding cap and dentry lease releases into mds
2980 * requests.
2981 *
2982 * @force is used by dentry_release (below) to force inclusion of a
2983 * record for the directory inode, even when there aren't any caps to
2984 * drop.
2985 */
2986int ceph_encode_inode_release(void **p, struct inode *inode,
2987                              int mds, int drop, int unless, int force)
2988{
2989        struct ceph_inode_info *ci = ceph_inode(inode);
2990        struct ceph_cap *cap;
2991        struct ceph_mds_request_release *rel = *p;
2992        int used, dirty;
2993        int ret = 0;
2994
2995        spin_lock(&ci->i_ceph_lock);
2996        used = __ceph_caps_used(ci);
2997        dirty = __ceph_caps_dirty(ci);
2998
2999        dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3000             inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3001             ceph_cap_string(unless));
3002
3003        /* only drop unused, clean caps */
3004        drop &= ~(used | dirty);
3005
3006        cap = __get_cap_for_mds(ci, mds);
3007        if (cap && __cap_is_valid(cap)) {
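                /*
                 * Two levels of checks: the outer condition below
                 * decides whether to emit a release record at all
                 * (either @force is set, or some of @drop is actually
                 * issued and not protected by @unless); the inner,
                 * identical test then distinguishes a real cap
                 * downgrade from a forced, no-op record.
                 */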
3008                if (force ||
3009                    ((cap->issued & drop) &&
3010                     (cap->issued & unless) == 0)) {
3011                        if ((cap->issued & drop) &&
3012                            (cap->issued & unless) == 0) {
3013                                dout("encode_inode_release %p cap %p %s -> "
3014                                     "%s\n", inode, cap,
3015                                     ceph_cap_string(cap->issued),
3016                                     ceph_cap_string(cap->issued & ~drop));
3017                                cap->issued &= ~drop;
3018                                cap->implemented &= ~drop;
3019                                if (ci->i_ceph_flags & CEPH_I_NODELAY) {
3020                                        int wanted = __ceph_caps_wanted(ci);
3021                                        dout("  wanted %s -> %s (act %s)\n",
3022                                             ceph_cap_string(cap->mds_wanted),
3023                                             ceph_cap_string(cap->mds_wanted &
3024                                                             ~wanted),
3025                                             ceph_cap_string(wanted));
3026                                        cap->mds_wanted &= wanted;
3027                                }
3028                        } else {
3029                                dout("encode_inode_release %p cap %p %s"
3030                                     " (force)\n", inode, cap,
3031                                     ceph_cap_string(cap->issued));
3032                        }
3033
3034                        rel->ino = cpu_to_le64(ceph_ino(inode));
3035                        rel->cap_id = cpu_to_le64(cap->cap_id);
3036                        rel->seq = cpu_to_le32(cap->seq);
3037                        rel->issue_seq = cpu_to_le32(cap->issue_seq);
3038                        rel->mseq = cpu_to_le32(cap->mseq);
3039                        rel->caps = cpu_to_le32(cap->issued);
3040                        rel->wanted = cpu_to_le32(cap->mds_wanted);
3041                        rel->dname_len = 0;
3042                        rel->dname_seq = 0;
3043                        *p += sizeof(*rel);
3044                        ret = 1;
3045                } else {
3046                        dout("encode_inode_release %p cap %p %s\n",
3047                             inode, cap, ceph_cap_string(cap->issued));
3048                }
3049        }
3050        spin_unlock(&ci->i_ceph_lock);
3051        return ret;
3052}
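
/*
 * Callers hand ceph_encode_inode_release() a cursor into a
 * preallocated request buffer; when a record is emitted, *p is
 * advanced past it and 1 is returned.  A hedged sketch of the calling
 * convention (example_append_release() and its buffer sizing are
 * hypothetical; the real callers build MDS requests):
 */
static inline int example_append_release(void *buf, size_t len,
                                         struct inode *inode, int mds)
{
        void *p = buf;

        /* room for one record must be reserved up front */
        if (len < sizeof(struct ceph_mds_request_release))
                return 0;
        return ceph_encode_inode_release(&p, inode, mds,
                                         CEPH_CAP_LINK_SHARED, 0, 0);
}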
3053
3054int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3055                               int mds, int drop, int unless)
3056{
3057        struct inode *dir = dentry->d_parent->d_inode;
3058        struct ceph_mds_request_release *rel = *p;
3059        struct ceph_dentry_info *di = ceph_dentry(dentry);
3060        int force = 0;
3061        int ret;
3062
3063        /*
3064         * force a record for the directory caps if we have a dentry lease.
3065         * this is racy (can't take i_ceph_lock and d_lock together), but it
3066         * doesn't have to be perfect; the mds will revoke anything we don't
3067         * release.
3068         */
3069        spin_lock(&dentry->d_lock);
3070        if (di->lease_session && di->lease_session->s_mds == mds)
3071                force = 1;
3072        spin_unlock(&dentry->d_lock);
3073
3074        ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3075
3076        spin_lock(&dentry->d_lock);
3077        if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3078                dout("encode_dentry_release %p mds%d seq %d\n",
3079                     dentry, mds, (int)di->lease_seq);
3080                rel->dname_len = cpu_to_le32(dentry->d_name.len);
3081                memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3082                *p += dentry->d_name.len;
3083                rel->dname_seq = cpu_to_le32(di->lease_seq);
3084                __ceph_mdsc_drop_dentry_lease(dentry);
3085        }
3086        spin_unlock(&dentry->d_lock);
3087        return ret;
3088}
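
/*
 * On the wire, a dentry release is simply an inode release record with
 * the name appended:
 *
 *   struct ceph_mds_request_release rel;  (dname_len and dname_seq set)
 *   char name[rel.dname_len];             (not NUL-terminated)
 *
 * which is why the name is memcpy()ed at *p immediately after the
 * record that ceph_encode_inode_release() wrote.
 */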
3089