linux/fs/ceph/caps.c
   1#include <linux/ceph/ceph_debug.h>
   2
   3#include <linux/fs.h>
   4#include <linux/kernel.h>
   5#include <linux/sched.h>
   6#include <linux/slab.h>
   7#include <linux/vmalloc.h>
   8#include <linux/wait.h>
   9#include <linux/writeback.h>
  10
  11#include "super.h"
  12#include "mds_client.h"
  13#include "cache.h"
  14#include <linux/ceph/decode.h>
  15#include <linux/ceph/messenger.h>
  16
  17/*
  18 * Capability management
  19 *
  20 * The Ceph metadata servers control client access to inode metadata
  21 * and file data by issuing capabilities, granting clients permission
   22 * to read and/or write both inode fields and file data to OSDs
  23 * (storage nodes).  Each capability consists of a set of bits
  24 * indicating which operations are allowed.
  25 *
  26 * If the client holds a *_SHARED cap, the client has a coherent value
  27 * that can be safely read from the cached inode.
  28 *
   29 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
  30 * client is allowed to change inode attributes (e.g., file size,
  31 * mtime), note its dirty state in the ceph_cap, and asynchronously
  32 * flush that metadata change to the MDS.
  33 *
  34 * In the event of a conflicting operation (perhaps by another
  35 * client), the MDS will revoke the conflicting client capabilities.
  36 *
  37 * In order for a client to cache an inode, it must hold a capability
   38 * from at least one MDS server.  When inodes are released, release
  39 * notifications are batched and periodically sent en masse to the MDS
  40 * cluster to release server state.
  41 */
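
/*
 * A rough sketch of the bit layout (the authoritative definitions live
 * in ceph_fs.h, so treat this as orientation only): a cap word packs
 * generic bits -- shared, excl, and for the FILE field also cache, rd,
 * wr, buffer, lazyio -- into per-field slots (AUTH, LINK, XATTR, FILE)
 * via the CEPH_CAP_SAUTH/SLINK/SXATTR/SFILE shifts, plus a low PIN
 * bit.  ceph_cap_string() below simply decodes that packing for debug
 * output.
 */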
  42
  43
  44/*
  45 * Generate readable cap strings for debugging output.
  46 */
  47#define MAX_CAP_STR 20
  48static char cap_str[MAX_CAP_STR][40];
  49static DEFINE_SPINLOCK(cap_str_lock);
  50static int last_cap_str;
  51
  52static char *gcap_string(char *s, int c)
  53{
  54        if (c & CEPH_CAP_GSHARED)
  55                *s++ = 's';
  56        if (c & CEPH_CAP_GEXCL)
  57                *s++ = 'x';
  58        if (c & CEPH_CAP_GCACHE)
  59                *s++ = 'c';
  60        if (c & CEPH_CAP_GRD)
  61                *s++ = 'r';
  62        if (c & CEPH_CAP_GWR)
  63                *s++ = 'w';
  64        if (c & CEPH_CAP_GBUFFER)
  65                *s++ = 'b';
  66        if (c & CEPH_CAP_GLAZYIO)
  67                *s++ = 'l';
  68        return s;
  69}
  70
  71const char *ceph_cap_string(int caps)
  72{
  73        int i;
  74        char *s;
  75        int c;
  76
  77        spin_lock(&cap_str_lock);
  78        i = last_cap_str++;
  79        if (last_cap_str == MAX_CAP_STR)
  80                last_cap_str = 0;
  81        spin_unlock(&cap_str_lock);
  82
  83        s = cap_str[i];
  84
  85        if (caps & CEPH_CAP_PIN)
  86                *s++ = 'p';
  87
  88        c = (caps >> CEPH_CAP_SAUTH) & 3;
  89        if (c) {
  90                *s++ = 'A';
  91                s = gcap_string(s, c);
  92        }
  93
  94        c = (caps >> CEPH_CAP_SLINK) & 3;
  95        if (c) {
  96                *s++ = 'L';
  97                s = gcap_string(s, c);
  98        }
  99
 100        c = (caps >> CEPH_CAP_SXATTR) & 3;
 101        if (c) {
 102                *s++ = 'X';
 103                s = gcap_string(s, c);
 104        }
 105
 106        c = caps >> CEPH_CAP_SFILE;
 107        if (c) {
 108                *s++ = 'F';
 109                s = gcap_string(s, c);
 110        }
 111
 112        if (s == cap_str[i])
 113                *s++ = '-';
 114        *s = 0;
 115        return cap_str[i];
 116}
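
/*
 * Example output (derived from the encoding above, not captured from a
 * live MDS): caps = PIN | AUTH_SHARED | FILE_SHARED | FILE_CACHE |
 * FILE_RD renders as "pAsFscr".
 */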
 117
 118void ceph_caps_init(struct ceph_mds_client *mdsc)
 119{
 120        INIT_LIST_HEAD(&mdsc->caps_list);
 121        spin_lock_init(&mdsc->caps_list_lock);
 122}
 123
 124void ceph_caps_finalize(struct ceph_mds_client *mdsc)
 125{
 126        struct ceph_cap *cap;
 127
 128        spin_lock(&mdsc->caps_list_lock);
 129        while (!list_empty(&mdsc->caps_list)) {
 130                cap = list_first_entry(&mdsc->caps_list,
 131                                       struct ceph_cap, caps_item);
 132                list_del(&cap->caps_item);
 133                kmem_cache_free(ceph_cap_cachep, cap);
 134        }
 135        mdsc->caps_total_count = 0;
 136        mdsc->caps_avail_count = 0;
 137        mdsc->caps_use_count = 0;
 138        mdsc->caps_reserve_count = 0;
 139        mdsc->caps_min_count = 0;
 140        spin_unlock(&mdsc->caps_list_lock);
 141}
 142
 143void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
 144{
 145        spin_lock(&mdsc->caps_list_lock);
 146        mdsc->caps_min_count += delta;
 147        BUG_ON(mdsc->caps_min_count < 0);
 148        spin_unlock(&mdsc->caps_list_lock);
 149}
 150
 151void ceph_reserve_caps(struct ceph_mds_client *mdsc,
 152                      struct ceph_cap_reservation *ctx, int need)
 153{
 154        int i;
 155        struct ceph_cap *cap;
 156        int have;
 157        int alloc = 0;
 158        LIST_HEAD(newcaps);
 159
 160        dout("reserve caps ctx=%p need=%d\n", ctx, need);
 161
 162        /* first reserve any caps that are already allocated */
 163        spin_lock(&mdsc->caps_list_lock);
 164        if (mdsc->caps_avail_count >= need)
 165                have = need;
 166        else
 167                have = mdsc->caps_avail_count;
 168        mdsc->caps_avail_count -= have;
 169        mdsc->caps_reserve_count += have;
 170        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 171                                         mdsc->caps_reserve_count +
 172                                         mdsc->caps_avail_count);
 173        spin_unlock(&mdsc->caps_list_lock);
 174
 175        for (i = have; i < need; i++) {
 176                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
 177                if (!cap)
 178                        break;
 179                list_add(&cap->caps_item, &newcaps);
 180                alloc++;
 181        }
 182        /* we didn't manage to reserve as much as we needed */
 183        if (have + alloc != need)
 184                pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
 185                        ctx, need, have + alloc);
 186
 187        spin_lock(&mdsc->caps_list_lock);
 188        mdsc->caps_total_count += alloc;
 189        mdsc->caps_reserve_count += alloc;
 190        list_splice(&newcaps, &mdsc->caps_list);
 191
 192        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 193                                         mdsc->caps_reserve_count +
 194                                         mdsc->caps_avail_count);
 195        spin_unlock(&mdsc->caps_list_lock);
 196
 197        ctx->count = need;
 198        dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
 199             ctx, mdsc->caps_total_count, mdsc->caps_use_count,
 200             mdsc->caps_reserve_count, mdsc->caps_avail_count);
 201}
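
/*
 * A worked example of the accounting above (numbers illustrative
 * only): with 4 caps sitting on the free list and need=10, we move 4
 * from avail to reserve, allocate 6 more, and finish with
 * total == used + reserve + avail still holding, as the BUG_ON()s
 * assert.
 */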
 202
 203int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
 204                        struct ceph_cap_reservation *ctx)
 205{
 206        dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
 207        if (ctx->count) {
 208                spin_lock(&mdsc->caps_list_lock);
 209                BUG_ON(mdsc->caps_reserve_count < ctx->count);
 210                mdsc->caps_reserve_count -= ctx->count;
 211                mdsc->caps_avail_count += ctx->count;
 212                ctx->count = 0;
 213                dout("unreserve caps %d = %d used + %d resv + %d avail\n",
 214                     mdsc->caps_total_count, mdsc->caps_use_count,
 215                     mdsc->caps_reserve_count, mdsc->caps_avail_count);
 216                BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 217                                                 mdsc->caps_reserve_count +
 218                                                 mdsc->caps_avail_count);
 219                spin_unlock(&mdsc->caps_list_lock);
 220        }
 221        return 0;
 222}
 223
 224static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
 225                                struct ceph_cap_reservation *ctx)
 226{
 227        struct ceph_cap *cap = NULL;
 228
 229        /* temporary, until we do something about cap import/export */
 230        if (!ctx) {
 231                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
 232                if (cap) {
 233                        spin_lock(&mdsc->caps_list_lock);
 234                        mdsc->caps_use_count++;
 235                        mdsc->caps_total_count++;
 236                        spin_unlock(&mdsc->caps_list_lock);
 237                }
 238                return cap;
 239        }
 240
 241        spin_lock(&mdsc->caps_list_lock);
 242        dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
 243             ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
 244             mdsc->caps_reserve_count, mdsc->caps_avail_count);
 245        BUG_ON(!ctx->count);
 246        BUG_ON(ctx->count > mdsc->caps_reserve_count);
 247        BUG_ON(list_empty(&mdsc->caps_list));
 248
 249        ctx->count--;
 250        mdsc->caps_reserve_count--;
 251        mdsc->caps_use_count++;
 252
 253        cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
 254        list_del(&cap->caps_item);
 255
 256        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 257               mdsc->caps_reserve_count + mdsc->caps_avail_count);
 258        spin_unlock(&mdsc->caps_list_lock);
 259        return cap;
 260}
 261
 262void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
 263{
 264        spin_lock(&mdsc->caps_list_lock);
 265        dout("put_cap %p %d = %d used + %d resv + %d avail\n",
 266             cap, mdsc->caps_total_count, mdsc->caps_use_count,
 267             mdsc->caps_reserve_count, mdsc->caps_avail_count);
 268        mdsc->caps_use_count--;
 269        /*
 270         * Keep some preallocated caps around (ceph_min_count), to
 271         * avoid lots of free/alloc churn.
 272         */
 273        if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
 274                                      mdsc->caps_min_count) {
 275                mdsc->caps_total_count--;
 276                kmem_cache_free(ceph_cap_cachep, cap);
 277        } else {
 278                mdsc->caps_avail_count++;
 279                list_add(&cap->caps_item, &mdsc->caps_list);
 280        }
 281
 282        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 283               mdsc->caps_reserve_count + mdsc->caps_avail_count);
 284        spin_unlock(&mdsc->caps_list_lock);
 285}
 286
 287void ceph_reservation_status(struct ceph_fs_client *fsc,
 288                             int *total, int *avail, int *used, int *reserved,
 289                             int *min)
 290{
 291        struct ceph_mds_client *mdsc = fsc->mdsc;
 292
 293        if (total)
 294                *total = mdsc->caps_total_count;
 295        if (avail)
 296                *avail = mdsc->caps_avail_count;
 297        if (used)
 298                *used = mdsc->caps_use_count;
 299        if (reserved)
 300                *reserved = mdsc->caps_reserve_count;
 301        if (min)
 302                *min = mdsc->caps_min_count;
 303}
 304
 305/*
 306 * Find ceph_cap for given mds, if any.
 307 *
 308 * Called with i_ceph_lock held.
 309 */
 310static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 311{
 312        struct ceph_cap *cap;
 313        struct rb_node *n = ci->i_caps.rb_node;
 314
 315        while (n) {
 316                cap = rb_entry(n, struct ceph_cap, ci_node);
 317                if (mds < cap->mds)
 318                        n = n->rb_left;
 319                else if (mds > cap->mds)
 320                        n = n->rb_right;
 321                else
 322                        return cap;
 323        }
 324        return NULL;
 325}
 326
 327struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 328{
 329        struct ceph_cap *cap;
 330
 331        spin_lock(&ci->i_ceph_lock);
 332        cap = __get_cap_for_mds(ci, mds);
 333        spin_unlock(&ci->i_ceph_lock);
 334        return cap;
 335}
 336
 337/*
 338 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 339 */
 340static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
 341{
 342        struct ceph_cap *cap;
 343        int mds = -1;
 344        struct rb_node *p;
 345
 346        /* prefer mds with WR|BUFFER|EXCL caps */
 347        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 348                cap = rb_entry(p, struct ceph_cap, ci_node);
 349                mds = cap->mds;
 350                if (cap->issued & (CEPH_CAP_FILE_WR |
 351                                   CEPH_CAP_FILE_BUFFER |
 352                                   CEPH_CAP_FILE_EXCL))
 353                        break;
 354        }
 355        return mds;
 356}
 357
 358int ceph_get_cap_mds(struct inode *inode)
 359{
 360        struct ceph_inode_info *ci = ceph_inode(inode);
 361        int mds;
 362        spin_lock(&ci->i_ceph_lock);
 363        mds = __ceph_get_cap_mds(ceph_inode(inode));
 364        spin_unlock(&ci->i_ceph_lock);
 365        return mds;
 366}
 367
 368/*
 369 * Called under i_ceph_lock.
 370 */
 371static void __insert_cap_node(struct ceph_inode_info *ci,
 372                              struct ceph_cap *new)
 373{
 374        struct rb_node **p = &ci->i_caps.rb_node;
 375        struct rb_node *parent = NULL;
 376        struct ceph_cap *cap = NULL;
 377
 378        while (*p) {
 379                parent = *p;
 380                cap = rb_entry(parent, struct ceph_cap, ci_node);
 381                if (new->mds < cap->mds)
 382                        p = &(*p)->rb_left;
 383                else if (new->mds > cap->mds)
 384                        p = &(*p)->rb_right;
 385                else
 386                        BUG();
 387        }
 388
 389        rb_link_node(&new->ci_node, parent, p);
 390        rb_insert_color(&new->ci_node, &ci->i_caps);
 391}
 392
 393/*
 394 * (re)set cap hold timeouts, which control the delayed release
 395 * of unused caps back to the MDS.  Should be called on cap use.
 396 */
 397static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
 398                               struct ceph_inode_info *ci)
 399{
 400        struct ceph_mount_options *ma = mdsc->fsc->mount_options;
 401
 402        ci->i_hold_caps_min = round_jiffies(jiffies +
 403                                            ma->caps_wanted_delay_min * HZ);
 404        ci->i_hold_caps_max = round_jiffies(jiffies +
 405                                            ma->caps_wanted_delay_max * HZ);
 406        dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
 407             ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
 408}
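
/*
 * caps_wanted_delay_min/max come from the mount options; the defaults
 * are on the order of 5 and 60 seconds respectively, though the
 * authoritative values are the mount option defaults, not this
 * comment.
 */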
 409
 410/*
 411 * (Re)queue cap at the end of the delayed cap release list.
 412 *
 413 * If I_FLUSH is set, leave the inode at the front of the list.
 414 *
 415 * Caller holds i_ceph_lock
 416 *    -> we take mdsc->cap_delay_lock
 417 */
 418static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
 419                                struct ceph_inode_info *ci)
 420{
 421        __cap_set_timeouts(mdsc, ci);
 422        dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
 423             ci->i_ceph_flags, ci->i_hold_caps_max);
 424        if (!mdsc->stopping) {
 425                spin_lock(&mdsc->cap_delay_lock);
 426                if (!list_empty(&ci->i_cap_delay_list)) {
 427                        if (ci->i_ceph_flags & CEPH_I_FLUSH)
 428                                goto no_change;
 429                        list_del_init(&ci->i_cap_delay_list);
 430                }
 431                list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
 432no_change:
 433                spin_unlock(&mdsc->cap_delay_lock);
 434        }
 435}
 436
 437/*
 438 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 439 * indicating we should send a cap message to flush dirty metadata
 440 * asap, and move to the front of the delayed cap list.
 441 */
 442static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 443                                      struct ceph_inode_info *ci)
 444{
 445        dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
 446        spin_lock(&mdsc->cap_delay_lock);
 447        ci->i_ceph_flags |= CEPH_I_FLUSH;
 448        if (!list_empty(&ci->i_cap_delay_list))
 449                list_del_init(&ci->i_cap_delay_list);
 450        list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
 451        spin_unlock(&mdsc->cap_delay_lock);
 452}
 453
 454/*
 455 * Cancel delayed work on cap.
 456 *
 457 * Caller must hold i_ceph_lock.
 458 */
 459static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
 460                               struct ceph_inode_info *ci)
 461{
 462        dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
 463        if (list_empty(&ci->i_cap_delay_list))
 464                return;
 465        spin_lock(&mdsc->cap_delay_lock);
 466        list_del_init(&ci->i_cap_delay_list);
 467        spin_unlock(&mdsc->cap_delay_lock);
 468}
 469
 470/*
 471 * Common issue checks for add_cap, handle_cap_grant.
 472 */
 473static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
 474                              unsigned issued)
 475{
 476        unsigned had = __ceph_caps_issued(ci, NULL);
 477
 478        /*
 479         * Each time we receive FILE_CACHE anew, we increment
 480         * i_rdcache_gen.
 481         */
 482        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
 483            (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
 484                ci->i_rdcache_gen++;
 485        }
 486
 487        /*
 488         * if we are newly issued FILE_SHARED, mark dir not complete; we
 489         * don't know what happened to this directory while we didn't
 490         * have the cap.
 491         */
 492        if ((issued & CEPH_CAP_FILE_SHARED) &&
 493            (had & CEPH_CAP_FILE_SHARED) == 0) {
 494                ci->i_shared_gen++;
 495                if (S_ISDIR(ci->vfs_inode.i_mode)) {
 496                        dout(" marking %p NOT complete\n", &ci->vfs_inode);
 497                        __ceph_dir_clear_complete(ci);
 498                }
 499        }
 500}
 501
 502/*
 503 * Add a capability under the given MDS session.
 504 *
 505 * Caller should hold session snap_rwsem (read) and s_mutex.
 506 *
 507 * @fmode is the open file mode, if we are opening a file, otherwise
 508 * it is < 0.  (This is so we can atomically add the cap and add an
 509 * open file reference to it.)
 510 */
 511int ceph_add_cap(struct inode *inode,
 512                 struct ceph_mds_session *session, u64 cap_id,
 513                 int fmode, unsigned issued, unsigned wanted,
 514                 unsigned seq, unsigned mseq, u64 realmino, int flags,
 515                 struct ceph_cap_reservation *caps_reservation)
 516{
 517        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 518        struct ceph_inode_info *ci = ceph_inode(inode);
 519        struct ceph_cap *new_cap = NULL;
 520        struct ceph_cap *cap;
 521        int mds = session->s_mds;
 522        int actual_wanted;
 523
 524        dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
 525             session->s_mds, cap_id, ceph_cap_string(issued), seq);
 526
 527        /*
 528         * If we are opening the file, include file mode wanted bits
 529         * in wanted.
 530         */
 531        if (fmode >= 0)
 532                wanted |= ceph_caps_for_mode(fmode);
 533
 534retry:
 535        spin_lock(&ci->i_ceph_lock);
 536        cap = __get_cap_for_mds(ci, mds);
 537        if (!cap) {
 538                if (new_cap) {
 539                        cap = new_cap;
 540                        new_cap = NULL;
 541                } else {
 542                        spin_unlock(&ci->i_ceph_lock);
 543                        new_cap = get_cap(mdsc, caps_reservation);
 544                        if (new_cap == NULL)
 545                                return -ENOMEM;
 546                        goto retry;
 547                }
 548
 549                cap->issued = 0;
 550                cap->implemented = 0;
 551                cap->mds = mds;
 552                cap->mds_wanted = 0;
 553                cap->mseq = 0;
 554
 555                cap->ci = ci;
 556                __insert_cap_node(ci, cap);
 557
 558                /* clear out old exporting info?  (i.e. on cap import) */
 559                if (ci->i_cap_exporting_mds == mds) {
 560                        ci->i_cap_exporting_issued = 0;
 561                        ci->i_cap_exporting_mseq = 0;
 562                        ci->i_cap_exporting_mds = -1;
 563                }
 564
 565                /* add to session cap list */
 566                cap->session = session;
 567                spin_lock(&session->s_cap_lock);
 568                list_add_tail(&cap->session_caps, &session->s_caps);
 569                session->s_nr_caps++;
 570                spin_unlock(&session->s_cap_lock);
 571        } else if (new_cap)
 572                ceph_put_cap(mdsc, new_cap);
 573
 574        if (!ci->i_snap_realm) {
 575                /*
 576                 * add this inode to the appropriate snap realm
 577                 */
 578                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
 579                                                               realmino);
 580                if (realm) {
 581                        ceph_get_snap_realm(mdsc, realm);
 582                        spin_lock(&realm->inodes_with_caps_lock);
 583                        ci->i_snap_realm = realm;
 584                        list_add(&ci->i_snap_realm_item,
 585                                 &realm->inodes_with_caps);
 586                        spin_unlock(&realm->inodes_with_caps_lock);
 587                } else {
 588                        pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
 589                               realmino);
 590                        WARN_ON(!realm);
 591                }
 592        }
 593
 594        __check_cap_issue(ci, cap, issued);
 595
 596        /*
 597         * If we are issued caps we don't want, or the mds' wanted
 598         * value appears to be off, queue a check so we'll release
 599         * later and/or update the mds wanted value.
 600         */
 601        actual_wanted = __ceph_caps_wanted(ci);
 602        if ((wanted & ~actual_wanted) ||
 603            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
 604                dout(" issued %s, mds wanted %s, actual %s, queueing\n",
 605                     ceph_cap_string(issued), ceph_cap_string(wanted),
 606                     ceph_cap_string(actual_wanted));
 607                __cap_delay_requeue(mdsc, ci);
 608        }
 609
 610        if (flags & CEPH_CAP_FLAG_AUTH) {
 611                if (ci->i_auth_cap == NULL ||
 612                    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0)
 613                        ci->i_auth_cap = cap;
 614        } else if (ci->i_auth_cap == cap) {
 615                ci->i_auth_cap = NULL;
 616                spin_lock(&mdsc->cap_dirty_lock);
 617                if (!list_empty(&ci->i_dirty_item)) {
 618                        dout(" moving %p to cap_dirty_migrating\n", inode);
 619                        list_move(&ci->i_dirty_item,
 620                                  &mdsc->cap_dirty_migrating);
 621                }
 622                spin_unlock(&mdsc->cap_dirty_lock);
 623        }
 624
 625        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
 626             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
 627             ceph_cap_string(issued|cap->issued), seq, mds);
 628        cap->cap_id = cap_id;
 629        cap->issued = issued;
 630        cap->implemented |= issued;
 631        if (mseq > cap->mseq)
 632                cap->mds_wanted = wanted;
 633        else
 634                cap->mds_wanted |= wanted;
 635        cap->seq = seq;
 636        cap->issue_seq = seq;
 637        cap->mseq = mseq;
 638        cap->cap_gen = session->s_cap_gen;
 639
 640        if (fmode >= 0)
 641                __ceph_get_fmode(ci, fmode);
 642        spin_unlock(&ci->i_ceph_lock);
 643        wake_up_all(&ci->i_cap_wq);
 644        return 0;
 645}
 646
 647/*
 648 * Return true if cap has not timed out and belongs to the current
 649 * generation of the MDS session (i.e. has not gone 'stale' due to
 650 * us losing touch with the mds).
 651 */
 652static int __cap_is_valid(struct ceph_cap *cap)
 653{
 654        unsigned long ttl;
 655        u32 gen;
 656
 657        spin_lock(&cap->session->s_gen_ttl_lock);
 658        gen = cap->session->s_cap_gen;
 659        ttl = cap->session->s_cap_ttl;
 660        spin_unlock(&cap->session->s_gen_ttl_lock);
 661
 662        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
 663                dout("__cap_is_valid %p cap %p issued %s "
 664                     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
 665                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
 666                return 0;
 667        }
 668
 669        return 1;
 670}
 671
 672/*
 673 * Return set of valid cap bits issued to us.  Note that caps time
 674 * out, and may be invalidated in bulk if the client session times out
 675 * and session->s_cap_gen is bumped.
 676 */
 677int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
 678{
 679        int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
 680        struct ceph_cap *cap;
 681        struct rb_node *p;
 682
 683        if (implemented)
 684                *implemented = 0;
 685        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 686                cap = rb_entry(p, struct ceph_cap, ci_node);
 687                if (!__cap_is_valid(cap))
 688                        continue;
 689                dout("__ceph_caps_issued %p cap %p issued %s\n",
 690                     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
 691                have |= cap->issued;
 692                if (implemented)
 693                        *implemented |= cap->implemented;
 694        }
 695        /*
  696         * exclude caps issued by a non-auth MDS that are being revoked
  697         * by the auth MDS.  The non-auth MDS should be revoking/exporting
 698         * these caps, but the message is delayed.
 699         */
 700        if (ci->i_auth_cap) {
 701                cap = ci->i_auth_cap;
 702                have &= ~cap->implemented | cap->issued;
 703        }
 704        return have;
 705}
 706
 707/*
 708 * Get cap bits issued by caps other than @ocap
 709 */
 710int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
 711{
 712        int have = ci->i_snap_caps;
 713        struct ceph_cap *cap;
 714        struct rb_node *p;
 715
 716        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 717                cap = rb_entry(p, struct ceph_cap, ci_node);
 718                if (cap == ocap)
 719                        continue;
 720                if (!__cap_is_valid(cap))
 721                        continue;
 722                have |= cap->issued;
 723        }
 724        return have;
 725}
 726
 727/*
 728 * Move a cap to the end of the LRU (oldest caps at list head, newest
 729 * at list tail).
 730 */
 731static void __touch_cap(struct ceph_cap *cap)
 732{
 733        struct ceph_mds_session *s = cap->session;
 734
 735        spin_lock(&s->s_cap_lock);
 736        if (s->s_cap_iterator == NULL) {
 737                dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
 738                     s->s_mds);
 739                list_move_tail(&cap->session_caps, &s->s_caps);
 740        } else {
 741                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
 742                     &cap->ci->vfs_inode, cap, s->s_mds);
 743        }
 744        spin_unlock(&s->s_cap_lock);
 745}
 746
 747/*
  748 * Check if we hold the given mask.  If so, move the cap(s) to the
  749 * most-recently-used end of their respective LRUs.  (This is the
  750 * preferred way for callers to check for caps they want.)
 751 */
 752int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 753{
 754        struct ceph_cap *cap;
 755        struct rb_node *p;
 756        int have = ci->i_snap_caps;
 757
 758        if ((have & mask) == mask) {
 759                dout("__ceph_caps_issued_mask %p snap issued %s"
 760                     " (mask %s)\n", &ci->vfs_inode,
 761                     ceph_cap_string(have),
 762                     ceph_cap_string(mask));
 763                return 1;
 764        }
 765
 766        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 767                cap = rb_entry(p, struct ceph_cap, ci_node);
 768                if (!__cap_is_valid(cap))
 769                        continue;
 770                if ((cap->issued & mask) == mask) {
 771                        dout("__ceph_caps_issued_mask %p cap %p issued %s"
 772                             " (mask %s)\n", &ci->vfs_inode, cap,
 773                             ceph_cap_string(cap->issued),
 774                             ceph_cap_string(mask));
 775                        if (touch)
 776                                __touch_cap(cap);
 777                        return 1;
 778                }
 779
 780                /* does a combination of caps satisfy mask? */
 781                have |= cap->issued;
 782                if ((have & mask) == mask) {
 783                        dout("__ceph_caps_issued_mask %p combo issued %s"
 784                             " (mask %s)\n", &ci->vfs_inode,
 785                             ceph_cap_string(cap->issued),
 786                             ceph_cap_string(mask));
 787                        if (touch) {
 788                                struct rb_node *q;
 789
 790                                /* touch this + preceding caps */
 791                                __touch_cap(cap);
 792                                for (q = rb_first(&ci->i_caps); q != p;
 793                                     q = rb_next(q)) {
 794                                        cap = rb_entry(q, struct ceph_cap,
 795                                                       ci_node);
 796                                        if (!__cap_is_valid(cap))
 797                                                continue;
 798                                        __touch_cap(cap);
 799                                }
 800                        }
 801                        return 1;
 802                }
 803        }
 804
 805        return 0;
 806}
 807
 808/*
 809 * Return true if mask caps are currently being revoked by an MDS.
 810 */
 811int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
 812                               struct ceph_cap *ocap, int mask)
 813{
 814        struct ceph_cap *cap;
 815        struct rb_node *p;
 816
 817        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 818                cap = rb_entry(p, struct ceph_cap, ci_node);
 819                if (cap != ocap && __cap_is_valid(cap) &&
 820                    (cap->implemented & ~cap->issued & mask))
 821                        return 1;
 822        }
 823        return 0;
 824}
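
/*
 * For instance, if a cap's implemented set is Fscr while its issued
 * set has shrunk to Fs, then Fc and Fr are still "revoking": the MDS
 * has asked for them back and we have not yet acked the revocation.
 */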
 825
 826int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
 827{
 828        struct inode *inode = &ci->vfs_inode;
 829        int ret;
 830
 831        spin_lock(&ci->i_ceph_lock);
 832        ret = __ceph_caps_revoking_other(ci, NULL, mask);
 833        spin_unlock(&ci->i_ceph_lock);
 834        dout("ceph_caps_revoking %p %s = %d\n", inode,
 835             ceph_cap_string(mask), ret);
 836        return ret;
 837}
 838
 839int __ceph_caps_used(struct ceph_inode_info *ci)
 840{
 841        int used = 0;
 842        if (ci->i_pin_ref)
 843                used |= CEPH_CAP_PIN;
 844        if (ci->i_rd_ref)
 845                used |= CEPH_CAP_FILE_RD;
 846        if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
 847                used |= CEPH_CAP_FILE_CACHE;
 848        if (ci->i_wr_ref)
 849                used |= CEPH_CAP_FILE_WR;
 850        if (ci->i_wb_ref || ci->i_wrbuffer_ref)
 851                used |= CEPH_CAP_FILE_BUFFER;
 852        return used;
 853}
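
/*
 * Note that an inode with pages still in its mapping counts as using
 * Fc, and one with dirty buffered data (i_wrbuffer_ref) as using Fb,
 * even if no file handles remain open.
 */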
 854
 855/*
 856 * wanted, by virtue of open file modes
 857 */
 858int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
 859{
 860        int want = 0;
 861        int mode;
 862        for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
 863                if (ci->i_nr_by_mode[mode])
 864                        want |= ceph_caps_for_mode(mode);
 865        return want;
 866}
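
/*
 * As a rough illustration (the exact mapping lives in
 * ceph_caps_for_mode(), not here): a read-only open typically
 * translates into wanting at least Fr plus Fc so reads can be cached,
 * while a writable open adds Fw and Fb.
 */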
 867
 868/*
 869 * Return caps we have registered with the MDS(s) as 'wanted'.
 870 */
 871int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
 872{
 873        struct ceph_cap *cap;
 874        struct rb_node *p;
 875        int mds_wanted = 0;
 876
 877        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 878                cap = rb_entry(p, struct ceph_cap, ci_node);
 879                if (!__cap_is_valid(cap))
 880                        continue;
 881                mds_wanted |= cap->mds_wanted;
 882        }
 883        return mds_wanted;
 884}
 885
 886/*
 887 * called under i_ceph_lock
 888 */
 889static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 890{
 891        return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
 892}
 893
 894/*
 895 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 896 *
 897 * caller should hold i_ceph_lock.
 898 * caller will not hold session s_mutex if called from destroy_inode.
 899 */
 900void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 901{
 902        struct ceph_mds_session *session = cap->session;
 903        struct ceph_inode_info *ci = cap->ci;
 904        struct ceph_mds_client *mdsc =
 905                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
 906        int removed = 0;
 907
 908        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
 909
 910        /* remove from session list */
 911        spin_lock(&session->s_cap_lock);
 912        /*
 913         * s_cap_reconnect is protected by s_cap_lock. no one changes
 914         * s_cap_gen while session is in the reconnect state.
 915         */
 916        if (queue_release &&
 917            (!session->s_cap_reconnect ||
 918             cap->cap_gen == session->s_cap_gen))
 919                __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
 920                                    cap->mseq, cap->issue_seq);
 921
 922        if (session->s_cap_iterator == cap) {
 923                /* not yet, we are iterating over this very cap */
 924                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
 925                     cap, cap->session);
 926        } else {
 927                list_del_init(&cap->session_caps);
 928                session->s_nr_caps--;
 929                cap->session = NULL;
 930                removed = 1;
 931        }
 932        /* protect backpointer with s_cap_lock: see iterate_session_caps */
 933        cap->ci = NULL;
 934        spin_unlock(&session->s_cap_lock);
 935
 936        /* remove from inode list */
 937        rb_erase(&cap->ci_node, &ci->i_caps);
 938        if (ci->i_auth_cap == cap)
 939                ci->i_auth_cap = NULL;
 940
 941        if (removed)
 942                ceph_put_cap(mdsc, cap);
 943
 944        if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
 945                struct ceph_snap_realm *realm = ci->i_snap_realm;
 946                spin_lock(&realm->inodes_with_caps_lock);
 947                list_del_init(&ci->i_snap_realm_item);
 948                ci->i_snap_realm_counter++;
 949                ci->i_snap_realm = NULL;
 950                spin_unlock(&realm->inodes_with_caps_lock);
 951                ceph_put_snap_realm(mdsc, realm);
 952        }
 953        if (!__ceph_is_any_real_caps(ci))
 954                __cap_delay_cancel(mdsc, ci);
 955}
 956
 957/*
 958 * Build and send a cap message to the given MDS.
 959 *
 960 * Caller should be holding s_mutex.
 961 */
 962static int send_cap_msg(struct ceph_mds_session *session,
 963                        u64 ino, u64 cid, int op,
 964                        int caps, int wanted, int dirty,
 965                        u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
 966                        u64 size, u64 max_size,
 967                        struct timespec *mtime, struct timespec *atime,
 968                        u64 time_warp_seq,
 969                        kuid_t uid, kgid_t gid, umode_t mode,
 970                        u64 xattr_version,
 971                        struct ceph_buffer *xattrs_buf,
 972                        u64 follows)
 973{
 974        struct ceph_mds_caps *fc;
 975        struct ceph_msg *msg;
 976
 977        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
 978             " seq %u/%u mseq %u follows %lld size %llu/%llu"
 979             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
 980             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
 981             ceph_cap_string(dirty),
 982             seq, issue_seq, mseq, follows, size, max_size,
 983             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
 984
 985        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
 986        if (!msg)
 987                return -ENOMEM;
 988
 989        msg->hdr.tid = cpu_to_le64(flush_tid);
 990
 991        fc = msg->front.iov_base;
 992        memset(fc, 0, sizeof(*fc));
 993
 994        fc->cap_id = cpu_to_le64(cid);
 995        fc->op = cpu_to_le32(op);
 996        fc->seq = cpu_to_le32(seq);
 997        fc->issue_seq = cpu_to_le32(issue_seq);
 998        fc->migrate_seq = cpu_to_le32(mseq);
 999        fc->caps = cpu_to_le32(caps);
1000        fc->wanted = cpu_to_le32(wanted);
1001        fc->dirty = cpu_to_le32(dirty);
1002        fc->ino = cpu_to_le64(ino);
1003        fc->snap_follows = cpu_to_le64(follows);
1004
1005        fc->size = cpu_to_le64(size);
1006        fc->max_size = cpu_to_le64(max_size);
1007        if (mtime)
1008                ceph_encode_timespec(&fc->mtime, mtime);
1009        if (atime)
1010                ceph_encode_timespec(&fc->atime, atime);
1011        fc->time_warp_seq = cpu_to_le32(time_warp_seq);
1012
1013        fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
1014        fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
1015        fc->mode = cpu_to_le32(mode);
1016
1017        fc->xattr_version = cpu_to_le64(xattr_version);
1018        if (xattrs_buf) {
1019                msg->middle = ceph_buffer_get(xattrs_buf);
1020                fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
1021                msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
1022        }
1023
1024        ceph_con_send(&session->s_con, msg);
1025        return 0;
1026}
1027
1028void __queue_cap_release(struct ceph_mds_session *session,
1029                         u64 ino, u64 cap_id, u32 migrate_seq,
1030                         u32 issue_seq)
1031{
1032        struct ceph_msg *msg;
1033        struct ceph_mds_cap_release *head;
1034        struct ceph_mds_cap_item *item;
1035
1036        BUG_ON(!session->s_num_cap_releases);
1037        msg = list_first_entry(&session->s_cap_releases,
1038                               struct ceph_msg, list_head);
1039
1040        dout(" adding %llx release to mds%d msg %p (%d left)\n",
1041             ino, session->s_mds, msg, session->s_num_cap_releases);
1042
1043        BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
1044        head = msg->front.iov_base;
1045        le32_add_cpu(&head->num, 1);
1046        item = msg->front.iov_base + msg->front.iov_len;
1047        item->ino = cpu_to_le64(ino);
1048        item->cap_id = cpu_to_le64(cap_id);
1049        item->migrate_seq = cpu_to_le32(migrate_seq);
1050        item->seq = cpu_to_le32(issue_seq);
1051
1052        session->s_num_cap_releases--;
1053
1054        msg->front.iov_len += sizeof(*item);
1055        if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1056                dout(" release msg %p full\n", msg);
1057                list_move_tail(&msg->list_head, &session->s_cap_releases_done);
1058        } else {
1059                dout(" release msg %p at %d/%d (%d)\n", msg,
1060                     (int)le32_to_cpu(head->num),
1061                     (int)CEPH_CAPS_PER_RELEASE,
1062                     (int)msg->front.iov_len);
1063        }
1064}
1065
1066/*
1067 * Queue cap releases when an inode is dropped from our cache.  Since
1068 * inode is about to be destroyed, there is no need for i_ceph_lock.
1069 */
1070void ceph_queue_caps_release(struct inode *inode)
1071{
1072        struct ceph_inode_info *ci = ceph_inode(inode);
1073        struct rb_node *p;
1074
1075        p = rb_first(&ci->i_caps);
1076        while (p) {
1077                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1078                p = rb_next(p);
1079                __ceph_remove_cap(cap, true);
1080        }
1081}
1082
1083/*
1084 * Send a cap msg on the given inode.  Update our caps state, then
1085 * drop i_ceph_lock and send the message.
1086 *
1087 * Make note of max_size reported/requested from mds, revoked caps
1088 * that have now been implemented.
1089 *
 1090 * Make a half-hearted attempt to invalidate the page cache if we are
1091 * dropping RDCACHE.  Note that this will leave behind locked pages
1092 * that we'll then need to deal with elsewhere.
1093 *
1094 * Return non-zero if delayed release, or we experienced an error
1095 * such that the caller should requeue + retry later.
1096 *
1097 * called with i_ceph_lock, then drops it.
1098 * caller should hold snap_rwsem (read), s_mutex.
1099 */
1100static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1101                      int op, int used, int want, int retain, int flushing,
1102                      unsigned *pflush_tid)
1103        __releases(cap->ci->i_ceph_lock)
1104{
1105        struct ceph_inode_info *ci = cap->ci;
1106        struct inode *inode = &ci->vfs_inode;
1107        u64 cap_id = cap->cap_id;
1108        int held, revoking, dropping, keep;
1109        u64 seq, issue_seq, mseq, time_warp_seq, follows;
1110        u64 size, max_size;
1111        struct timespec mtime, atime;
1112        int wake = 0;
1113        umode_t mode;
1114        kuid_t uid;
1115        kgid_t gid;
1116        struct ceph_mds_session *session;
1117        u64 xattr_version = 0;
1118        struct ceph_buffer *xattr_blob = NULL;
1119        int delayed = 0;
1120        u64 flush_tid = 0;
1121        int i;
1122        int ret;
1123
1124        held = cap->issued | cap->implemented;
1125        revoking = cap->implemented & ~cap->issued;
1126        retain &= ~revoking;
1127        dropping = cap->issued & ~retain;
1128
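        /*
         * A small worked example (values illustrative): if the MDS is
         * revoking Fc, issued has already shrunk to Fsr while
         * implemented is still Fscr, so revoking = Fc; Fc is removed
         * from retain and the cap message sent below acks the
         * revocation.
         */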
1129        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1130             inode, cap, cap->session,
1131             ceph_cap_string(held), ceph_cap_string(held & retain),
1132             ceph_cap_string(revoking));
1133        BUG_ON((retain & CEPH_CAP_PIN) == 0);
1134
1135        session = cap->session;
1136
1137        /* don't release wanted unless we've waited a bit. */
1138        if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1139            time_before(jiffies, ci->i_hold_caps_min)) {
1140                dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1141                     ceph_cap_string(cap->issued),
1142                     ceph_cap_string(cap->issued & retain),
1143                     ceph_cap_string(cap->mds_wanted),
1144                     ceph_cap_string(want));
1145                want |= cap->mds_wanted;
1146                retain |= cap->issued;
1147                delayed = 1;
1148        }
1149        ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1150
1151        cap->issued &= retain;  /* drop bits we don't want */
1152        if (cap->implemented & ~cap->issued) {
1153                /*
1154                 * Wake up any waiters on wanted -> needed transition.
1155                 * This is due to the weird transition from buffered
1156                 * to sync IO... we need to flush dirty pages _before_
1157                 * allowing sync writes to avoid reordering.
1158                 */
1159                wake = 1;
1160        }
1161        cap->implemented &= cap->issued | used;
1162        cap->mds_wanted = want;
1163
1164        if (flushing) {
1165                /*
1166                 * assign a tid for flush operations so we can avoid
1167                 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
1168                 * clean type races.  track latest tid for every bit
1169                 * so we can handle flush AxFw, flush Fw, and have the
1170                 * first ack clean Ax.
1171                 */
1172                flush_tid = ++ci->i_cap_flush_last_tid;
1173                if (pflush_tid)
1174                        *pflush_tid = flush_tid;
1175                dout(" cap_flush_tid %d\n", (int)flush_tid);
1176                for (i = 0; i < CEPH_CAP_BITS; i++)
1177                        if (flushing & (1 << i))
1178                                ci->i_cap_flush_tid[i] = flush_tid;
1179
1180                follows = ci->i_head_snapc->seq;
1181        } else {
1182                follows = 0;
1183        }
1184
1185        keep = cap->implemented;
1186        seq = cap->seq;
1187        issue_seq = cap->issue_seq;
1188        mseq = cap->mseq;
1189        size = inode->i_size;
1190        ci->i_reported_size = size;
1191        max_size = ci->i_wanted_max_size;
1192        ci->i_requested_max_size = max_size;
1193        mtime = inode->i_mtime;
1194        atime = inode->i_atime;
1195        time_warp_seq = ci->i_time_warp_seq;
1196        uid = inode->i_uid;
1197        gid = inode->i_gid;
1198        mode = inode->i_mode;
1199
1200        if (flushing & CEPH_CAP_XATTR_EXCL) {
1201                __ceph_build_xattrs_blob(ci);
1202                xattr_blob = ci->i_xattrs.blob;
1203                xattr_version = ci->i_xattrs.version;
1204        }
1205
1206        spin_unlock(&ci->i_ceph_lock);
1207
1208        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1209                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
1210                size, max_size, &mtime, &atime, time_warp_seq,
1211                uid, gid, mode, xattr_version, xattr_blob,
1212                follows);
1213        if (ret < 0) {
1214                dout("error sending cap msg, must requeue %p\n", inode);
1215                delayed = 1;
1216        }
1217
1218        if (wake)
1219                wake_up_all(&ci->i_cap_wq);
1220
1221        return delayed;
1222}
1223
1224/*
1225 * When a snapshot is taken, clients accumulate dirty metadata on
1226 * inodes with capabilities in ceph_cap_snaps to describe the file
1227 * state at the time the snapshot was taken.  This must be flushed
1228 * asynchronously back to the MDS once sync writes complete and dirty
1229 * data is written out.
1230 *
1231 * Unless @again is true, skip cap_snaps that were already sent to
1232 * the MDS (i.e., during this session).
1233 *
1234 * Called under i_ceph_lock.  Takes s_mutex as needed.
1235 */
1236void __ceph_flush_snaps(struct ceph_inode_info *ci,
1237                        struct ceph_mds_session **psession,
1238                        int again)
1239                __releases(ci->i_ceph_lock)
1240                __acquires(ci->i_ceph_lock)
1241{
1242        struct inode *inode = &ci->vfs_inode;
1243        int mds;
1244        struct ceph_cap_snap *capsnap;
1245        u32 mseq;
1246        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1247        struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1248                                                    session->s_mutex */
1249        u64 next_follows = 0;  /* keep track of how far we've gotten through the
1250                             i_cap_snaps list, and skip these entries next time
1251                             around to avoid an infinite loop */
1252
1253        if (psession)
1254                session = *psession;
1255
1256        dout("__flush_snaps %p\n", inode);
1257retry:
1258        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
 1259                /* avoid an infinite loop after retry */
1260                if (capsnap->follows < next_follows)
1261                        continue;
1262                /*
1263                 * we need to wait for sync writes to complete and for dirty
1264                 * pages to be written out.
1265                 */
1266                if (capsnap->dirty_pages || capsnap->writing)
1267                        break;
1268
1269                /*
1270                 * if cap writeback already occurred, we should have dropped
1271                 * the capsnap in ceph_put_wrbuffer_cap_refs.
1272                 */
1273                BUG_ON(capsnap->dirty == 0);
1274
1275                /* pick mds, take s_mutex */
1276                if (ci->i_auth_cap == NULL) {
1277                        dout("no auth cap (migrating?), doing nothing\n");
1278                        goto out;
1279                }
1280
1281                /* only flush each capsnap once */
1282                if (!again && !list_empty(&capsnap->flushing_item)) {
1283                        dout("already flushed %p, skipping\n", capsnap);
1284                        continue;
1285                }
1286
1287                mds = ci->i_auth_cap->session->s_mds;
1288                mseq = ci->i_auth_cap->mseq;
1289
1290                if (session && session->s_mds != mds) {
1291                        dout("oops, wrong session %p mutex\n", session);
1292                        mutex_unlock(&session->s_mutex);
1293                        ceph_put_mds_session(session);
1294                        session = NULL;
1295                }
1296                if (!session) {
1297                        spin_unlock(&ci->i_ceph_lock);
1298                        mutex_lock(&mdsc->mutex);
1299                        session = __ceph_lookup_mds_session(mdsc, mds);
1300                        mutex_unlock(&mdsc->mutex);
1301                        if (session) {
1302                                dout("inverting session/ino locks on %p\n",
1303                                     session);
1304                                mutex_lock(&session->s_mutex);
1305                        }
1306                        /*
1307                         * if session == NULL, we raced against a cap
1308                         * deletion or migration.  retry, and we'll
1309                         * get a better @mds value next time.
1310                         */
1311                        spin_lock(&ci->i_ceph_lock);
1312                        goto retry;
1313                }
1314
1315                capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
1316                atomic_inc(&capsnap->nref);
1317                if (!list_empty(&capsnap->flushing_item))
1318                        list_del_init(&capsnap->flushing_item);
1319                list_add_tail(&capsnap->flushing_item,
1320                              &session->s_cap_snaps_flushing);
1321                spin_unlock(&ci->i_ceph_lock);
1322
1323                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1324                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
1325                send_cap_msg(session, ceph_vino(inode).ino, 0,
1326                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1327                             capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
1328                             capsnap->size, 0,
1329                             &capsnap->mtime, &capsnap->atime,
1330                             capsnap->time_warp_seq,
1331                             capsnap->uid, capsnap->gid, capsnap->mode,
1332                             capsnap->xattr_version, capsnap->xattr_blob,
1333                             capsnap->follows);
1334
1335                next_follows = capsnap->follows + 1;
1336                ceph_put_cap_snap(capsnap);
1337
1338                spin_lock(&ci->i_ceph_lock);
1339                goto retry;
1340        }
1341
1342        /* we flushed them all; remove this inode from the queue */
1343        spin_lock(&mdsc->snap_flush_lock);
1344        list_del_init(&ci->i_snap_flush_item);
1345        spin_unlock(&mdsc->snap_flush_lock);
1346
1347out:
1348        if (psession)
1349                *psession = session;
1350        else if (session) {
1351                mutex_unlock(&session->s_mutex);
1352                ceph_put_mds_session(session);
1353        }
1354}
1355
1356static void ceph_flush_snaps(struct ceph_inode_info *ci)
1357{
1358        spin_lock(&ci->i_ceph_lock);
1359        __ceph_flush_snaps(ci, NULL, 0);
1360        spin_unlock(&ci->i_ceph_lock);
1361}
1362
1363/*
1364 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
1365 * Caller is then responsible for calling __mark_inode_dirty with the
1366 * returned flags value.
1367 */
1368int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1369{
1370        struct ceph_mds_client *mdsc =
1371                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1372        struct inode *inode = &ci->vfs_inode;
1373        int was = ci->i_dirty_caps;
1374        int dirty = 0;
1375
1376        dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1377             ceph_cap_string(mask), ceph_cap_string(was),
1378             ceph_cap_string(was | mask));
1379        ci->i_dirty_caps |= mask;
1380        if (was == 0) {
1381                if (!ci->i_head_snapc)
1382                        ci->i_head_snapc = ceph_get_snap_context(
1383                                ci->i_snap_realm->cached_context);
1384                dout(" inode %p now dirty snapc %p auth cap %p\n",
1385                     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
1386                BUG_ON(!list_empty(&ci->i_dirty_item));
1387                spin_lock(&mdsc->cap_dirty_lock);
1388                if (ci->i_auth_cap)
1389                        list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1390                else
1391                        list_add(&ci->i_dirty_item,
1392                                 &mdsc->cap_dirty_migrating);
1393                spin_unlock(&mdsc->cap_dirty_lock);
1394                if (ci->i_flushing_caps == 0) {
1395                        ihold(inode);
1396                        dirty |= I_DIRTY_SYNC;
1397                }
1398        }
1399        BUG_ON(list_empty(&ci->i_dirty_item));
1400        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1401            (mask & CEPH_CAP_FILE_BUFFER))
1402                dirty |= I_DIRTY_DATASYNC;
1403        __cap_delay_requeue(mdsc, ci);
1404        return dirty;
1405}
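
/*
 * For example: the first dirtying of an otherwise clean inode returns
 * I_DIRTY_SYNC (and takes an inode reference) so the caller kicks
 * __mark_inode_dirty(); re-dirtying Fb while Fb is already dirty or
 * flushing additionally returns I_DIRTY_DATASYNC per the check above.
 */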
1406
1407/*
 1408 * Add dirty inode to the flushing list.  Assign a seq number so we
1409 * can wait for caps to flush without starving.
1410 *
1411 * Called under i_ceph_lock.
1412 */
1413static int __mark_caps_flushing(struct inode *inode,
1414                                 struct ceph_mds_session *session)
1415{
1416        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1417        struct ceph_inode_info *ci = ceph_inode(inode);
1418        int flushing;
1419
1420        BUG_ON(ci->i_dirty_caps == 0);
1421        BUG_ON(list_empty(&ci->i_dirty_item));
1422
1423        flushing = ci->i_dirty_caps;
1424        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1425             ceph_cap_string(flushing),
1426             ceph_cap_string(ci->i_flushing_caps),
1427             ceph_cap_string(ci->i_flushing_caps | flushing));
1428        ci->i_flushing_caps |= flushing;
1429        ci->i_dirty_caps = 0;
1430        dout(" inode %p now !dirty\n", inode);
1431
1432        spin_lock(&mdsc->cap_dirty_lock);
1433        list_del_init(&ci->i_dirty_item);
1434
1435        ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
1436        if (list_empty(&ci->i_flushing_item)) {
1437                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1438                mdsc->num_cap_flushing++;
1439                dout(" inode %p now flushing seq %lld\n", inode,
1440                     ci->i_cap_flush_seq);
1441        } else {
1442                list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1443                dout(" inode %p now flushing (more) seq %lld\n", inode,
1444                     ci->i_cap_flush_seq);
1445        }
1446        spin_unlock(&mdsc->cap_dirty_lock);
1447
1448        return flushing;
1449}
1450
1451/*
1452 * try to invalidate mapping pages without blocking.
1453 */
1454static int try_nonblocking_invalidate(struct inode *inode)
1455{
1456        struct ceph_inode_info *ci = ceph_inode(inode);
1457        u32 invalidating_gen = ci->i_rdcache_gen;
1458
1459        spin_unlock(&ci->i_ceph_lock);
1460        invalidate_mapping_pages(&inode->i_data, 0, -1);
1461        spin_lock(&ci->i_ceph_lock);
1462
1463        if (inode->i_data.nrpages == 0 &&
1464            invalidating_gen == ci->i_rdcache_gen) {
1465                /* success. */
1466                dout("try_nonblocking_invalidate %p success\n", inode);
1467                /* save any racing async invalidate some trouble */
1468                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1469                return 0;
1470        }
1471        dout("try_nonblocking_invalidate %p failed\n", inode);
1472        return -1;
1473}
1474
1475/*
1476 * Swiss army knife function to examine currently used and wanted
1477 * versus held caps.  Release, flush, ack revoked caps to mds as
1478 * appropriate.
1479 *
1480 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1481 *    cap release further.
1482 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1483 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1484 *    further delay.
1485 */
1486void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1487                     struct ceph_mds_session *session)
1488{
1489        struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1490        struct ceph_mds_client *mdsc = fsc->mdsc;
1491        struct inode *inode = &ci->vfs_inode;
1492        struct ceph_cap *cap;
1493        int file_wanted, used, cap_used;
1494        int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1495        int issued, implemented, want, retain, revoking, flushing = 0;
1496        int mds = -1;   /* keep track of how far we've gone through i_caps list
1497                           to avoid an infinite loop on retry */
1498        struct rb_node *p;
1499        int tried_invalidate = 0;
1500        int delayed = 0, sent = 0, force_requeue = 0, num;
1501        int queue_invalidate = 0;
1502        int is_delayed = flags & CHECK_CAPS_NODELAY;
1503
1504        /* if we are unmounting, flush any unused caps immediately. */
1505        if (mdsc->stopping)
1506                is_delayed = 1;
1507
1508        spin_lock(&ci->i_ceph_lock);
1509
1510        if (ci->i_ceph_flags & CEPH_I_FLUSH)
1511                flags |= CHECK_CAPS_FLUSH;
1512
1513        /* flush snaps first time around only */
1514        if (!list_empty(&ci->i_cap_snaps))
1515                __ceph_flush_snaps(ci, &session, 0);
1516        goto retry_locked;
1517retry:
1518        spin_lock(&ci->i_ceph_lock);
1519retry_locked:
1520        file_wanted = __ceph_caps_file_wanted(ci);
1521        used = __ceph_caps_used(ci);
1522        want = file_wanted | used;
1523        issued = __ceph_caps_issued(ci, &implemented);
1524        revoking = implemented & ~issued;
1525
1526        retain = want | CEPH_CAP_PIN;
1527        if (!mdsc->stopping && inode->i_nlink > 0) {
1528                if (want) {
1529                        retain |= CEPH_CAP_ANY;       /* be greedy */
1530                } else {
1531                        retain |= CEPH_CAP_ANY_SHARED;
1532                        /*
1533                         * keep RD only if we didn't have the file open RW,
1534                         * because then the mds would revoke it anyway to
1535                         * journal max_size=0.
1536                         */
1537                        if (ci->i_max_size == 0)
1538                                retain |= CEPH_CAP_ANY_RD;
1539                }
1540        }
1541
1542        dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1543             " issued %s revoking %s retain %s %s%s%s\n", inode,
1544             ceph_cap_string(file_wanted),
1545             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1546             ceph_cap_string(ci->i_flushing_caps),
1547             ceph_cap_string(issued), ceph_cap_string(revoking),
1548             ceph_cap_string(retain),
1549             (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1550             (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1551             (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1552
1553        /*
1554         * If we no longer need to hold onto our old caps, and we may
1555         * have cached pages, but don't want them, then try to invalidate.
1556         * If we fail, it's because pages are locked.... try again later.
1557         */
1558        if ((!is_delayed || mdsc->stopping) &&
1559            ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
1560            inode->i_data.nrpages &&                 /* have cached pages */
1561            (file_wanted == 0 ||                     /* no open files */
1562             (revoking & (CEPH_CAP_FILE_CACHE|
1563                          CEPH_CAP_FILE_LAZYIO))) && /*  or revoking cache */
1564            !tried_invalidate) {
1565                dout("check_caps trying to invalidate on %p\n", inode);
1566                if (try_nonblocking_invalidate(inode) < 0) {
1567                        if (revoking & (CEPH_CAP_FILE_CACHE|
1568                                        CEPH_CAP_FILE_LAZYIO)) {
1569                                dout("check_caps queuing invalidate\n");
1570                                queue_invalidate = 1;
1571                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
1572                        } else {
1573                                dout("check_caps failed to invalidate pages\n");
1574                                /* we failed to invalidate pages.  check these
1575                                   caps again later. */
1576                                force_requeue = 1;
1577                                __cap_set_timeouts(mdsc, ci);
1578                        }
1579                }
1580                tried_invalidate = 1;
1581                goto retry_locked;
1582        }
1583
1584        num = 0;
1585        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1586                cap = rb_entry(p, struct ceph_cap, ci_node);
1587                num++;
1588
1589                /* avoid looping forever */
1590                if (mds >= cap->mds ||
1591                    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1592                        continue;
1593
1594                /* NOTE: no side-effects allowed, until we take s_mutex */
1595
1596                cap_used = used;
1597                if (ci->i_auth_cap && cap != ci->i_auth_cap)
1598                        cap_used &= ~ci->i_auth_cap->issued;
1599
1600                revoking = cap->implemented & ~cap->issued;
1601                dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
1602                     cap->mds, cap, ceph_cap_string(cap_used),
1603                     ceph_cap_string(cap->issued),
1604                     ceph_cap_string(cap->implemented),
1605                     ceph_cap_string(revoking));
1606
1607                if (cap == ci->i_auth_cap &&
1608                    (cap->issued & CEPH_CAP_FILE_WR)) {
1609                        /* request larger max_size from MDS? */
1610                        if (ci->i_wanted_max_size > ci->i_max_size &&
1611                            ci->i_wanted_max_size > ci->i_requested_max_size) {
1612                                dout("requesting new max_size\n");
1613                                goto ack;
1614                        }
1615
1616                        /* approaching file_max? */
1617                        if ((inode->i_size << 1) >= ci->i_max_size &&
1618                            (ci->i_reported_size << 1) < ci->i_max_size) {
1619                                dout("i_size approaching max_size\n");
1620                                goto ack;
1621                        }
1622                }
1623                /* flush anything dirty? */
1624                if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1625                    ci->i_dirty_caps) {
1626                        dout("flushing dirty caps\n");
1627                        goto ack;
1628                }
1629
1630                /* completed revocation? going down and there are no caps? */
1631                if (revoking && (revoking & cap_used) == 0) {
1632                        dout("completed revocation of %s\n",
1633                             ceph_cap_string(cap->implemented & ~cap->issued));
1634                        goto ack;
1635                }
1636
1637                /* want more caps from mds? */
1638                if (want & ~(cap->mds_wanted | cap->issued))
1639                        goto ack;
1640
1641                /* things we might delay */
1642                if ((cap->issued & ~retain) == 0 &&
1643                    cap->mds_wanted == want)
1644                        continue;     /* nope, all good */
1645
1646                if (is_delayed)
1647                        goto ack;
1648
1649                /* delay? */
1650                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1651                    time_before(jiffies, ci->i_hold_caps_max)) {
1652                        dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1653                             ceph_cap_string(cap->issued),
1654                             ceph_cap_string(cap->issued & retain),
1655                             ceph_cap_string(cap->mds_wanted),
1656                             ceph_cap_string(want));
1657                        delayed++;
1658                        continue;
1659                }
1660
1661ack:
1662                if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1663                        dout(" skipping %p I_NOFLUSH set\n", inode);
1664                        continue;
1665                }
1666
1667                if (session && session != cap->session) {
1668                        dout("oops, wrong session %p mutex\n", session);
1669                        mutex_unlock(&session->s_mutex);
1670                        session = NULL;
1671                }
1672                if (!session) {
1673                        session = cap->session;
1674                        if (mutex_trylock(&session->s_mutex) == 0) {
1675                                dout("inverting session/ino locks on %p\n",
1676                                     session);
1677                                spin_unlock(&ci->i_ceph_lock);
1678                                if (took_snap_rwsem) {
1679                                        up_read(&mdsc->snap_rwsem);
1680                                        took_snap_rwsem = 0;
1681                                }
1682                                mutex_lock(&session->s_mutex);
1683                                goto retry;
1684                        }
1685                }
1686                /* take snap_rwsem after session mutex */
1687                if (!took_snap_rwsem) {
1688                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1689                                dout("inverting snap/in locks on %p\n",
1690                                     inode);
1691                                spin_unlock(&ci->i_ceph_lock);
1692                                down_read(&mdsc->snap_rwsem);
1693                                took_snap_rwsem = 1;
1694                                goto retry;
1695                        }
1696                        took_snap_rwsem = 1;
1697                }
1698
1699                if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1700                        flushing = __mark_caps_flushing(inode, session);
1701                else
1702                        flushing = 0;
1703
1704                mds = cap->mds;  /* remember mds, so we don't repeat */
1705                sent++;
1706
1707                /* __send_cap drops i_ceph_lock */
1708                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
1709                                      want, retain, flushing, NULL);
1710                goto retry; /* retake i_ceph_lock and restart our cap scan. */
1711        }
1712
1713        /*
1714         * Reschedule delayed caps release if we delayed anything,
1715         * otherwise cancel.
1716         */
1717        if (delayed && is_delayed)
1718                force_requeue = 1;   /* __send_cap delayed release; requeue */
1719        if (!delayed && !is_delayed)
1720                __cap_delay_cancel(mdsc, ci);
1721        else if (!is_delayed || force_requeue)
1722                __cap_delay_requeue(mdsc, ci);
1723
1724        spin_unlock(&ci->i_ceph_lock);
1725
1726        if (queue_invalidate)
1727                ceph_queue_invalidate(inode);
1728
1729        if (session)
1730                mutex_unlock(&session->s_mutex);
1731        if (took_snap_rwsem)
1732                up_read(&mdsc->snap_rwsem);
1733}
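
/*
 * Representative ways the CHECK_CAPS_* flags are combined, sketched from
 * the call sites later in this file (nothing new is introduced here):
 *
 *	ceph_check_caps(ci, 0, NULL);
 *		opportunistic check after the last ref on a cap is put
 *	ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 *		only the auth cap matters, e.g. asking for a larger max_size
 *	ceph_check_caps(ci, CHECK_CAPS_NODELAY | CHECK_CAPS_AUTHONLY, session);
 *		from a message handler that already holds session->s_mutex;
 *		ceph_check_caps() drops whichever session mutex it ends up
 *		holding before it returns
 */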
1734
1735/*
1736 * Try to flush dirty caps back to the auth mds.
1737 */
1738static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1739                          unsigned *flush_tid)
1740{
1741        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1742        struct ceph_inode_info *ci = ceph_inode(inode);
1743        int unlock_session = session ? 0 : 1;
1744        int flushing = 0;
1745
1746retry:
1747        spin_lock(&ci->i_ceph_lock);
1748        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1749                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1750                goto out;
1751        }
1752        if (ci->i_dirty_caps && ci->i_auth_cap) {
1753                struct ceph_cap *cap = ci->i_auth_cap;
1754                int used = __ceph_caps_used(ci);
1755                int want = __ceph_caps_wanted(ci);
1756                int delayed;
1757
1758                if (!session) {
1759                        spin_unlock(&ci->i_ceph_lock);
1760                        session = cap->session;
1761                        mutex_lock(&session->s_mutex);
1762                        goto retry;
1763                }
1764                BUG_ON(session != cap->session);
1765                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1766                        goto out;
1767
1768                flushing = __mark_caps_flushing(inode, session);
1769
1770                /* __send_cap drops i_ceph_lock */
1771                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1772                                     cap->issued | cap->implemented, flushing,
1773                                     flush_tid);
1774                if (!delayed)
1775                        goto out_unlocked;
1776
1777                spin_lock(&ci->i_ceph_lock);
1778                __cap_delay_requeue(mdsc, ci);
1779        }
1780out:
1781        spin_unlock(&ci->i_ceph_lock);
1782out_unlocked:
1783        if (session && unlock_session)
1784                mutex_unlock(&session->s_mutex);
1785        return flushing;
1786}
1787
1788/*
1789 * Return true if we've flushed caps through the given flush_tid.
1790 */
1791static int caps_are_flushed(struct inode *inode, unsigned tid)
1792{
1793        struct ceph_inode_info *ci = ceph_inode(inode);
1794        int i, ret = 1;
1795
1796        spin_lock(&ci->i_ceph_lock);
1797        for (i = 0; i < CEPH_CAP_BITS; i++)
1798                if ((ci->i_flushing_caps & (1 << i)) &&
1799                    ci->i_cap_flush_tid[i] <= tid) {
1800                        /* still flushing this bit */
1801                        ret = 0;
1802                        break;
1803                }
1804        spin_unlock(&ci->i_ceph_lock);
1805        return ret;
1806}
1807
1808/*
1809 * Wait on any unsafe replies for the given inode.  First wait on the
1810 * newest request, and make that the upper bound.  Then, if there are
1811 * more requests, keep waiting on the oldest as long as it is still older
1812 * than the original request.
1813 */
1814static void sync_write_wait(struct inode *inode)
1815{
1816        struct ceph_inode_info *ci = ceph_inode(inode);
1817        struct list_head *head = &ci->i_unsafe_writes;
1818        struct ceph_osd_request *req;
1819        u64 last_tid;
1820
1821        spin_lock(&ci->i_unsafe_lock);
1822        if (list_empty(head))
1823                goto out;
1824
1825        /* set upper bound as _last_ entry in chain */
1826        req = list_entry(head->prev, struct ceph_osd_request,
1827                         r_unsafe_item);
1828        last_tid = req->r_tid;
1829
1830        do {
1831                ceph_osdc_get_request(req);
1832                spin_unlock(&ci->i_unsafe_lock);
1833                dout("sync_write_wait on tid %llu (until %llu)\n",
1834                     req->r_tid, last_tid);
1835                wait_for_completion(&req->r_safe_completion);
1836                spin_lock(&ci->i_unsafe_lock);
1837                ceph_osdc_put_request(req);
1838
1839                /*
1840                 * from here on look at first entry in chain, since we
1841                 * only want to wait for anything older than last_tid
1842                 */
1843                if (list_empty(head))
1844                        break;
1845                req = list_entry(head->next, struct ceph_osd_request,
1846                                 r_unsafe_item);
1847        } while (req->r_tid < last_tid);
1848out:
1849        spin_unlock(&ci->i_unsafe_lock);
1850}
1851
1852int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1853{
1854        struct inode *inode = file->f_mapping->host;
1855        struct ceph_inode_info *ci = ceph_inode(inode);
1856        unsigned flush_tid;
1857        int ret;
1858        int dirty;
1859
1860        dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1861        sync_write_wait(inode);
1862
1863        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1864        if (ret < 0)
1865                return ret;
1866        mutex_lock(&inode->i_mutex);
1867
1868        dirty = try_flush_caps(inode, NULL, &flush_tid);
1869        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1870
1871        /*
1872         * only wait on non-file metadata writeback (the mds
1873         * can recover size and mtime, so we don't need to
1874         * wait for that)
1875         */
1876        if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1877                dout("fsync waiting for flush_tid %u\n", flush_tid);
1878                ret = wait_event_interruptible(ci->i_cap_wq,
1879                                       caps_are_flushed(inode, flush_tid));
1880        }
1881
1882        dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1883        mutex_unlock(&inode->i_mutex);
1884        return ret;
1885}
1886
1887/*
1888 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
1889 * queue inode for flush but don't do so immediately, because we can
1890 * get by with fewer MDS messages if we wait for data writeback to
1891 * complete first.
1892 */
1893int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1894{
1895        struct ceph_inode_info *ci = ceph_inode(inode);
1896        unsigned flush_tid;
1897        int err = 0;
1898        int dirty;
1899        int wait = wbc->sync_mode == WB_SYNC_ALL;
1900
1901        dout("write_inode %p wait=%d\n", inode, wait);
1902        if (wait) {
1903                dirty = try_flush_caps(inode, NULL, &flush_tid);
1904                if (dirty)
1905                        err = wait_event_interruptible(ci->i_cap_wq,
1906                                       caps_are_flushed(inode, flush_tid));
1907        } else {
1908                struct ceph_mds_client *mdsc =
1909                        ceph_sb_to_client(inode->i_sb)->mdsc;
1910
1911                spin_lock(&ci->i_ceph_lock);
1912                if (__ceph_caps_dirty(ci))
1913                        __cap_delay_requeue_front(mdsc, ci);
1914                spin_unlock(&ci->i_ceph_lock);
1915        }
1916        return err;
1917}
1918
1919/*
1920 * After a recovering MDS goes active, we need to resend any caps
1921 * we were flushing.
1922 *
1923 * Caller holds session->s_mutex.
1924 */
1925static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1926                                   struct ceph_mds_session *session)
1927{
1928        struct ceph_cap_snap *capsnap;
1929
1930        dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1931        list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1932                            flushing_item) {
1933                struct ceph_inode_info *ci = capsnap->ci;
1934                struct inode *inode = &ci->vfs_inode;
1935                struct ceph_cap *cap;
1936
1937                spin_lock(&ci->i_ceph_lock);
1938                cap = ci->i_auth_cap;
1939                if (cap && cap->session == session) {
1940                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1941                             cap, capsnap);
1942                        __ceph_flush_snaps(ci, &session, 1);
1943                } else {
1944                        pr_err("%p auth cap %p not mds%d ???\n", inode,
1945                               cap, session->s_mds);
1946                }
1947                spin_unlock(&ci->i_ceph_lock);
1948        }
1949}
1950
1951void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1952                             struct ceph_mds_session *session)
1953{
1954        struct ceph_inode_info *ci;
1955
1956        kick_flushing_capsnaps(mdsc, session);
1957
1958        dout("kick_flushing_caps mds%d\n", session->s_mds);
1959        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1960                struct inode *inode = &ci->vfs_inode;
1961                struct ceph_cap *cap;
1962                int delayed = 0;
1963
1964                spin_lock(&ci->i_ceph_lock);
1965                cap = ci->i_auth_cap;
1966                if (cap && cap->session == session) {
1967                        dout("kick_flushing_caps %p cap %p %s\n", inode,
1968                             cap, ceph_cap_string(ci->i_flushing_caps));
1969                        delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1970                                             __ceph_caps_used(ci),
1971                                             __ceph_caps_wanted(ci),
1972                                             cap->issued | cap->implemented,
1973                                             ci->i_flushing_caps, NULL);
1974                        if (delayed) {
1975                                spin_lock(&ci->i_ceph_lock);
1976                                __cap_delay_requeue(mdsc, ci);
1977                                spin_unlock(&ci->i_ceph_lock);
1978                        }
1979                } else {
1980                        pr_err("%p auth cap %p not mds%d ???\n", inode,
1981                               cap, session->s_mds);
1982                        spin_unlock(&ci->i_ceph_lock);
1983                }
1984        }
1985}
1986
1987static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1988                                     struct ceph_mds_session *session,
1989                                     struct inode *inode)
1990{
1991        struct ceph_inode_info *ci = ceph_inode(inode);
1992        struct ceph_cap *cap;
1993        int delayed = 0;
1994
1995        spin_lock(&ci->i_ceph_lock);
1996        cap = ci->i_auth_cap;
1997        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1998             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
1999
2000        __ceph_flush_snaps(ci, &session, 1);
2001
2002        if (ci->i_flushing_caps) {
2003                spin_lock(&mdsc->cap_dirty_lock);
2004                list_move_tail(&ci->i_flushing_item,
2005                               &cap->session->s_cap_flushing);
2006                spin_unlock(&mdsc->cap_dirty_lock);
2007
2008                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
2009                                     __ceph_caps_used(ci),
2010                                     __ceph_caps_wanted(ci),
2011                                     cap->issued | cap->implemented,
2012                                     ci->i_flushing_caps, NULL);
2013                if (delayed) {
2014                        spin_lock(&ci->i_ceph_lock);
2015                        __cap_delay_requeue(mdsc, ci);
2016                        spin_unlock(&ci->i_ceph_lock);
2017                }
2018        } else {
2019                spin_unlock(&ci->i_ceph_lock);
2020        }
2021}
2022
2023
2024/*
2025 * Take references to capabilities we hold, so that we don't release
2026 * them to the MDS prematurely.
2027 *
2028 * Protected by i_ceph_lock.
2029 */
2030static void __take_cap_refs(struct ceph_inode_info *ci, int got)
2031{
2032        if (got & CEPH_CAP_PIN)
2033                ci->i_pin_ref++;
2034        if (got & CEPH_CAP_FILE_RD)
2035                ci->i_rd_ref++;
2036        if (got & CEPH_CAP_FILE_CACHE)
2037                ci->i_rdcache_ref++;
2038        if (got & CEPH_CAP_FILE_WR)
2039                ci->i_wr_ref++;
2040        if (got & CEPH_CAP_FILE_BUFFER) {
2041                if (ci->i_wb_ref == 0)
2042                        ihold(&ci->vfs_inode);
2043                ci->i_wb_ref++;
2044                dout("__take_cap_refs %p wb %d -> %d (?)\n",
2045                     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2046        }
2047}
2048
2049/*
2050 * Try to grab cap references.  Specify those refs we @want, and the
2051 * minimal set we @need.  Also include the larger offset we are writing
2052 * to (when applicable), and check against max_size here as well.
2053 * Note that caller is responsible for ensuring max_size increases are
2054 * requested from the MDS.
2055 */
2056static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2057                            int *got, loff_t endoff, int *check_max, int *err)
2058{
2059        struct inode *inode = &ci->vfs_inode;
2060        int ret = 0;
2061        int have, implemented;
2062        int file_wanted;
2063
2064        dout("get_cap_refs %p need %s want %s\n", inode,
2065             ceph_cap_string(need), ceph_cap_string(want));
2066        spin_lock(&ci->i_ceph_lock);
2067
2068        /* make sure file is actually open */
2069        file_wanted = __ceph_caps_file_wanted(ci);
2070        if ((file_wanted & need) == 0) {
2071                dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2072                     ceph_cap_string(need), ceph_cap_string(file_wanted));
2073                *err = -EBADF;
2074                ret = 1;
2075                goto out;
2076        }
2077
2078        /* finish pending truncate */
2079        while (ci->i_truncate_pending) {
2080                spin_unlock(&ci->i_ceph_lock);
2081                __ceph_do_pending_vmtruncate(inode);
2082                spin_lock(&ci->i_ceph_lock);
2083        }
2084
2085        have = __ceph_caps_issued(ci, &implemented);
2086
2087        if (have & need & CEPH_CAP_FILE_WR) {
2088                if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2089                        dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2090                             inode, endoff, ci->i_max_size);
2091                        if (endoff > ci->i_requested_max_size) {
2092                                *check_max = 1;
2093                                ret = 1;
2094                        }
2095                        goto out;
2096                }
2097                /*
2098                 * If a sync write is in progress, we must wait, so that we
2099                 * can get a final snapshot value for size+mtime.
2100                 */
2101                if (__ceph_have_pending_cap_snap(ci)) {
2102                        dout("get_cap_refs %p cap_snap_pending\n", inode);
2103                        goto out;
2104                }
2105        }
2106
2107        if ((have & need) == need) {
2108                /*
2109                 * Look at (implemented & ~have & not) so that we keep waiting
2110                 * on transition from wanted -> needed caps.  This is needed
2111                 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2112                 * going before a prior buffered writeback happens.
2113                 */
2114                int not = want & ~(have & need);
2115                int revoking = implemented & ~have;
2116                dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2117                     inode, ceph_cap_string(have), ceph_cap_string(not),
2118                     ceph_cap_string(revoking));
2119                if ((revoking & not) == 0) {
2120                        *got = need | (have & want);
2121                        __take_cap_refs(ci, *got);
2122                        ret = 1;
2123                }
2124        } else {
2125                dout("get_cap_refs %p have %s needed %s\n", inode,
2126                     ceph_cap_string(have), ceph_cap_string(need));
2127        }
2128out:
2129        spin_unlock(&ci->i_ceph_lock);
2130        dout("get_cap_refs %p ret %d got %s\n", inode,
2131             ret, ceph_cap_string(*got));
2132        return ret;
2133}
2134
2135/*
2136 * Check the offset we are writing up to against our current
2137 * max_size.  If necessary, tell the MDS we want to write to
2138 * a larger offset.
2139 */
2140static void check_max_size(struct inode *inode, loff_t endoff)
2141{
2142        struct ceph_inode_info *ci = ceph_inode(inode);
2143        int check = 0;
2144
2145        /* do we need to explicitly request a larger max_size? */
2146        spin_lock(&ci->i_ceph_lock);
2147        if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2148                dout("write %p at large endoff %llu, req max_size\n",
2149                     inode, endoff);
2150                ci->i_wanted_max_size = endoff;
2151        }
2152        /* duplicate ceph_check_caps()'s logic */
2153        if (ci->i_auth_cap &&
2154            (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2155            ci->i_wanted_max_size > ci->i_max_size &&
2156            ci->i_wanted_max_size > ci->i_requested_max_size)
2157                check = 1;
2158        spin_unlock(&ci->i_ceph_lock);
2159        if (check)
2160                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2161}
2162
2163/*
2164 * Wait for caps, and take cap references.  If we can't get a WR cap
2165 * due to a small max_size, make sure we check_max_size (and possibly
2166 * ask the mds) so we don't get hung up indefinitely.
2167 */
2168int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2169                  loff_t endoff)
2170{
2171        int check_max, ret, err;
2172
2173retry:
2174        if (endoff > 0)
2175                check_max_size(&ci->vfs_inode, endoff);
2176        check_max = 0;
2177        err = 0;
2178        ret = wait_event_interruptible(ci->i_cap_wq,
2179                                       try_get_cap_refs(ci, need, want,
2180                                                        got, endoff,
2181                                                        &check_max, &err));
2182        if (err)
2183                ret = err;
2184        if (check_max)
2185                goto retry;
2186        return ret;
2187}
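
/*
 * Illustrative read-path sketch (this mirrors how fs/ceph/file.c is
 * expected to use the API; the surrounding error handling is an
 * assumption, not code from this file): need FILE_RD, opportunistically
 * want FILE_CACHE, and drop the refs when the I/O is done.
 *
 *	int got = 0, err;
 *
 *	err = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
 *			    &got, -1);
 *	if (err < 0)
 *		return err;
 *	...perform the read, honoring whether FILE_CACHE is in 'got'...
 *	ceph_put_cap_refs(ci, got);
 *
 * Writers pass the end offset of the write as @endoff so check_max_size()
 * can ask the MDS for a larger max_size before we block waiting for caps.
 */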
2188
2189/*
2190 * Take cap refs.  Caller must already know we hold at least one ref
2191 * on the caps in question or we don't know this is safe.
2192 */
2193void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2194{
2195        spin_lock(&ci->i_ceph_lock);
2196        __take_cap_refs(ci, caps);
2197        spin_unlock(&ci->i_ceph_lock);
2198}
2199
2200/*
2201 * Release cap refs.
2202 *
2203 * If we released the last ref on any given cap, call ceph_check_caps
2204 * to release (or schedule a release).
2205 *
2206 * If we are releasing a WR cap (from a sync write), finalize any affected
2207 * cap_snap, and wake up any waiters.
2208 */
2209void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2210{
2211        struct inode *inode = &ci->vfs_inode;
2212        int last = 0, put = 0, flushsnaps = 0, wake = 0;
2213        struct ceph_cap_snap *capsnap;
2214
2215        spin_lock(&ci->i_ceph_lock);
2216        if (had & CEPH_CAP_PIN)
2217                --ci->i_pin_ref;
2218        if (had & CEPH_CAP_FILE_RD)
2219                if (--ci->i_rd_ref == 0)
2220                        last++;
2221        if (had & CEPH_CAP_FILE_CACHE)
2222                if (--ci->i_rdcache_ref == 0)
2223                        last++;
2224        if (had & CEPH_CAP_FILE_BUFFER) {
2225                if (--ci->i_wb_ref == 0) {
2226                        last++;
2227                        put++;
2228                }
2229                dout("put_cap_refs %p wb %d -> %d (?)\n",
2230                     inode, ci->i_wb_ref+1, ci->i_wb_ref);
2231        }
2232        if (had & CEPH_CAP_FILE_WR)
2233                if (--ci->i_wr_ref == 0) {
2234                        last++;
2235                        if (!list_empty(&ci->i_cap_snaps)) {
2236                                capsnap = list_first_entry(&ci->i_cap_snaps,
2237                                                     struct ceph_cap_snap,
2238                                                     ci_item);
2239                                if (capsnap->writing) {
2240                                        capsnap->writing = 0;
2241                                        flushsnaps =
2242                                                __ceph_finish_cap_snap(ci,
2243                                                                       capsnap);
2244                                        wake = 1;
2245                                }
2246                        }
2247                }
2248        spin_unlock(&ci->i_ceph_lock);
2249
2250        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2251             last ? " last" : "", put ? " put" : "");
2252
2253        if (last && !flushsnaps)
2254                ceph_check_caps(ci, 0, NULL);
2255        else if (flushsnaps)
2256                ceph_flush_snaps(ci);
2257        if (wake)
2258                wake_up_all(&ci->i_cap_wq);
2259        if (put)
2260                iput(inode);
2261}
2262
2263/*
2264 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2265 * context.  Adjust per-snap dirty page accounting as appropriate.
2266 * Once all dirty data for a cap_snap is flushed, flush snapped file
2267 * metadata back to the MDS.  If we dropped the last ref, call
2268 * ceph_check_caps.
2269 */
2270void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2271                                struct ceph_snap_context *snapc)
2272{
2273        struct inode *inode = &ci->vfs_inode;
2274        int last = 0;
2275        int complete_capsnap = 0;
2276        int drop_capsnap = 0;
2277        int found = 0;
2278        struct ceph_cap_snap *capsnap = NULL;
2279
2280        spin_lock(&ci->i_ceph_lock);
2281        ci->i_wrbuffer_ref -= nr;
2282        last = !ci->i_wrbuffer_ref;
2283
2284        if (ci->i_head_snapc == snapc) {
2285                ci->i_wrbuffer_ref_head -= nr;
2286                if (ci->i_wrbuffer_ref_head == 0 &&
2287                    ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
2288                        BUG_ON(!ci->i_head_snapc);
2289                        ceph_put_snap_context(ci->i_head_snapc);
2290                        ci->i_head_snapc = NULL;
2291                }
2292                dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2293                     inode,
2294                     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2295                     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2296                     last ? " LAST" : "");
2297        } else {
2298                list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2299                        if (capsnap->context == snapc) {
2300                                found = 1;
2301                                break;
2302                        }
2303                }
2304                BUG_ON(!found);
2305                capsnap->dirty_pages -= nr;
2306                if (capsnap->dirty_pages == 0) {
2307                        complete_capsnap = 1;
2308                        if (capsnap->dirty == 0)
2309                                /* cap writeback completed before we created
2310                                 * the cap_snap; no FLUSHSNAP is needed */
2311                                drop_capsnap = 1;
2312                }
2313                dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2314                     " snap %lld %d/%d -> %d/%d %s%s%s\n",
2315                     inode, capsnap, capsnap->context->seq,
2316                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2317                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2318                     last ? " (wrbuffer last)" : "",
2319                     complete_capsnap ? " (complete capsnap)" : "",
2320                     drop_capsnap ? " (drop capsnap)" : "");
2321                if (drop_capsnap) {
2322                        ceph_put_snap_context(capsnap->context);
2323                        list_del(&capsnap->ci_item);
2324                        list_del(&capsnap->flushing_item);
2325                        ceph_put_cap_snap(capsnap);
2326                }
2327        }
2328
2329        spin_unlock(&ci->i_ceph_lock);
2330
2331        if (last) {
2332                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2333                iput(inode);
2334        } else if (complete_capsnap) {
2335                ceph_flush_snaps(ci);
2336                wake_up_all(&ci->i_cap_wq);
2337        }
2338        if (drop_capsnap)
2339                iput(inode);
2340}
2341
2342/*
2343 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
2344 */
2345static void invalidate_aliases(struct inode *inode)
2346{
2347        struct dentry *dn, *prev = NULL;
2348
2349        dout("invalidate_aliases inode %p\n", inode);
2350        d_prune_aliases(inode);
2351        /*
2352         * For a non-directory inode, d_find_alias() only returns
2353         * connected dentries.  After calling d_invalidate(), the
2354         * dentry becomes disconnected.
2355         *
2356         * For a directory inode, d_find_alias() can return a
2357         * disconnected dentry, but a directory inode should have
2358         * at most one alias.
2359         */
2360        while ((dn = d_find_alias(inode))) {
2361                if (dn == prev) {
2362                        dput(dn);
2363                        break;
2364                }
2365                d_invalidate(dn);
2366                if (prev)
2367                        dput(prev);
2368                prev = dn;
2369        }
2370        if (prev)
2371                dput(prev);
2372}
2373
2374/*
2375 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2376 * actually be a revocation if it specifies a smaller cap set.)
2377 *
2378 * caller holds s_mutex and i_ceph_lock, we drop both.
2379 *
2380 * return value:
2381 *  0 - ok
2382 *  1 - check_caps on auth cap only (writeback)
2383 *  2 - check_caps (ack revoke)
2384 */
2385static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2386                             struct ceph_mds_session *session,
2387                             struct ceph_cap *cap,
2388                             struct ceph_buffer *xattr_buf)
2389                __releases(ci->i_ceph_lock)
2390{
2391        struct ceph_inode_info *ci = ceph_inode(inode);
2392        int mds = session->s_mds;
2393        int seq = le32_to_cpu(grant->seq);
2394        int newcaps = le32_to_cpu(grant->caps);
2395        int issued, implemented, used, wanted, dirty;
2396        u64 size = le64_to_cpu(grant->size);
2397        u64 max_size = le64_to_cpu(grant->max_size);
2398        struct timespec mtime, atime, ctime;
2399        int check_caps = 0;
2400        int wake = 0;
2401        int writeback = 0;
2402        int queue_invalidate = 0;
2403        int deleted_inode = 0;
2404        int queue_revalidate = 0;
2405
2406        dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2407             inode, cap, mds, seq, ceph_cap_string(newcaps));
2408        dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2409                inode->i_size);
2410
2411        /*
2412         * If CACHE is being revoked, and we have no dirty buffers,
2413         * try to invalidate (once).  (If there are dirty buffers, we
2414         * will invalidate _after_ writeback.)
2415         */
2416        if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2417            (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2418            !ci->i_wrbuffer_ref) {
2419                if (try_nonblocking_invalidate(inode)) {
2420                        /* there were locked pages.. invalidate later
2421                           in a separate thread. */
2422                        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2423                                queue_invalidate = 1;
2424                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
2425                        }
2426                }
2427
2428                ceph_fscache_invalidate(inode);
2429        }
2430
2431        /* side effects now are allowed */
2432
2433        issued = __ceph_caps_issued(ci, &implemented);
2434        issued |= implemented | __ceph_caps_dirty(ci);
2435
2436        cap->cap_gen = session->s_cap_gen;
2437
2438        __check_cap_issue(ci, cap, newcaps);
2439
2440        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2441                inode->i_mode = le32_to_cpu(grant->mode);
2442                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2443                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2444                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2445                     from_kuid(&init_user_ns, inode->i_uid),
2446                     from_kgid(&init_user_ns, inode->i_gid));
2447        }
2448
2449        if ((issued & CEPH_CAP_LINK_EXCL) == 0) {
2450                set_nlink(inode, le32_to_cpu(grant->nlink));
2451                if (inode->i_nlink == 0 &&
2452                    (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
2453                        deleted_inode = 1;
2454        }
2455
2456        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2457                int len = le32_to_cpu(grant->xattr_len);
2458                u64 version = le64_to_cpu(grant->xattr_version);
2459
2460                if (version > ci->i_xattrs.version) {
2461                        dout(" got new xattrs v%llu on %p len %d\n",
2462                             version, inode, len);
2463                        if (ci->i_xattrs.blob)
2464                                ceph_buffer_put(ci->i_xattrs.blob);
2465                        ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2466                        ci->i_xattrs.version = version;
2467                }
2468        }
2469
2470        /* Do we need to revalidate our fscache cookie?  Don't bother on the
2471         * first cache cap, as we already validate at cookie creation time. */
2472        if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
2473                queue_revalidate = 1;
2474
2475        /* size/ctime/mtime/atime? */
2476        ceph_fill_file_size(inode, issued,
2477                            le32_to_cpu(grant->truncate_seq),
2478                            le64_to_cpu(grant->truncate_size), size);
2479        ceph_decode_timespec(&mtime, &grant->mtime);
2480        ceph_decode_timespec(&atime, &grant->atime);
2481        ceph_decode_timespec(&ctime, &grant->ctime);
2482        ceph_fill_file_time(inode, issued,
2483                            le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2484                            &atime);
2485
2486        /* max size increase? */
2487        if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2488                dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2489                ci->i_max_size = max_size;
2490                if (max_size >= ci->i_wanted_max_size) {
2491                        ci->i_wanted_max_size = 0;  /* reset */
2492                        ci->i_requested_max_size = 0;
2493                }
2494                wake = 1;
2495        }
2496
2497        /* check cap bits */
2498        wanted = __ceph_caps_wanted(ci);
2499        used = __ceph_caps_used(ci);
2500        dirty = __ceph_caps_dirty(ci);
2501        dout(" my wanted = %s, used = %s, dirty %s\n",
2502             ceph_cap_string(wanted),
2503             ceph_cap_string(used),
2504             ceph_cap_string(dirty));
2505        if (wanted != le32_to_cpu(grant->wanted)) {
2506                dout("mds wanted %s -> %s\n",
2507                     ceph_cap_string(le32_to_cpu(grant->wanted)),
2508                     ceph_cap_string(wanted));
2509                /* imported cap may not have correct mds_wanted */
2510                if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
2511                        check_caps = 1;
2512        }
2513
2514        cap->seq = seq;
2515
2516        /* file layout may have changed */
2517        ci->i_layout = grant->layout;
2518
2519        /* revocation, grant, or no-op? */
2520        if (cap->issued & ~newcaps) {
2521                int revoking = cap->issued & ~newcaps;
2522
2523                dout("revocation: %s -> %s (revoking %s)\n",
2524                     ceph_cap_string(cap->issued),
2525                     ceph_cap_string(newcaps),
2526                     ceph_cap_string(revoking));
2527                if (revoking & used & CEPH_CAP_FILE_BUFFER)
2528                        writeback = 1;  /* initiate writeback; will delay ack */
2529                else if (revoking == CEPH_CAP_FILE_CACHE &&
2530                         (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2531                         queue_invalidate)
2532                        ; /* do nothing yet, invalidation will be queued */
2533                else if (cap == ci->i_auth_cap)
2534                        check_caps = 1; /* check auth cap only */
2535                else
2536                        check_caps = 2; /* check all caps */
2537                cap->issued = newcaps;
2538                cap->implemented |= newcaps;
2539        } else if (cap->issued == newcaps) {
2540                dout("caps unchanged: %s -> %s\n",
2541                     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2542        } else {
2543                dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2544                     ceph_cap_string(newcaps));
2545                /* non-auth MDS is revoking the newly granted caps? */
2546                if (cap == ci->i_auth_cap &&
2547                    __ceph_caps_revoking_other(ci, cap, newcaps))
2548                    check_caps = 2;
2549
2550                cap->issued = newcaps;
2551                cap->implemented |= newcaps; /* add bits only, to
2552                                              * avoid stepping on a
2553                                              * pending revocation */
2554                wake = 1;
2555        }
2556        BUG_ON(cap->issued & ~cap->implemented);
2557
2558        spin_unlock(&ci->i_ceph_lock);
2559
2560        if (writeback)
2561                /*
2562                 * queue inode for writeback: we can't actually call
2563                 * filemap_write_and_wait, etc. from message handler
2564                 * context.
2565                 */
2566                ceph_queue_writeback(inode);
2567        if (queue_invalidate)
2568                ceph_queue_invalidate(inode);
2569        if (deleted_inode)
2570                invalidate_aliases(inode);
2571        if (queue_revalidate)
2572                ceph_queue_revalidate(inode);
2573        if (wake)
2574                wake_up_all(&ci->i_cap_wq);
2575
2576        if (check_caps == 1)
2577                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2578                                session);
2579        else if (check_caps == 2)
2580                ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2581        else
2582                mutex_unlock(&session->s_mutex);
2583}
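
/*
 * Worked example for the revocation branch above (illustrative values,
 * not from a real trace): if cap->issued is Fscrwb and the MDS grants
 * newcaps = Fsc, then
 *
 *	revoking = cap->issued & ~newcaps = Frwb
 *
 * If the client still has dirty buffers in use (used includes Fb), the
 * revoking & used & CEPH_CAP_FILE_BUFFER test fires: writeback is queued
 * and the ack to the MDS is effectively deferred until the data has been
 * written back.  Otherwise we fall through to the check_caps handling so
 * the revoked bits are acked promptly.
 */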
2584
2585/*
2586 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2587 * MDS has been safely committed.
2588 */
2589static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2590                                 struct ceph_mds_caps *m,
2591                                 struct ceph_mds_session *session,
2592                                 struct ceph_cap *cap)
2593        __releases(ci->i_ceph_lock)
2594{
2595        struct ceph_inode_info *ci = ceph_inode(inode);
2596        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2597        unsigned seq = le32_to_cpu(m->seq);
2598        int dirty = le32_to_cpu(m->dirty);
2599        int cleaned = 0;
2600        int drop = 0;
2601        int i;
2602
2603        for (i = 0; i < CEPH_CAP_BITS; i++)
2604                if ((dirty & (1 << i)) &&
2605                    flush_tid == ci->i_cap_flush_tid[i])
2606                        cleaned |= 1 << i;
2607
2608        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2609             " flushing %s -> %s\n",
2610             inode, session->s_mds, seq, ceph_cap_string(dirty),
2611             ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2612             ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2613
2614        if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2615                goto out;
2616
2617        ci->i_flushing_caps &= ~cleaned;
2618
2619        spin_lock(&mdsc->cap_dirty_lock);
2620        if (ci->i_flushing_caps == 0) {
2621                list_del_init(&ci->i_flushing_item);
2622                if (!list_empty(&session->s_cap_flushing))
2623                        dout(" mds%d still flushing cap on %p\n",
2624                             session->s_mds,
2625                             &list_entry(session->s_cap_flushing.next,
2626                                         struct ceph_inode_info,
2627                                         i_flushing_item)->vfs_inode);
2628                mdsc->num_cap_flushing--;
2629                wake_up_all(&mdsc->cap_flushing_wq);
2630                dout(" inode %p now !flushing\n", inode);
2631
2632                if (ci->i_dirty_caps == 0) {
2633                        dout(" inode %p now clean\n", inode);
2634                        BUG_ON(!list_empty(&ci->i_dirty_item));
2635                        drop = 1;
2636                        if (ci->i_wrbuffer_ref_head == 0) {
2637                                BUG_ON(!ci->i_head_snapc);
2638                                ceph_put_snap_context(ci->i_head_snapc);
2639                                ci->i_head_snapc = NULL;
2640                        }
2641                } else {
2642                        BUG_ON(list_empty(&ci->i_dirty_item));
2643                }
2644        }
2645        spin_unlock(&mdsc->cap_dirty_lock);
2646        wake_up_all(&ci->i_cap_wq);
2647
2648out:
2649        spin_unlock(&ci->i_ceph_lock);
2650        if (drop)
2651                iput(inode);
2652}
2653
2654/*
2655 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
2656 * throw away our cap_snap.
2657 *
2658 * Caller holds s_mutex.
2659 */
2660static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2661                                     struct ceph_mds_caps *m,
2662                                     struct ceph_mds_session *session)
2663{
2664        struct ceph_inode_info *ci = ceph_inode(inode);
2665        u64 follows = le64_to_cpu(m->snap_follows);
2666        struct ceph_cap_snap *capsnap;
2667        int drop = 0;
2668
2669        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2670             inode, ci, session->s_mds, follows);
2671
2672        spin_lock(&ci->i_ceph_lock);
2673        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2674                if (capsnap->follows == follows) {
2675                        if (capsnap->flush_tid != flush_tid) {
2676                                dout(" cap_snap %p follows %lld tid %lld !="
2677                                     " %lld\n", capsnap, follows,
2678                                     flush_tid, capsnap->flush_tid);
2679                                break;
2680                        }
2681                        WARN_ON(capsnap->dirty_pages || capsnap->writing);
2682                        dout(" removing %p cap_snap %p follows %lld\n",
2683                             inode, capsnap, follows);
2684                        ceph_put_snap_context(capsnap->context);
2685                        list_del(&capsnap->ci_item);
2686                        list_del(&capsnap->flushing_item);
2687                        ceph_put_cap_snap(capsnap);
2688                        drop = 1;
2689                        break;
2690                } else {
2691                        dout(" skipping cap_snap %p follows %lld\n",
2692                             capsnap, capsnap->follows);
2693                }
2694        }
2695        spin_unlock(&ci->i_ceph_lock);
2696        if (drop)
2697                iput(inode);
2698}
2699
2700/*
2701 * Handle TRUNC from MDS, indicating file truncation.
2702 *
2703 * caller holds s_mutex.
2704 */
2705static void handle_cap_trunc(struct inode *inode,
2706                             struct ceph_mds_caps *trunc,
2707                             struct ceph_mds_session *session)
2708        __releases(ci->i_ceph_lock)
2709{
2710        struct ceph_inode_info *ci = ceph_inode(inode);
2711        int mds = session->s_mds;
2712        int seq = le32_to_cpu(trunc->seq);
2713        u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2714        u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2715        u64 size = le64_to_cpu(trunc->size);
2716        int implemented = 0;
2717        int dirty = __ceph_caps_dirty(ci);
2718        int issued = __ceph_caps_issued(ci, &implemented);
2719        int queue_trunc = 0;
2720
2721        issued |= implemented | dirty;
2722
2723        dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2724             inode, mds, seq, truncate_size, truncate_seq);
2725        queue_trunc = ceph_fill_file_size(inode, issued,
2726                                          truncate_seq, truncate_size, size);
2727        spin_unlock(&ci->i_ceph_lock);
2728
2729        if (queue_trunc) {
2730                ceph_queue_vmtruncate(inode);
2731                ceph_fscache_invalidate(inode);
2732        }
2733}
2734
2735/*
2736 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
2737 * different one.  If we are the most recent migration we've seen (as
2738 * indicated by mseq), make note of the migrating cap bits for the
2739 * duration (until we see the corresponding IMPORT).
2740 *
2741 * caller holds s_mutex
2742 */
2743static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2744                              struct ceph_mds_session *session,
2745                              int *open_target_sessions)
2746{
2747        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2748        struct ceph_inode_info *ci = ceph_inode(inode);
2749        int mds = session->s_mds;
2750        unsigned mseq = le32_to_cpu(ex->migrate_seq);
2751        struct ceph_cap *cap = NULL, *t;
2752        struct rb_node *p;
2753        int remember = 1;
2754
2755        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2756             inode, ci, mds, mseq);
2757
2758        spin_lock(&ci->i_ceph_lock);
2759
2760        /* make sure we haven't seen a higher mseq */
2761        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2762                t = rb_entry(p, struct ceph_cap, ci_node);
2763                if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2764                        dout(" higher mseq on cap from mds%d\n",
2765                             t->session->s_mds);
2766                        remember = 0;
2767                }
2768                if (t->session->s_mds == mds)
2769                        cap = t;
2770        }
2771
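            /*
             * If we found a cap from this mds, note the exporting state
             * (unless a newer migration has already been seen) and drop the
             * cap; the matching IMPORT from the target mds will re-add it.
             */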
2772        if (cap) {
2773                if (remember) {
2774                        /* make note */
2775                        ci->i_cap_exporting_mds = mds;
2776                        ci->i_cap_exporting_mseq = mseq;
2777                        ci->i_cap_exporting_issued = cap->issued;
2778
2779                        /*
2780                         * make sure we have open sessions with all possible
2781                         * export targets, so that we get the matching IMPORT
2782                         */
2783                        *open_target_sessions = 1;
2784
2785                        /*
2786                         * we can't flush dirty caps that we've seen the
2787                         * EXPORT but no IMPORT for
2788                         */
2789                        spin_lock(&mdsc->cap_dirty_lock);
2790                        if (!list_empty(&ci->i_dirty_item)) {
2791                                dout(" moving %p to cap_dirty_migrating\n",
2792                                     inode);
2793                                list_move(&ci->i_dirty_item,
2794                                          &mdsc->cap_dirty_migrating);
2795                        }
2796                        spin_unlock(&mdsc->cap_dirty_lock);
2797                }
2798                __ceph_remove_cap(cap, false);
2799        }
2800        /* else, we already released it */
2801
2802        spin_unlock(&ci->i_ceph_lock);
2803}
2804
2805/*
2806 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
2807 * clean them up.
2808 *
2809 * caller holds s_mutex.
2810 */
2811static void handle_cap_import(struct ceph_mds_client *mdsc,
2812                              struct inode *inode, struct ceph_mds_caps *im,
2813                              struct ceph_mds_session *session,
2814                              void *snaptrace, int snaptrace_len)
2815{
2816        struct ceph_inode_info *ci = ceph_inode(inode);
2817        int mds = session->s_mds;
2818        unsigned issued = le32_to_cpu(im->caps);
2819        unsigned wanted = le32_to_cpu(im->wanted);
2820        unsigned seq = le32_to_cpu(im->seq);
2821        unsigned mseq = le32_to_cpu(im->migrate_seq);
2822        u64 realmino = le64_to_cpu(im->realm);
2823        u64 cap_id = le64_to_cpu(im->cap_id);
2824
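            /*
             * If this IMPORT supersedes an EXPORT we noted earlier (i.e. it
             * carries a newer mseq), clear the temporary exporting state and
             * move the inode back to the normal dirty list so its dirty caps
             * can be flushed again.
             */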
2825        if (ci->i_cap_exporting_mds >= 0 &&
2826            ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2827                dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2828                     " - cleared exporting from mds%d\n",
2829                     inode, ci, mds, mseq,
2830                     ci->i_cap_exporting_mds);
2831                ci->i_cap_exporting_issued = 0;
2832                ci->i_cap_exporting_mseq = 0;
2833                ci->i_cap_exporting_mds = -1;
2834
2835                spin_lock(&mdsc->cap_dirty_lock);
2836                if (!list_empty(&ci->i_dirty_item)) {
2837                        dout(" moving %p back to cap_dirty\n", inode);
2838                        list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
2839                }
2840                spin_unlock(&mdsc->cap_dirty_lock);
2841        } else {
2842                dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2843                     inode, ci, mds, mseq);
2844        }
2845
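            /*
             * Apply the snap trace that accompanied the IMPORT before
             * installing the cap; ceph_add_cap() expects snap_rwsem held
             * for read, so take it for write, update, then downgrade.
             */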
2846        down_write(&mdsc->snap_rwsem);
2847        ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2848                               false);
2849        downgrade_write(&mdsc->snap_rwsem);
2850        ceph_add_cap(inode, session, cap_id, -1,
2851                     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2852                     NULL /* no caps context */);
2853        kick_flushing_inode_caps(mdsc, session, inode);
2854        up_read(&mdsc->snap_rwsem);
2855
2856        /* make sure we re-request max_size, if necessary */
2857        spin_lock(&ci->i_ceph_lock);
2858        ci->i_wanted_max_size = 0;  /* reset */
2859        ci->i_requested_max_size = 0;
2860        spin_unlock(&ci->i_ceph_lock);
2861}
2862
2863/*
2864 * Handle a caps message from the MDS.
2865 *
2866 * Identify the appropriate session, inode, and call the right handler
2867 * based on the cap op.
2868 */
2869void ceph_handle_caps(struct ceph_mds_session *session,
2870                      struct ceph_msg *msg)
2871{
2872        struct ceph_mds_client *mdsc = session->s_mdsc;
2873        struct super_block *sb = mdsc->fsc->sb;
2874        struct inode *inode;
2875        struct ceph_inode_info *ci;
2876        struct ceph_cap *cap;
2877        struct ceph_mds_caps *h;
2878        int mds = session->s_mds;
2879        int op;
2880        u32 seq, mseq;
2881        struct ceph_vino vino;
2882        u64 cap_id;
2883        u64 size, max_size;
2884        u64 tid;
2885        void *snaptrace;
2886        size_t snaptrace_len;
2887        void *flock;
2888        u32 flock_len;
2889        int open_target_sessions = 0;
2890
2891        dout("handle_caps from mds%d\n", mds);
2892
2893        /* decode */
2894        tid = le64_to_cpu(msg->hdr.tid);
2895        if (msg->front.iov_len < sizeof(*h))
2896                goto bad;
2897        h = msg->front.iov_base;
2898        op = le32_to_cpu(h->op);
2899        vino.ino = le64_to_cpu(h->ino);
2900        vino.snap = CEPH_NOSNAP;
2901        cap_id = le64_to_cpu(h->cap_id);
2902        seq = le32_to_cpu(h->seq);
2903        mseq = le32_to_cpu(h->migrate_seq);
2904        size = le64_to_cpu(h->size);
2905        max_size = le64_to_cpu(h->max_size);
2906
2907        snaptrace = h + 1;
2908        snaptrace_len = le32_to_cpu(h->snap_trace_len);
2909
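            /* v2+ messages carry a flock blob immediately after the snap trace */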
2910        if (le16_to_cpu(msg->hdr.version) >= 2) {
2911                void *p, *end;
2912
2913                p = snaptrace + snaptrace_len;
2914                end = msg->front.iov_base + msg->front.iov_len;
2915                ceph_decode_32_safe(&p, end, flock_len, bad);
2916                flock = p;
2917        } else {
2918                flock = NULL;
2919                flock_len = 0;
2920        }
2921
2922        mutex_lock(&session->s_mutex);
2923        session->s_seq++;
2924        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2925             (unsigned)seq);
2926
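            /*
             * For IMPORT, make sure release message space is available up
             * front, in case we end up releasing the cap right away (e.g.
             * the inode is no longer in our cache).
             */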
2927        if (op == CEPH_CAP_OP_IMPORT)
2928                ceph_add_cap_releases(mdsc, session);
2929
2930        /* lookup ino */
2931        inode = ceph_find_inode(sb, vino);
2932        ci = ceph_inode(inode);
2933        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2934             vino.snap, inode);
2935        if (!inode) {
2936                dout(" i don't have ino %llx\n", vino.ino);
2937
2938                if (op == CEPH_CAP_OP_IMPORT) {
2939                        spin_lock(&session->s_cap_lock);
2940                        __queue_cap_release(session, vino.ino, cap_id,
2941                                            mseq, seq);
2942                        spin_unlock(&session->s_cap_lock);
2943                }
2944                goto flush_cap_releases;
2945        }
2946
2947        /* these will work even if we don't have a cap yet */
2948        switch (op) {
2949        case CEPH_CAP_OP_FLUSHSNAP_ACK:
2950                handle_cap_flushsnap_ack(inode, tid, h, session);
2951                goto done;
2952
2953        case CEPH_CAP_OP_EXPORT:
2954                handle_cap_export(inode, h, session, &open_target_sessions);
2955                goto done;
2956
2957        case CEPH_CAP_OP_IMPORT:
2958                handle_cap_import(mdsc, inode, h, session,
2959                                  snaptrace, snaptrace_len);
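                    /*
                     * no goto here: fall out of the switch so the IMPORT is
                     * also processed as a grant against the newly added cap
                     */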
2960        }
2961
2962        /* the rest require a cap */
2963        spin_lock(&ci->i_ceph_lock);
2964        cap = __get_cap_for_mds(ci, mds);
2965        if (!cap) {
2966                dout(" no cap on %p ino %llx.%llx from mds%d\n",
2967                     inode, ceph_ino(inode), ceph_snap(inode), mds);
2968                spin_unlock(&ci->i_ceph_lock);
2969                goto flush_cap_releases;
2970        }
2971
2972        /* note that each of these drops i_ceph_lock for us */
2973        switch (op) {
2974        case CEPH_CAP_OP_REVOKE:
2975        case CEPH_CAP_OP_GRANT:
2976        case CEPH_CAP_OP_IMPORT:
2977                handle_cap_grant(inode, h, session, cap, msg->middle);
2978                goto done_unlocked;
2979
2980        case CEPH_CAP_OP_FLUSH_ACK:
2981                handle_cap_flush_ack(inode, tid, h, session, cap);
2982                break;
2983
2984        case CEPH_CAP_OP_TRUNC:
2985                handle_cap_trunc(inode, h, session);
2986                break;
2987
2988        default:
2989                spin_unlock(&ci->i_ceph_lock);
2990                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2991                       ceph_cap_op_name(op));
2992        }
2993
2994        goto done;
2995
2996flush_cap_releases:
2997        /*
2998         * send any queued cap release messages to try to move things
2999         * along for the mds (who clearly thinks we still have this
3000         * cap).
3001         */
3002        ceph_add_cap_releases(mdsc, session);
3003        ceph_send_cap_releases(mdsc, session);
3004
3005done:
3006        mutex_unlock(&session->s_mutex);
3007done_unlocked:
3008        if (inode)
3009                iput(inode);
3010        if (open_target_sessions)
3011                ceph_mdsc_open_export_target_sessions(mdsc, session);
3012        return;
3013
3014bad:
3015        pr_err("ceph_handle_caps: corrupt message\n");
3016        ceph_msg_dump(msg);
3017        return;
3018}
3019
3020/*
3021 * Delayed work handler: check caps on inodes whose delayed-cap hold time has expired (or that are flagged CEPH_I_FLUSH).
3022 */
3023void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
3024{
3025        struct ceph_inode_info *ci;
3026        int flags = CHECK_CAPS_NODELAY;
3027
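            /*
             * cap_delay_lock is dropped around each ceph_check_caps() call,
             * which may block; the list is re-checked from the front on
             * every iteration.
             */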
3028        dout("check_delayed_caps\n");
3029        while (1) {
3030                spin_lock(&mdsc->cap_delay_lock);
3031                if (list_empty(&mdsc->cap_delay_list))
3032                        break;
3033                ci = list_first_entry(&mdsc->cap_delay_list,
3034                                      struct ceph_inode_info,
3035                                      i_cap_delay_list);
3036                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
3037                    time_before(jiffies, ci->i_hold_caps_max))
3038                        break;
3039                list_del_init(&ci->i_cap_delay_list);
3040                spin_unlock(&mdsc->cap_delay_lock);
3041                dout("check_delayed_caps on %p\n", &ci->vfs_inode);
3042                ceph_check_caps(ci, flags, NULL);
3043        }
3044        spin_unlock(&mdsc->cap_delay_lock);
3045}
3046
3047/*
3048 * Flush all dirty caps to the mds
3049 */
3050void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
3051{
3052        struct ceph_inode_info *ci;
3053        struct inode *inode;
3054
3055        dout("flush_dirty_caps\n");
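            /*
             * Take an inode reference before dropping cap_dirty_lock so the
             * inode cannot go away while ceph_check_caps() runs without the
             * lock.
             */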
3056        spin_lock(&mdsc->cap_dirty_lock);
3057        while (!list_empty(&mdsc->cap_dirty)) {
3058                ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
3059                                      i_dirty_item);
3060                inode = &ci->vfs_inode;
3061                ihold(inode);
3062                dout("flush_dirty_caps %p\n", inode);
3063                spin_unlock(&mdsc->cap_dirty_lock);
3064                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
3065                iput(inode);
3066                spin_lock(&mdsc->cap_dirty_lock);
3067        }
3068        spin_unlock(&mdsc->cap_dirty_lock);
3069        dout("flush_dirty_caps done\n");
3070}
3071
3072/*
3073 * Drop open file reference.  If we were the last open file,
3074 * we may need to release capabilities to the MDS (or schedule
3075 * their delayed release).
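     *
     * Illustrative pairing (an assumption, not shown in this file): a file
     * release path would undo the fmode reference taken at open time, e.g.
     * ceph_put_fmode(ci, fi->fmode).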
3076 */
3077void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3078{
3079        struct inode *inode = &ci->vfs_inode;
3080        int last = 0;
3081
3082        spin_lock(&ci->i_ceph_lock);
3083        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
3084             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
3085        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
3086        if (--ci->i_nr_by_mode[fmode] == 0)
3087                last++;
3088        spin_unlock(&ci->i_ceph_lock);
3089
3090        if (last && ci->i_vino.snap == CEPH_NOSNAP)
3091                ceph_check_caps(ci, 0, NULL);
3092}
3093
3094/*
3095 * Helpers for embedding cap and dentry lease releases into mds
3096 * requests.
3097 *
3098 * @force is used by dentry_release (below) to force inclusion of a
3099 * record for the directory inode, even when there aren't any caps to
3100 * drop.
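     *
     * Returns 1 and advances *p past the encoded ceph_mds_request_release
     * record if a release was written, 0 otherwise.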
3101 */
3102int ceph_encode_inode_release(void **p, struct inode *inode,
3103                              int mds, int drop, int unless, int force)
3104{
3105        struct ceph_inode_info *ci = ceph_inode(inode);
3106        struct ceph_cap *cap;
3107        struct ceph_mds_request_release *rel = *p;
3108        int used, dirty;
3109        int ret = 0;
3110
3111        spin_lock(&ci->i_ceph_lock);
3112        used = __ceph_caps_used(ci);
3113        dirty = __ceph_caps_dirty(ci);
3114
3115        dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3116             inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3117             ceph_cap_string(unless));
3118
3119        /* only drop unused, clean caps */
3120        drop &= ~(used | dirty);
3121
3122        cap = __get_cap_for_mds(ci, mds);
3123        if (cap && __cap_is_valid(cap)) {
3124                if (force ||
3125                    ((cap->issued & drop) &&
3126                     (cap->issued & unless) == 0)) {
3127                        if ((cap->issued & drop) &&
3128                            (cap->issued & unless) == 0) {
3129                                int wanted = __ceph_caps_wanted(ci);
3130                                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3131                                        wanted |= cap->mds_wanted;
3132                                dout("encode_inode_release %p cap %p "
3133                                     "%s -> %s, wanted %s -> %s\n", inode, cap,
3134                                     ceph_cap_string(cap->issued),
3135                                     ceph_cap_string(cap->issued & ~drop),
3136                                     ceph_cap_string(cap->mds_wanted),
3137                                     ceph_cap_string(wanted));
3138
3139                                cap->issued &= ~drop;
3140                                cap->implemented &= ~drop;
3141                                cap->mds_wanted = wanted;
3142                        } else {
3143                                dout("encode_inode_release %p cap %p %s"
3144                                     " (force)\n", inode, cap,
3145                                     ceph_cap_string(cap->issued));
3146                        }
3147
3148                        rel->ino = cpu_to_le64(ceph_ino(inode));
3149                        rel->cap_id = cpu_to_le64(cap->cap_id);
3150                        rel->seq = cpu_to_le32(cap->seq);
3151                        rel->issue_seq = cpu_to_le32(cap->issue_seq);
3152                        rel->mseq = cpu_to_le32(cap->mseq);
3153                        rel->caps = cpu_to_le32(cap->issued);
3154                        rel->wanted = cpu_to_le32(cap->mds_wanted);
3155                        rel->dname_len = 0;
3156                        rel->dname_seq = 0;
3157                        *p += sizeof(*rel);
3158                        ret = 1;
3159                } else {
3160                        dout("encode_inode_release %p cap %p %s\n",
3161                             inode, cap, ceph_cap_string(cap->issued));
3162                }
3163        }
3164        spin_unlock(&ci->i_ceph_lock);
3165        return ret;
3166}
3167
3168int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3169                               int mds, int drop, int unless)
3170{
3171        struct inode *dir = dentry->d_parent->d_inode;
3172        struct ceph_mds_request_release *rel = *p;
3173        struct ceph_dentry_info *di = ceph_dentry(dentry);
3174        int force = 0;
3175        int ret;
3176
3177        /*
3178         * force a record for the directory caps if we have a dentry lease.
3179         * this is racy (can't take i_ceph_lock and d_lock together), but it
3180         * doesn't have to be perfect; the mds will revoke anything we don't
3181         * release.
3182         */
3183        spin_lock(&dentry->d_lock);
3184        if (di->lease_session && di->lease_session->s_mds == mds)
3185                force = 1;
3186        spin_unlock(&dentry->d_lock);
3187
3188        ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3189
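            /*
             * If a release was encoded for the directory, also embed the
             * dentry name and lease seq in the record and drop our lease.
             */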
3190        spin_lock(&dentry->d_lock);
3191        if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3192                dout("encode_dentry_release %p mds%d seq %d\n",
3193                     dentry, mds, (int)di->lease_seq);
3194                rel->dname_len = cpu_to_le32(dentry->d_name.len);
3195                memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3196                *p += dentry->d_name.len;
3197                rel->dname_seq = cpu_to_le32(di->lease_seq);
3198                __ceph_mdsc_drop_dentry_lease(dentry);
3199        }
3200        spin_unlock(&dentry->d_lock);
3201        return ret;
3202}
3203