linux/fs/ceph/caps.c
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * from at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
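
/*
 * Editor's note (illustrative example, not part of the original
 * source): a client holding CEPH_CAP_AUTH_SHARED |
 * CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD may coherently read the
 * cached auth metadata (uid, gid, mode) and read file data from the
 * OSDs, but may not buffer writes or change attributes; that would
 * additionally require *_EXCL, FILE_WR, or FILE_BUFFER grants from
 * the MDS.
 */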

/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
        if (c & CEPH_CAP_GSHARED)
                *s++ = 's';
        if (c & CEPH_CAP_GEXCL)
                *s++ = 'x';
        if (c & CEPH_CAP_GCACHE)
                *s++ = 'c';
        if (c & CEPH_CAP_GRD)
                *s++ = 'r';
        if (c & CEPH_CAP_GWR)
                *s++ = 'w';
        if (c & CEPH_CAP_GBUFFER)
                *s++ = 'b';
        if (c & CEPH_CAP_GLAZYIO)
                *s++ = 'l';
        return s;
}

const char *ceph_cap_string(int caps)
{
        int i;
        char *s;
        int c;

        spin_lock(&cap_str_lock);
        i = last_cap_str++;
        if (last_cap_str == MAX_CAP_STR)
                last_cap_str = 0;
        spin_unlock(&cap_str_lock);

        s = cap_str[i];

        if (caps & CEPH_CAP_PIN)
                *s++ = 'p';

        c = (caps >> CEPH_CAP_SAUTH) & 3;
        if (c) {
                *s++ = 'A';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SLINK) & 3;
        if (c) {
                *s++ = 'L';
                s = gcap_string(s, c);
        }

        c = (caps >> CEPH_CAP_SXATTR) & 3;
        if (c) {
                *s++ = 'X';
                s = gcap_string(s, c);
        }

        c = caps >> CEPH_CAP_SFILE;
        if (c) {
                *s++ = 'F';
                s = gcap_string(s, c);
        }

        if (s == cap_str[i])
                *s++ = '-';
        *s = 0;
        return cap_str[i];
}
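
/*
 * Worked example (editor's note, illustrative only):
 * ceph_cap_string(CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD)
 * emits 'p' for the pin, then 'F' for the FILE section followed by
 * its generic bits in gcap_string() order, yielding "pFsr".
 */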

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
        INIT_LIST_HEAD(&mdsc->caps_list);
        spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
        struct ceph_cap *cap;

        spin_lock(&mdsc->caps_list_lock);
        while (!list_empty(&mdsc->caps_list)) {
                cap = list_first_entry(&mdsc->caps_list,
                                       struct ceph_cap, caps_item);
                list_del(&cap->caps_item);
                kmem_cache_free(ceph_cap_cachep, cap);
        }
        mdsc->caps_total_count = 0;
        mdsc->caps_avail_count = 0;
        mdsc->caps_use_count = 0;
        mdsc->caps_reserve_count = 0;
        mdsc->caps_min_count = 0;
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_min_count += delta;
        BUG_ON(mdsc->caps_min_count < 0);
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reserve_caps(struct ceph_mds_client *mdsc,
                      struct ceph_cap_reservation *ctx, int need)
{
        int i;
        struct ceph_cap *cap;
        int have;
        int alloc = 0;
        LIST_HEAD(newcaps);

        dout("reserve caps ctx=%p need=%d\n", ctx, need);

        /* first reserve any caps that are already allocated */
        spin_lock(&mdsc->caps_list_lock);
        if (mdsc->caps_avail_count >= need)
                have = need;
        else
                have = mdsc->caps_avail_count;
        mdsc->caps_avail_count -= have;
        mdsc->caps_reserve_count += have;
        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        for (i = have; i < need; i++) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (!cap)
                        break;
                list_add(&cap->caps_item, &newcaps);
                alloc++;
        }
        /* we didn't manage to reserve as much as we needed */
        if (have + alloc != need)
                pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
                        ctx, need, have + alloc);

        spin_lock(&mdsc->caps_list_lock);
        mdsc->caps_total_count += alloc;
        mdsc->caps_reserve_count += alloc;
        list_splice(&newcaps, &mdsc->caps_list);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                         mdsc->caps_reserve_count +
                                         mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);

        ctx->count = need;
        dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
             ctx, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
}
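
/*
 * Worked example of the accounting above (editor's note, illustrative
 * only): with avail=4 preallocated caps and a reservation of need=10,
 * have=4 caps move from avail to reserve, alloc=6 new caps are
 * allocated, and total grows by 6 while reserve grows by 10, so
 * total == used + reserved + avail holds at each BUG_ON.
 */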

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                        struct ceph_cap_reservation *ctx)
{
        dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
        if (ctx->count) {
                spin_lock(&mdsc->caps_list_lock);
                BUG_ON(mdsc->caps_reserve_count < ctx->count);
                mdsc->caps_reserve_count -= ctx->count;
                mdsc->caps_avail_count += ctx->count;
                ctx->count = 0;
                dout("unreserve caps %d = %d used + %d resv + %d avail\n",
                     mdsc->caps_total_count, mdsc->caps_use_count,
                     mdsc->caps_reserve_count, mdsc->caps_avail_count);
                BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                                 mdsc->caps_reserve_count +
                                                 mdsc->caps_avail_count);
                spin_unlock(&mdsc->caps_list_lock);
        }
        return 0;
}

static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
                                struct ceph_cap_reservation *ctx)
{
        struct ceph_cap *cap = NULL;

        /* temporary, until we do something about cap import/export */
        if (!ctx) {
                cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
                if (cap) {
                        spin_lock(&mdsc->caps_list_lock);
                        mdsc->caps_use_count++;
                        mdsc->caps_total_count++;
                        spin_unlock(&mdsc->caps_list_lock);
                }
                return cap;
        }

        spin_lock(&mdsc->caps_list_lock);
        dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
             ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        BUG_ON(!ctx->count);
        BUG_ON(ctx->count > mdsc->caps_reserve_count);
        BUG_ON(list_empty(&mdsc->caps_list));

        ctx->count--;
        mdsc->caps_reserve_count--;
        mdsc->caps_use_count++;

        cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
        list_del(&cap->caps_item);

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
        return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
        spin_lock(&mdsc->caps_list_lock);
        dout("put_cap %p %d = %d used + %d resv + %d avail\n",
             cap, mdsc->caps_total_count, mdsc->caps_use_count,
             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        mdsc->caps_use_count--;
        /*
         * Keep some preallocated caps around (ceph_min_count), to
         * avoid lots of free/alloc churn.
         */
        if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
                                      mdsc->caps_min_count) {
                mdsc->caps_total_count--;
                kmem_cache_free(ceph_cap_cachep, cap);
        } else {
                mdsc->caps_avail_count++;
                list_add(&cap->caps_item, &mdsc->caps_list);
        }

        BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
               mdsc->caps_reserve_count + mdsc->caps_avail_count);
        spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
                             int *total, int *avail, int *used, int *reserved,
                             int *min)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;

        if (total)
                *total = mdsc->caps_total_count;
        if (avail)
                *avail = mdsc->caps_avail_count;
        if (used)
                *used = mdsc->caps_use_count;
        if (reserved)
                *reserved = mdsc->caps_reserve_count;
        if (min)
                *min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;
        struct rb_node *n = ci->i_caps.rb_node;

        while (n) {
                cap = rb_entry(n, struct ceph_cap, ci_node);
                if (mds < cap->mds)
                        n = n->rb_left;
                else if (mds > cap->mds)
                        n = n->rb_right;
                else
                        return cap;
        }
        return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
        struct ceph_cap *cap;

        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        spin_unlock(&ci->i_ceph_lock);
        return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        int mds = -1;
        struct rb_node *p;

        /* prefer mds with WR|BUFFER|EXCL caps */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                mds = cap->mds;
                if (cap->issued & (CEPH_CAP_FILE_WR |
                                   CEPH_CAP_FILE_BUFFER |
                                   CEPH_CAP_FILE_EXCL))
                        break;
        }
        return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;

        spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
        spin_unlock(&ci->i_ceph_lock);
        return mds;
}

/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
{
        struct rb_node **p = &ci->i_caps.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_cap *cap = NULL;

        while (*p) {
                parent = *p;
                cap = rb_entry(parent, struct ceph_cap, ci_node);
                if (new->mds < cap->mds)
                        p = &(*p)->rb_left;
                else if (new->mds > cap->mds)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->ci_node, parent, p);
        rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        struct ceph_mount_options *ma = mdsc->fsc->mount_options;

        ci->i_hold_caps_min = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_min * HZ);
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_max * HZ);
        dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
             ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
{
        __cap_set_timeouts(mdsc, ci);
        dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
             ci->i_ceph_flags, ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
                if (!list_empty(&ci->i_cap_delay_list)) {
                        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                                goto no_change;
                        list_del_init(&ci->i_cap_delay_list);
                }
                list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
                spin_unlock(&mdsc->cap_delay_lock);
        }
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
{
        dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
                list_del_init(&ci->i_cap_delay_list);
        list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
{
        dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
        list_del_init(&ci->i_cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                              unsigned issued)
{
        unsigned had = __ceph_caps_issued(ci, NULL);

        /*
         * Each time we receive FILE_CACHE anew, we increment
         * i_rdcache_gen.
         */
        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
            (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
                ci->i_rdcache_gen++;

        /*
         * if we are newly issued FILE_SHARED, mark dir not complete; we
         * don't know what happened to this directory while we didn't
         * have the cap.
         */
        if ((issued & CEPH_CAP_FILE_SHARED) &&
            (had & CEPH_CAP_FILE_SHARED) == 0) {
                ci->i_shared_gen++;
                if (S_ISDIR(ci->vfs_inode.i_mode)) {
                        dout(" marking %p NOT complete\n", &ci->vfs_inode);
                        __ceph_dir_clear_complete(ci);
                }
        }
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
                 struct ceph_mds_session *session, u64 cap_id,
                 int fmode, unsigned issued, unsigned wanted,
                 unsigned seq, unsigned mseq, u64 realmino, int flags,
                 struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *new_cap = NULL;
        struct ceph_cap *cap;
        int mds = session->s_mds;
        int actual_wanted;

        dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
             session->s_mds, cap_id, ceph_cap_string(issued), seq);

        /*
         * If we are opening the file, include file mode wanted bits
         * in wanted.
         */
        if (fmode >= 0)
                wanted |= ceph_caps_for_mode(fmode);

retry:
        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
                        spin_unlock(&ci->i_ceph_lock);
                        new_cap = get_cap(mdsc, caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
                        goto retry;
                }

                cap->issued = 0;
                cap->implemented = 0;
                cap->mds = mds;
                cap->mds_wanted = 0;
                cap->mseq = 0;

                cap->ci = ci;
                __insert_cap_node(ci, cap);

                /* clear out old exporting info?  (i.e. on cap import) */
                if (ci->i_cap_exporting_mds == mds) {
                        ci->i_cap_exporting_issued = 0;
                        ci->i_cap_exporting_mseq = 0;
                        ci->i_cap_exporting_mds = -1;
                }

                /* add to session cap list */
                cap->session = session;
                spin_lock(&session->s_cap_lock);
                list_add_tail(&cap->session_caps, &session->s_caps);
                session->s_nr_caps++;
                spin_unlock(&session->s_cap_lock);
        } else if (new_cap)
                ceph_put_cap(mdsc, new_cap);

        if (!ci->i_snap_realm) {
                /*
                 * add this inode to the appropriate snap realm
                 */
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                               realmino);
                if (realm) {
                        ceph_get_snap_realm(mdsc, realm);
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
                                 &realm->inodes_with_caps);
                        spin_unlock(&realm->inodes_with_caps_lock);
                } else {
                        pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
                               realmino);
                        WARN_ON(!realm);
                }
        }

        __check_cap_issue(ci, cap, issued);

        /*
         * If we are issued caps we don't want, or the mds' wanted
         * value appears to be off, queue a check so we'll release
         * later and/or update the mds wanted value.
         */
        actual_wanted = __ceph_caps_wanted(ci);
        if ((wanted & ~actual_wanted) ||
            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
                dout(" issued %s, mds wanted %s, actual %s, queueing\n",
                     ceph_cap_string(issued), ceph_cap_string(wanted),
                     ceph_cap_string(actual_wanted));
                __cap_delay_requeue(mdsc, ci);
        }

        if (flags & CEPH_CAP_FLAG_AUTH) {
                if (ci->i_auth_cap == NULL ||
                    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0)
                        ci->i_auth_cap = cap;
        } else if (ci->i_auth_cap == cap) {
                ci->i_auth_cap = NULL;
                spin_lock(&mdsc->cap_dirty_lock);
                if (!list_empty(&ci->i_dirty_item)) {
                        dout(" moving %p to cap_dirty_migrating\n", inode);
                        list_move(&ci->i_dirty_item,
                                  &mdsc->cap_dirty_migrating);
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }

        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
             ceph_cap_string(issued|cap->issued), seq, mds);
        cap->cap_id = cap_id;
        cap->issued = issued;
        cap->implemented |= issued;
        if (mseq > cap->mseq)
                cap->mds_wanted = wanted;
        else
                cap->mds_wanted |= wanted;
        cap->seq = seq;
        cap->issue_seq = seq;
        cap->mseq = mseq;
        cap->cap_gen = session->s_cap_gen;

        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
        spin_unlock(&ci->i_ceph_lock);
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
        unsigned long ttl;
        u32 gen;

        spin_lock(&cap->session->s_gen_ttl_lock);
        gen = cap->session->s_cap_gen;
        ttl = cap->session->s_cap_ttl;
        spin_unlock(&cap->session->s_gen_ttl_lock);

        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
                     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }

        return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
        int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
        struct ceph_cap *cap;
        struct rb_node *p;

        if (implemented)
                *implemented = 0;
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                dout("__ceph_caps_issued %p cap %p issued %s\n",
                     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
        }
        /*
         * Exclude caps issued by a non-auth MDS that are being revoked
         * by the auth MDS.  The non-auth MDS should be revoking or
         * exporting these caps, but the message is delayed.
         */
        if (ci->i_auth_cap) {
                cap = ci->i_auth_cap;
                have &= ~cap->implemented | cap->issued;
        }
        return have;
}
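
/*
 * Worked example for the auth-cap mask above (editor's note,
 * illustrative only): if the auth cap has issued = Fs while
 * implemented = Fsx (i.e. Fx is being revoked), then
 * (~implemented | issued) clears exactly the Fx bit, so an Fx still
 * reported by a delayed non-auth cap is excluded from 'have'.
 */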

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap == ocap)
                        continue;
                if (!__cap_is_valid(cap))
                        continue;
                have |= cap->issued;
        }
        return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *s = cap->session;

        spin_lock(&s->s_cap_lock);
        if (s->s_cap_iterator == NULL) {
                dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
                     s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
                     &cap->ci->vfs_inode, cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
}
/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * tail of their respective session LRUs (i.e. mark them recently
 * used).  This is the preferred way for callers to check for caps
 * they want.
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int have = ci->i_snap_caps;

        if ((have & mask) == mask) {
                dout("__ceph_caps_issued_mask %p snap issued %s"
                     " (mask %s)\n", &ci->vfs_inode,
                     ceph_cap_string(have),
                     ceph_cap_string(mask));
                return 1;
        }

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                if ((cap->issued & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p cap %p issued %s"
                             " (mask %s)\n", &ci->vfs_inode, cap,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch)
                                __touch_cap(cap);
                        return 1;
                }

                /* does a combination of caps satisfy mask? */
                have |= cap->issued;
                if ((have & mask) == mask) {
                        dout("__ceph_caps_issued_mask %p combo issued %s"
                             " (mask %s)\n", &ci->vfs_inode,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch) {
                                struct rb_node *q;

                                /* touch this + preceding caps */
                                __touch_cap(cap);
                                for (q = rb_first(&ci->i_caps); q != p;
                                     q = rb_next(q)) {
                                        cap = rb_entry(q, struct ceph_cap,
                                                       ci_node);
                                        if (!__cap_is_valid(cap))
                                                continue;
                                        __touch_cap(cap);
                                }
                        }
                        return 1;
                }
        }

        return 0;
}
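
/*
 * Worked example for the combination case above (editor's note,
 * illustrative only): with mask = CEPH_CAP_FILE_SHARED |
 * CEPH_CAP_FILE_RD, one cap issuing Fs and another issuing Fr each
 * fail the (cap->issued & mask) == mask test individually, but
 * together satisfy the accumulated 'have' check, so the function
 * returns 1 and (if touch) both caps are marked recently used.
 */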

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
                               struct ceph_cap *ocap, int mask)
{
        struct ceph_cap *cap;
        struct rb_node *p;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (cap != ocap && __cap_is_valid(cap) &&
                    (cap->implemented & ~cap->issued & mask))
                        return 1;
        }
        return 0;
}

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
        struct inode *inode = &ci->vfs_inode;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_caps_revoking_other(ci, NULL, mask);
        spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
        int used = 0;

        if (ci->i_pin_ref)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
        if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
        if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
}
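
/*
 * Example (editor's note, illustrative only): an inode whose mapping
 * still holds pages (i_data.nrpages != 0) counts CEPH_CAP_FILE_CACHE
 * as in use even after the last i_rdcache_ref is dropped, and a
 * buffered writer holding i_wb_ref (or dirty pages via
 * i_wrbuffer_ref) counts CEPH_CAP_FILE_BUFFER, so those caps stay
 * 'used' while data still sits in the page cache.
 */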

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
        int want = 0;
        int mode;

        for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
                if (ci->i_nr_by_mode[mode])
                        want |= ceph_caps_for_mode(mode);
        return want;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
        struct ceph_cap *cap;
        struct rb_node *p;
        int mds_wanted = 0;

        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
                mds_wanted |= cap->mds_wanted;
        }
        return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
        return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        int removed = 0;

        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

        /* remove from session list */
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
                     cap, cap->session);
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
                cap->session = NULL;
                removed = 1;
        }
        /* protect backpointer with s_cap_lock: see iterate_session_caps */
        cap->ci = NULL;
        spin_unlock(&session->s_cap_lock);

        /* remove from inode list */
        rb_erase(&cap->ci_node, &ci->i_caps);
        if (ci->i_auth_cap == cap)
                ci->i_auth_cap = NULL;

        if (removed)
                ceph_put_cap(mdsc, cap);

        if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                ci->i_snap_realm_counter++;
                ci->i_snap_realm = NULL;
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }
        if (!__ceph_is_any_real_caps(ci))
                __cap_delay_cancel(mdsc, ci);
}

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
                        u64 ino, u64 cid, int op,
                        int caps, int wanted, int dirty,
                        u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
                        u64 size, u64 max_size,
                        struct timespec *mtime, struct timespec *atime,
                        u64 time_warp_seq,
                        kuid_t uid, kgid_t gid, umode_t mode,
                        u64 xattr_version,
                        struct ceph_buffer *xattrs_buf,
                        u64 follows)
{
        struct ceph_mds_caps *fc;
        struct ceph_msg *msg;

        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
             " seq %u/%u mseq %u follows %lld size %llu/%llu"
             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
             ceph_cap_string(dirty),
             seq, issue_seq, mseq, follows, size, max_size,
             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
        if (!msg)
                return -ENOMEM;

        msg->hdr.tid = cpu_to_le64(flush_tid);

        fc = msg->front.iov_base;
        memset(fc, 0, sizeof(*fc));

        fc->cap_id = cpu_to_le64(cid);
        fc->op = cpu_to_le32(op);
        fc->seq = cpu_to_le32(seq);
        fc->issue_seq = cpu_to_le32(issue_seq);
        fc->migrate_seq = cpu_to_le32(mseq);
        fc->caps = cpu_to_le32(caps);
        fc->wanted = cpu_to_le32(wanted);
        fc->dirty = cpu_to_le32(dirty);
        fc->ino = cpu_to_le64(ino);
        fc->snap_follows = cpu_to_le64(follows);

        fc->size = cpu_to_le64(size);
        fc->max_size = cpu_to_le64(max_size);
        if (mtime)
                ceph_encode_timespec(&fc->mtime, mtime);
        if (atime)
                ceph_encode_timespec(&fc->atime, atime);
        fc->time_warp_seq = cpu_to_le32(time_warp_seq);

        fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
        fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
        fc->mode = cpu_to_le32(mode);

        fc->xattr_version = cpu_to_le64(xattr_version);
        if (xattrs_buf) {
                msg->middle = ceph_buffer_get(xattrs_buf);
                fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
                msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
        }

        ceph_con_send(&session->s_con, msg);
        return 0;
}

void __queue_cap_release(struct ceph_mds_session *session,
                         u64 ino, u64 cap_id, u32 migrate_seq,
                         u32 issue_seq)
{
        struct ceph_msg *msg;
        struct ceph_mds_cap_release *head;
        struct ceph_mds_cap_item *item;

        spin_lock(&session->s_cap_lock);
        BUG_ON(!session->s_num_cap_releases);
        msg = list_first_entry(&session->s_cap_releases,
                               struct ceph_msg, list_head);

        dout(" adding %llx release to mds%d msg %p (%d left)\n",
             ino, session->s_mds, msg, session->s_num_cap_releases);

        BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
        head = msg->front.iov_base;
        le32_add_cpu(&head->num, 1);
        item = msg->front.iov_base + msg->front.iov_len;
        item->ino = cpu_to_le64(ino);
        item->cap_id = cpu_to_le64(cap_id);
        item->migrate_seq = cpu_to_le32(migrate_seq);
        item->seq = cpu_to_le32(issue_seq);

        session->s_num_cap_releases--;

        msg->front.iov_len += sizeof(*item);
        if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
                dout(" release msg %p full\n", msg);
                list_move_tail(&msg->list_head, &session->s_cap_releases_done);
        } else {
                dout(" release msg %p at %d/%d (%d)\n", msg,
                     (int)le32_to_cpu(head->num),
                     (int)CEPH_CAPS_PER_RELEASE,
                     (int)msg->front.iov_len);
        }
        spin_unlock(&session->s_cap_lock);
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;

        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                struct ceph_mds_session *session = cap->session;

                __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
                                    cap->mseq, cap->issue_seq);
                p = rb_next(p);
                __ceph_remove_cap(cap);
        }
}
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
        __releases(cap->ci->i_ceph_lock)
{
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
        u64 cap_id = cap->cap_id;
        int held, revoking, dropping, keep;
        u64 seq, issue_seq, mseq, time_warp_seq, follows;
        u64 size, max_size;
        struct timespec mtime, atime;
        int wake = 0;
        umode_t mode;
        kuid_t uid;
        kgid_t gid;
        struct ceph_mds_session *session;
        u64 xattr_version = 0;
        struct ceph_buffer *xattr_blob = NULL;
        int delayed = 0;
        u64 flush_tid = 0;
        int i;
        int ret;

        held = cap->issued | cap->implemented;
        revoking = cap->implemented & ~cap->issued;
        retain &= ~revoking;
        dropping = cap->issued & ~retain;

        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
             inode, cap, cap->session,
             ceph_cap_string(held), ceph_cap_string(held & retain),
             ceph_cap_string(revoking));
        BUG_ON((retain & CEPH_CAP_PIN) == 0);

        session = cap->session;

        /* don't release wanted unless we've waited a bit. */
        if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
            time_before(jiffies, ci->i_hold_caps_min)) {
                dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
                     ceph_cap_string(cap->issued),
                     ceph_cap_string(cap->issued & retain),
                     ceph_cap_string(cap->mds_wanted),
                     ceph_cap_string(want));
                want |= cap->mds_wanted;
                retain |= cap->issued;
                delayed = 1;
        }
        ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

        cap->issued &= retain;  /* drop bits we don't want */
        if (cap->implemented & ~cap->issued) {
                /*
                 * Wake up any waiters on wanted -> needed transition.
                 * This is due to the weird transition from buffered
                 * to sync IO... we need to flush dirty pages _before_
                 * allowing sync writes to avoid reordering.
                 */
                wake = 1;
        }
        cap->implemented &= cap->issued | used;
        cap->mds_wanted = want;

        if (flushing) {
                /*
                 * assign a tid for flush operations so we can avoid
                 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
                 * clean type races.  track latest tid for every bit
                 * so we can handle flush AxFw, flush Fw, and have the
                 * first ack clean Ax.
                 */
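                /*
                 * Worked example (editor's note, illustrative only):
                 * flushing AxFw might get tid 5; if Fw is dirtied and
                 * flushed again it gets tid 6, overwriting
                 * i_cap_flush_tid for the Fw bit.  The ack for tid 5
                 * then cleans Ax only, since Fw's latest tid is 6.
                 */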
                flush_tid = ++ci->i_cap_flush_last_tid;
                if (pflush_tid)
                        *pflush_tid = flush_tid;
                dout(" cap_flush_tid %d\n", (int)flush_tid);
                for (i = 0; i < CEPH_CAP_BITS; i++)
                        if (flushing & (1 << i))
                                ci->i_cap_flush_tid[i] = flush_tid;

                follows = ci->i_head_snapc->seq;
        } else {
                follows = 0;
        }

        keep = cap->implemented;
        seq = cap->seq;
        issue_seq = cap->issue_seq;
        mseq = cap->mseq;
        size = inode->i_size;
        ci->i_reported_size = size;
        max_size = ci->i_wanted_max_size;
        ci->i_requested_max_size = max_size;
        mtime = inode->i_mtime;
        atime = inode->i_atime;
        time_warp_seq = ci->i_time_warp_seq;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mode = inode->i_mode;

        if (flushing & CEPH_CAP_XATTR_EXCL) {
                __ceph_build_xattrs_blob(ci);
                xattr_blob = ci->i_xattrs.blob;
                xattr_version = ci->i_xattrs.version;
        }

        spin_unlock(&ci->i_ceph_lock);

        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
                size, max_size, &mtime, &atime, time_warp_seq,
                uid, gid, mode, xattr_version, xattr_blob,
                follows);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
                delayed = 1;
        }

        if (wake)
                wake_up_all(&ci->i_cap_wq);

        return delayed;
}

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Unless @again is true, skip cap_snaps that were already sent to
 * the MDS (i.e., during this session).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int again)
                __releases(ci->i_ceph_lock)
                __acquires(ci->i_ceph_lock)
{
        struct inode *inode = &ci->vfs_inode;
        int mds;
        struct ceph_cap_snap *capsnap;
        u32 mseq;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
                                                    session->s_mutex */
        u64 next_follows = 0;  /* keep track of how far we've gotten through
                                  the i_cap_snaps list, and skip these entries
                                  next time around to avoid an infinite loop */

        if (psession)
                session = *psession;

        dout("__flush_snaps %p\n", inode);
retry:
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                /* avoid an infinite loop after retry */
                if (capsnap->follows < next_follows)
                        continue;
                /*
                 * we need to wait for sync writes to complete and for dirty
                 * pages to be written out.
                 */
                if (capsnap->dirty_pages || capsnap->writing)
                        break;

                /*
                 * if cap writeback already occurred, we should have dropped
                 * the capsnap in ceph_put_wrbuffer_cap_refs.
                 */
                BUG_ON(capsnap->dirty == 0);

                /* pick mds, take s_mutex */
                if (ci->i_auth_cap == NULL) {
                        dout("no auth cap (migrating?), doing nothing\n");
                        goto out;
                }

                /* only flush each capsnap once */
                if (!again && !list_empty(&capsnap->flushing_item)) {
                        dout("already flushed %p, skipping\n", capsnap);
                        continue;
                }

                mds = ci->i_auth_cap->session->s_mds;
                mseq = ci->i_auth_cap->mseq;

                if (session && session->s_mds != mds) {
                        dout("oops, wrong session %p mutex\n", session);
                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        session = NULL;
                }
                if (!session) {
                        spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
                        if (session) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
                                mutex_lock(&session->s_mutex);
                        }
                        /*
                         * if session == NULL, we raced against a cap
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
                        spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }

                capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
                atomic_inc(&capsnap->nref);
                if (!list_empty(&capsnap->flushing_item))
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
                spin_unlock(&ci->i_ceph_lock);

                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
                             capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
                             capsnap->size, 0,
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
                             capsnap->xattr_version, capsnap->xattr_blob,
                             capsnap->follows);

                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);

                spin_lock(&ci->i_ceph_lock);
                goto retry;
        }

        /* we flushed them all; remove this inode from the queue */
        spin_lock(&mdsc->snap_flush_lock);
        list_del_init(&ci->i_snap_flush_item);
        spin_unlock(&mdsc->snap_flush_lock);

out:
        if (psession)
                *psession = session;
        else if (session) {
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
        }
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
        spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
        spin_unlock(&ci->i_ceph_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        struct inode *inode = &ci->vfs_inode;
        int was = ci->i_dirty_caps;
        int dirty = 0;

        dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
             ceph_cap_string(mask), ceph_cap_string(was),
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
                if (!ci->i_head_snapc)
                        ci->i_head_snapc = ceph_get_snap_context(
                                ci->i_snap_realm->cached_context);
                dout(" inode %p now dirty snapc %p auth cap %p\n",
                     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                if (ci->i_auth_cap)
                        list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
                else
                        list_add(&ci->i_dirty_item,
                                 &mdsc->cap_dirty_migrating);
                spin_unlock(&mdsc->cap_dirty_lock);
                if (ci->i_flushing_caps == 0) {
                        ihold(inode);
                        dirty |= I_DIRTY_SYNC;
                }
        }
        BUG_ON(list_empty(&ci->i_dirty_item));
        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
            (mask & CEPH_CAP_FILE_BUFFER))
                dirty |= I_DIRTY_DATASYNC;
        __cap_delay_requeue(mdsc, ci);
        return dirty;
}
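
/*
 * Usage sketch (editor's note, illustrative only): setattr-style
 * callers typically do something like
 *
 *         int dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_AUTH_EXCL);
 *         spin_unlock(&ci->i_ceph_lock);
 *         if (dirty)
 *                 __mark_inode_dirty(inode, dirty);
 *
 * i.e. the returned I_DIRTY_* flags are handed to the VFS only after
 * i_ceph_lock is dropped.
 */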

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
1407static int __mark_caps_flushing(struct inode *inode,
1408                                 struct ceph_mds_session *session)
1409{
1410        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1411        struct ceph_inode_info *ci = ceph_inode(inode);
1412        int flushing;
1413
1414        BUG_ON(ci->i_dirty_caps == 0);
1415        BUG_ON(list_empty(&ci->i_dirty_item));
1416
1417        flushing = ci->i_dirty_caps;
1418        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1419             ceph_cap_string(flushing),
1420             ceph_cap_string(ci->i_flushing_caps),
1421             ceph_cap_string(ci->i_flushing_caps | flushing));
1422        ci->i_flushing_caps |= flushing;
1423        ci->i_dirty_caps = 0;
1424        dout(" inode %p now !dirty\n", inode);
1425
1426        spin_lock(&mdsc->cap_dirty_lock);
1427        list_del_init(&ci->i_dirty_item);
1428
1429        ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
1430        if (list_empty(&ci->i_flushing_item)) {
1431                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1432                mdsc->num_cap_flushing++;
1433                dout(" inode %p now flushing seq %lld\n", inode,
1434                     ci->i_cap_flush_seq);
1435        } else {
1436                list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1437                dout(" inode %p now flushing (more) seq %lld\n", inode,
1438                     ci->i_cap_flush_seq);
1439        }
1440        spin_unlock(&mdsc->cap_dirty_lock);
1441
1442        return flushing;
1443}
1444
1445/*
1446 * try to invalidate mapping pages without blocking.
1447 */
1448static int try_nonblocking_invalidate(struct inode *inode)
1449{
1450        struct ceph_inode_info *ci = ceph_inode(inode);
1451        u32 invalidating_gen = ci->i_rdcache_gen;
1452
1453        spin_unlock(&ci->i_ceph_lock);
1454        invalidate_mapping_pages(&inode->i_data, 0, -1);
1455        spin_lock(&ci->i_ceph_lock);
1456
1457        if (inode->i_data.nrpages == 0 &&
1458            invalidating_gen == ci->i_rdcache_gen) {
1459                /* success. */
1460                dout("try_nonblocking_invalidate %p success\n", inode);
1461                /* save any racing async invalidate some trouble */
1462                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1463                return 0;
1464        }
1465        dout("try_nonblocking_invalidate %p failed\n", inode);
1466        return -1;
1467}
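
    /*
     * Note on the generation check above: i_rdcache_gen is bumped when
     * FILE_CACHE is newly issued (see __check_cap_issue, earlier in
     * this file).  Re-checking it after retaking i_ceph_lock catches
     * caching being re-enabled while the lock was dropped; in that
     * case a momentarily empty mapping does not mean our invalidate
     * "won", so we must report failure.
     */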
1468
1469/*
1470 * Swiss army knife function to examine currently used and wanted
1471 * versus held caps.  Release, flush, ack revoked caps to mds as
1472 * appropriate.
1473 *
1474 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1475 *    cap release further.
1476 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1477 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1478 *    further delay.
1479 */
1480void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1481                     struct ceph_mds_session *session)
1482{
1483        struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1484        struct ceph_mds_client *mdsc = fsc->mdsc;
1485        struct inode *inode = &ci->vfs_inode;
1486        struct ceph_cap *cap;
1487        int file_wanted, used, cap_used;
1488        int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1489        int issued, implemented, want, retain, revoking, flushing = 0;
1490        int mds = -1;   /* keep track of how far we've gone through i_caps list
1491                           to avoid an infinite loop on retry */
1492        struct rb_node *p;
1493        int tried_invalidate = 0;
1494        int delayed = 0, sent = 0, force_requeue = 0, num;
1495        int queue_invalidate = 0;
1496        int is_delayed = flags & CHECK_CAPS_NODELAY;
1497
1498        /* if we are unmounting, flush any unused caps immediately. */
1499        if (mdsc->stopping)
1500                is_delayed = 1;
1501
1502        spin_lock(&ci->i_ceph_lock);
1503
1504        if (ci->i_ceph_flags & CEPH_I_FLUSH)
1505                flags |= CHECK_CAPS_FLUSH;
1506
1507        /* flush snaps first time around only */
1508        if (!list_empty(&ci->i_cap_snaps))
1509                __ceph_flush_snaps(ci, &session, 0);
1510        goto retry_locked;
1511retry:
1512        spin_lock(&ci->i_ceph_lock);
1513retry_locked:
1514        file_wanted = __ceph_caps_file_wanted(ci);
1515        used = __ceph_caps_used(ci);
1516        want = file_wanted | used;
1517        issued = __ceph_caps_issued(ci, &implemented);
1518        revoking = implemented & ~issued;
1519
1520        retain = want | CEPH_CAP_PIN;
1521        if (!mdsc->stopping && inode->i_nlink > 0) {
1522                if (want) {
1523                        retain |= CEPH_CAP_ANY;       /* be greedy */
1524                } else {
1525                        retain |= CEPH_CAP_ANY_SHARED;
1526                        /*
1527                         * keep RD only if we didn't have the file open RW,
1528                         * because then the mds would revoke it anyway to
1529                         * journal max_size=0.
1530                         */
1531                        if (ci->i_max_size == 0)
1532                                retain |= CEPH_CAP_ANY_RD;
1533                }
1534        }
1535
1536        dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1537             " issued %s revoking %s retain %s %s%s%s\n", inode,
1538             ceph_cap_string(file_wanted),
1539             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1540             ceph_cap_string(ci->i_flushing_caps),
1541             ceph_cap_string(issued), ceph_cap_string(revoking),
1542             ceph_cap_string(retain),
1543             (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1544             (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1545             (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1546
1547        /*
1548         * If we no longer need to hold onto our old caps, and we may
1549         * have cached pages that we don't want, try to invalidate.
1550         * If we fail, it's because pages are locked... try again later.
1551         */
1552        if ((!is_delayed || mdsc->stopping) &&
1553            ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
1554            inode->i_data.nrpages &&                 /* have cached pages */
1555            (file_wanted == 0 ||                     /* no open files */
1556             (revoking & (CEPH_CAP_FILE_CACHE|
1557                          CEPH_CAP_FILE_LAZYIO))) && /*  or revoking cache */
1558            !tried_invalidate) {
1559                dout("check_caps trying to invalidate on %p\n", inode);
1560                if (try_nonblocking_invalidate(inode) < 0) {
1561                        if (revoking & (CEPH_CAP_FILE_CACHE|
1562                                        CEPH_CAP_FILE_LAZYIO)) {
1563                                dout("check_caps queuing invalidate\n");
1564                                queue_invalidate = 1;
1565                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
1566                        } else {
1567                                dout("check_caps failed to invalidate pages\n");
1568                                /* we failed to invalidate pages.  check these
1569                                   caps again later. */
1570                                force_requeue = 1;
1571                                __cap_set_timeouts(mdsc, ci);
1572                        }
1573                }
1574                tried_invalidate = 1;
1575                goto retry_locked;
1576        }
1577
1578        num = 0;
1579        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1580                cap = rb_entry(p, struct ceph_cap, ci_node);
1581                num++;
1582
1583                /* avoid looping forever */
1584                if (mds >= cap->mds ||
1585                    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1586                        continue;
1587
1588                /* NOTE: no side-effects allowed, until we take s_mutex */
1589
1590                cap_used = used;
1591                if (ci->i_auth_cap && cap != ci->i_auth_cap)
1592                        cap_used &= ~ci->i_auth_cap->issued;
1593
1594                revoking = cap->implemented & ~cap->issued;
1595                dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
1596                     cap->mds, cap, ceph_cap_string(cap_used),
1597                     ceph_cap_string(cap->issued),
1598                     ceph_cap_string(cap->implemented),
1599                     ceph_cap_string(revoking));
1600
1601                if (cap == ci->i_auth_cap &&
1602                    (cap->issued & CEPH_CAP_FILE_WR)) {
1603                        /* request larger max_size from MDS? */
1604                        if (ci->i_wanted_max_size > ci->i_max_size &&
1605                            ci->i_wanted_max_size > ci->i_requested_max_size) {
1606                                dout("requesting new max_size\n");
1607                                goto ack;
1608                        }
1609
1610                        /* approaching file_max? */
1611                        if ((inode->i_size << 1) >= ci->i_max_size &&
1612                            (ci->i_reported_size << 1) < ci->i_max_size) {
1613                                dout("i_size approaching max_size\n");
1614                                goto ack;
1615                        }
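
                            /*
                             * Worked example of the check above, with
                             * illustrative numbers: if max_size is 8MB,
                             * we jump to ack once i_size reaches 4MB,
                             * but only if the size we last reported to
                             * the MDS was still below 4MB, so we don't
                             * re-ask on every subsequent write.
                             */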
1616                }
1617                /* flush anything dirty? */
1618                if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1619                    ci->i_dirty_caps) {
1620                        dout("flushing dirty caps\n");
1621                        goto ack;
1622                }
1623
1624                /* completed revocation? (revoked caps no longer in use) */
1625                if (revoking && (revoking & cap_used) == 0) {
1626                        dout("completed revocation of %s\n",
1627                             ceph_cap_string(cap->implemented & ~cap->issued));
1628                        goto ack;
1629                }
1630
1631                /* want more caps from mds? */
1632                if (want & ~(cap->mds_wanted | cap->issued))
1633                        goto ack;
1634
1635                /* things we might delay */
1636                if ((cap->issued & ~retain) == 0 &&
1637                    cap->mds_wanted == want)
1638                        continue;     /* nope, all good */
1639
1640                if (is_delayed)
1641                        goto ack;
1642
1643                /* delay? */
1644                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1645                    time_before(jiffies, ci->i_hold_caps_max)) {
1646                        dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1647                             ceph_cap_string(cap->issued),
1648                             ceph_cap_string(cap->issued & retain),
1649                             ceph_cap_string(cap->mds_wanted),
1650                             ceph_cap_string(want));
1651                        delayed++;
1652                        continue;
1653                }
1654
1655ack:
1656                if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1657                        dout(" skipping %p I_NOFLUSH set\n", inode);
1658                        continue;
1659                }
1660
1661                if (session && session != cap->session) {
1662                        dout("oops, wrong session %p mutex\n", session);
1663                        mutex_unlock(&session->s_mutex);
1664                        session = NULL;
1665                }
1666                if (!session) {
1667                        session = cap->session;
1668                        if (mutex_trylock(&session->s_mutex) == 0) {
1669                                dout("inverting session/ino locks on %p\n",
1670                                     session);
1671                                spin_unlock(&ci->i_ceph_lock);
1672                                if (took_snap_rwsem) {
1673                                        up_read(&mdsc->snap_rwsem);
1674                                        took_snap_rwsem = 0;
1675                                }
1676                                mutex_lock(&session->s_mutex);
1677                                goto retry;
1678                        }
1679                }
1680                /* take snap_rwsem after session mutex */
1681                if (!took_snap_rwsem) {
1682                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1683                                dout("inverting snap/in locks on %p\n",
1684                                     inode);
1685                                spin_unlock(&ci->i_ceph_lock);
1686                                down_read(&mdsc->snap_rwsem);
1687                                took_snap_rwsem = 1;
1688                                goto retry;
1689                        }
1690                        took_snap_rwsem = 1;
1691                }
1692
1693                if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1694                        flushing = __mark_caps_flushing(inode, session);
1695                else
1696                        flushing = 0;
1697
1698                mds = cap->mds;  /* remember mds, so we don't repeat */
1699                sent++;
1700
1701                /* __send_cap drops i_ceph_lock */
1702                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
1703                                      want, retain, flushing, NULL);
1704                goto retry; /* retake i_ceph_lock and restart our cap scan. */
1705        }
1706
1707        /*
1708         * Reschedule delayed caps release if we delayed anything,
1709         * otherwise cancel.
1710         */
1711        if (delayed && is_delayed)
1712                force_requeue = 1;   /* __send_cap delayed release; requeue */
1713        if (!delayed && !is_delayed)
1714                __cap_delay_cancel(mdsc, ci);
1715        else if (!is_delayed || force_requeue)
1716                __cap_delay_requeue(mdsc, ci);
1717
1718        spin_unlock(&ci->i_ceph_lock);
1719
1720        if (queue_invalidate)
1721                ceph_queue_invalidate(inode);
1722
1723        if (session)
1724                mutex_unlock(&session->s_mutex);
1725        if (took_snap_rwsem)
1726                up_read(&mdsc->snap_rwsem);
1727}
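
    /*
     * For reference, two invocations that appear later in this file:
     * the plain form used when the last cap reference is dropped, and
     * the auth-only form used when chasing a larger max_size:
     *
     *        ceph_check_caps(ci, 0, NULL);
     *        ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
     *
     * A NULL session means the function will find and lock the
     * appropriate cap session itself.
     */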
1728
1729/*
1730 * Try to flush dirty caps back to the auth mds.
1731 */
1732static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1733                          unsigned *flush_tid)
1734{
1735        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1736        struct ceph_inode_info *ci = ceph_inode(inode);
1737        int unlock_session = session ? 0 : 1;
1738        int flushing = 0;
1739
1740retry:
1741        spin_lock(&ci->i_ceph_lock);
1742        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1743                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1744                goto out;
1745        }
1746        if (ci->i_dirty_caps && ci->i_auth_cap) {
1747                struct ceph_cap *cap = ci->i_auth_cap;
1748                int used = __ceph_caps_used(ci);
1749                int want = __ceph_caps_wanted(ci);
1750                int delayed;
1751
1752                if (!session) {
1753                        spin_unlock(&ci->i_ceph_lock);
1754                        session = cap->session;
1755                        mutex_lock(&session->s_mutex);
1756                        goto retry;
1757                }
1758                BUG_ON(session != cap->session);
1759                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1760                        goto out;
1761
1762                flushing = __mark_caps_flushing(inode, session);
1763
1764                /* __send_cap drops i_ceph_lock */
1765                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1766                                     cap->issued | cap->implemented, flushing,
1767                                     flush_tid);
1768                if (!delayed)
1769                        goto out_unlocked;
1770
1771                spin_lock(&ci->i_ceph_lock);
1772                __cap_delay_requeue(mdsc, ci);
1773        }
1774out:
1775        spin_unlock(&ci->i_ceph_lock);
1776out_unlocked:
1777        if (session && unlock_session)
1778                mutex_unlock(&session->s_mutex);
1779        return flushing;
1780}
1781
1782/*
1783 * Return true if we've flushed caps through the given flush_tid.
1784 */
1785static int caps_are_flushed(struct inode *inode, unsigned tid)
1786{
1787        struct ceph_inode_info *ci = ceph_inode(inode);
1788        int i, ret = 1;
1789
1790        spin_lock(&ci->i_ceph_lock);
1791        for (i = 0; i < CEPH_CAP_BITS; i++)
1792                if ((ci->i_flushing_caps & (1 << i)) &&
1793                    ci->i_cap_flush_tid[i] <= tid) {
1794                        /* still flushing this bit */
1795                        ret = 0;
1796                        break;
1797                }
1798        spin_unlock(&ci->i_ceph_lock);
1799        return ret;
1800}
1801
1802/*
1803 * Wait on any unsafe replies for the given inode.  First wait on the
1804 * newest request, and make its tid the upper bound.  Then, if there
1805 * are more requests, keep waiting on the oldest as long as it is
1806 * still older than that upper bound.
1807 */
1808static void sync_write_wait(struct inode *inode)
1809{
1810        struct ceph_inode_info *ci = ceph_inode(inode);
1811        struct list_head *head = &ci->i_unsafe_writes;
1812        struct ceph_osd_request *req;
1813        u64 last_tid;
1814
1815        spin_lock(&ci->i_unsafe_lock);
1816        if (list_empty(head))
1817                goto out;
1818
1819        /* set upper bound as _last_ entry in chain */
1820        req = list_entry(head->prev, struct ceph_osd_request,
1821                         r_unsafe_item);
1822        last_tid = req->r_tid;
1823
1824        do {
1825                ceph_osdc_get_request(req);
1826                spin_unlock(&ci->i_unsafe_lock);
1827                dout("sync_write_wait on tid %llu (until %llu)\n",
1828                     req->r_tid, last_tid);
1829                wait_for_completion(&req->r_safe_completion);
1830                spin_lock(&ci->i_unsafe_lock);
1831                ceph_osdc_put_request(req);
1832
1833                /*
1834                 * from here on look at first entry in chain, since we
1835                 * only want to wait for anything older than last_tid
1836                 */
1837                if (list_empty(head))
1838                        break;
1839                req = list_entry(head->next, struct ceph_osd_request,
1840                                 r_unsafe_item);
1841        } while (req->r_tid < last_tid);
1842out:
1843        spin_unlock(&ci->i_unsafe_lock);
1844}
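
    /*
     * Timeline sketch of the strategy above, with unsafe writes
     * A (tid 5), B (tid 9) and C (tid 12) in flight:
     *
     *   1. last_tid = 12 (the newest entry); wait on C first.
     *   2. Re-examine the head of the list: A and/or B may have
     *      completed while we slept.  Wait on whichever is now the
     *      oldest, as long as its tid is still below 12.
     *   3. Stop once the list is empty or its oldest entry is newer
     *      than the original upper bound.
     */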
1845
1846int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1847{
1848        struct inode *inode = file->f_mapping->host;
1849        struct ceph_inode_info *ci = ceph_inode(inode);
1850        unsigned flush_tid;
1851        int ret;
1852        int dirty;
1853
1854        dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1855        sync_write_wait(inode);
1856
1857        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1858        if (ret < 0)
1859                return ret;
1860        mutex_lock(&inode->i_mutex);
1861
1862        dirty = try_flush_caps(inode, NULL, &flush_tid);
1863        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1864
1865        /*
1866         * only wait on non-file metadata writeback (the mds
1867         * can recover size and mtime, so we don't need to
1868         * wait for that)
1869         */
1870        if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1871                dout("fsync waiting for flush_tid %u\n", flush_tid);
1872                ret = wait_event_interruptible(ci->i_cap_wq,
1873                                       caps_are_flushed(inode, flush_tid));
1874        }
1875
1876        dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1877        mutex_unlock(&inode->i_mutex);
1878        return ret;
1879}
1880
1881/*
1882 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
1883 * queue inode for flush but don't do so immediately, because we can
1884 * get by with fewer MDS messages if we wait for data writeback to
1885 * complete first.
1886 */
1887int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1888{
1889        struct ceph_inode_info *ci = ceph_inode(inode);
1890        unsigned flush_tid;
1891        int err = 0;
1892        int dirty;
1893        int wait = wbc->sync_mode == WB_SYNC_ALL;
1894
1895        dout("write_inode %p wait=%d\n", inode, wait);
1896        if (wait) {
1897                dirty = try_flush_caps(inode, NULL, &flush_tid);
1898                if (dirty)
1899                        err = wait_event_interruptible(ci->i_cap_wq,
1900                                       caps_are_flushed(inode, flush_tid));
1901        } else {
1902                struct ceph_mds_client *mdsc =
1903                        ceph_sb_to_client(inode->i_sb)->mdsc;
1904
1905                spin_lock(&ci->i_ceph_lock);
1906                if (__ceph_caps_dirty(ci))
1907                        __cap_delay_requeue_front(mdsc, ci);
1908                spin_unlock(&ci->i_ceph_lock);
1909        }
1910        return err;
1911}
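
    /*
     * Both entry points above are reached through standard VFS hooks;
     * roughly (a sketch; the actual tables live in fs/ceph/file.c and
     * fs/ceph/super.c):
     *
     *        const struct file_operations ceph_file_fops = {
     *                ...
     *                .fsync = ceph_fsync,
     *        };
     *
     *        static const struct super_operations ceph_super_ops = {
     *                ...
     *                .write_inode = ceph_write_inode,
     *        };
     */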
1912
1913/*
1914 * After a recovering MDS goes active, we need to resend any caps
1915 * we were flushing.
1916 *
1917 * Caller holds session->s_mutex.
1918 */
1919static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1920                                   struct ceph_mds_session *session)
1921{
1922        struct ceph_cap_snap *capsnap;
1923
1924        dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1925        list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1926                            flushing_item) {
1927                struct ceph_inode_info *ci = capsnap->ci;
1928                struct inode *inode = &ci->vfs_inode;
1929                struct ceph_cap *cap;
1930
1931                spin_lock(&ci->i_ceph_lock);
1932                cap = ci->i_auth_cap;
1933                if (cap && cap->session == session) {
1934                        dout("kick_flushing_capsnaps %p cap %p capsnap %p\n", inode,
1935                             cap, capsnap);
1936                        __ceph_flush_snaps(ci, &session, 1);
1937                } else {
1938                        pr_err("%p auth cap %p not mds%d ???\n", inode,
1939                               cap, session->s_mds);
1940                }
1941                spin_unlock(&ci->i_ceph_lock);
1942        }
1943}
1944
1945void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1946                             struct ceph_mds_session *session)
1947{
1948        struct ceph_inode_info *ci;
1949
1950        kick_flushing_capsnaps(mdsc, session);
1951
1952        dout("kick_flushing_caps mds%d\n", session->s_mds);
1953        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1954                struct inode *inode = &ci->vfs_inode;
1955                struct ceph_cap *cap;
1956                int delayed = 0;
1957
1958                spin_lock(&ci->i_ceph_lock);
1959                cap = ci->i_auth_cap;
1960                if (cap && cap->session == session) {
1961                        dout("kick_flushing_caps %p cap %p %s\n", inode,
1962                             cap, ceph_cap_string(ci->i_flushing_caps));
1963                        delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1964                                             __ceph_caps_used(ci),
1965                                             __ceph_caps_wanted(ci),
1966                                             cap->issued | cap->implemented,
1967                                             ci->i_flushing_caps, NULL);
1968                        if (delayed) {
1969                                spin_lock(&ci->i_ceph_lock);
1970                                __cap_delay_requeue(mdsc, ci);
1971                                spin_unlock(&ci->i_ceph_lock);
1972                        }
1973                } else {
1974                        pr_err("%p auth cap %p not mds%d ???\n", inode,
1975                               cap, session->s_mds);
1976                        spin_unlock(&ci->i_ceph_lock);
1977                }
1978        }
1979}
1980
1981static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1982                                     struct ceph_mds_session *session,
1983                                     struct inode *inode)
1984{
1985        struct ceph_inode_info *ci = ceph_inode(inode);
1986        struct ceph_cap *cap;
1987        int delayed = 0;
1988
1989        spin_lock(&ci->i_ceph_lock);
1990        cap = ci->i_auth_cap;
1991        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1992             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
1993
1994        __ceph_flush_snaps(ci, &session, 1);
1995
1996        if (ci->i_flushing_caps) {
1997                spin_lock(&mdsc->cap_dirty_lock);
1998                list_move_tail(&ci->i_flushing_item,
1999                               &cap->session->s_cap_flushing);
2000                spin_unlock(&mdsc->cap_dirty_lock);
2001
2002                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
2003                                     __ceph_caps_used(ci),
2004                                     __ceph_caps_wanted(ci),
2005                                     cap->issued | cap->implemented,
2006                                     ci->i_flushing_caps, NULL);
2007                if (delayed) {
2008                        spin_lock(&ci->i_ceph_lock);
2009                        __cap_delay_requeue(mdsc, ci);
2010                        spin_unlock(&ci->i_ceph_lock);
2011                }
2012        } else {
2013                spin_unlock(&ci->i_ceph_lock);
2014        }
2015}
2016
2017
2018/*
2019 * Take references to capabilities we hold, so that we don't release
2020 * them to the MDS prematurely.
2021 *
2022 * Protected by i_ceph_lock.
2023 */
2024static void __take_cap_refs(struct ceph_inode_info *ci, int got)
2025{
2026        if (got & CEPH_CAP_PIN)
2027                ci->i_pin_ref++;
2028        if (got & CEPH_CAP_FILE_RD)
2029                ci->i_rd_ref++;
2030        if (got & CEPH_CAP_FILE_CACHE)
2031                ci->i_rdcache_ref++;
2032        if (got & CEPH_CAP_FILE_WR)
2033                ci->i_wr_ref++;
2034        if (got & CEPH_CAP_FILE_BUFFER) {
2035                if (ci->i_wb_ref == 0)
2036                        ihold(&ci->vfs_inode);
2037                ci->i_wb_ref++;
2038                dout("__take_cap_refs %p wb %d -> %d (?)\n",
2039                     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2040        }
2041}
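
    /*
     * Note the asymmetry above: only the first FILE_BUFFER (wb)
     * reference takes an inode reference via ihold().  The matching
     * iput() happens in ceph_put_cap_refs() once i_wb_ref falls back
     * to zero, keeping the inode pinned while buffered writeback may
     * still need it.
     */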
2042
2043/*
2044 * Try to grab cap references.  Specify those refs we @want, and the
2045 * minimal set we @need.  Also include the larger offset we are writing
2046 * to (when applicable), and check against max_size here as well.
2047 * Note that caller is responsible for ensuring max_size increases are
2048 * requested from the MDS.
2049 */
2050static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2051                            int *got, loff_t endoff, int *check_max, int *err)
2052{
2053        struct inode *inode = &ci->vfs_inode;
2054        int ret = 0;
2055        int have, implemented;
2056        int file_wanted;
2057
2058        dout("get_cap_refs %p need %s want %s\n", inode,
2059             ceph_cap_string(need), ceph_cap_string(want));
2060        spin_lock(&ci->i_ceph_lock);
2061
2062        /* make sure file is actually open */
2063        file_wanted = __ceph_caps_file_wanted(ci);
2064        if ((file_wanted & need) == 0) {
2065                dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2066                     ceph_cap_string(need), ceph_cap_string(file_wanted));
2067                *err = -EBADF;
2068                ret = 1;
2069                goto out;
2070        }
2071
2072        /* finish pending truncate */
2073        while (ci->i_truncate_pending) {
2074                spin_unlock(&ci->i_ceph_lock);
2075                if (!(need & CEPH_CAP_FILE_WR))
2076                        mutex_lock(&inode->i_mutex);
2077                __ceph_do_pending_vmtruncate(inode);
2078                if (!(need & CEPH_CAP_FILE_WR))
2079                        mutex_unlock(&inode->i_mutex);
2080                spin_lock(&ci->i_ceph_lock);
2081        }
2082
2083        if (need & CEPH_CAP_FILE_WR) {
2084                if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2085                        dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2086                             inode, endoff, ci->i_max_size);
2087                        if (endoff > ci->i_wanted_max_size) {
2088                                *check_max = 1;
2089                                ret = 1;
2090                        }
2091                        goto out;
2092                }
2093                /*
2094                 * If a sync write is in progress, we must wait, so that we
2095                 * can get a final snapshot value for size+mtime.
2096                 */
2097                if (__ceph_have_pending_cap_snap(ci)) {
2098                        dout("get_cap_refs %p cap_snap_pending\n", inode);
2099                        goto out;
2100                }
2101        }
2102        have = __ceph_caps_issued(ci, &implemented);
2103
2104        if ((have & need) == need) {
2105                /*
2106                 * Look at (implemented & ~have & not) so that we keep waiting
2107                 * on transition from wanted -> needed caps.  This is needed
2108                 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2109                 * going before a prior buffered writeback happens.
2110                 */
2111                int not = want & ~(have & need);
2112                int revoking = implemented & ~have;
2113                dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2114                     inode, ceph_cap_string(have), ceph_cap_string(not),
2115                     ceph_cap_string(revoking));
2116                if ((revoking & not) == 0) {
2117                        *got = need | (have & want);
2118                        __take_cap_refs(ci, *got);
2119                        ret = 1;
2120                }
2121        } else {
2122                dout("get_cap_refs %p have %s needed %s\n", inode,
2123                     ceph_cap_string(have), ceph_cap_string(need));
2124        }
2125out:
2126        spin_unlock(&ci->i_ceph_lock);
2127        dout("get_cap_refs %p ret %d got %s\n", inode,
2128             ret, ceph_cap_string(*got));
2129        return ret;
2130}
2131
2132/*
2133 * Check the offset we are writing up to against our current
2134 * max_size.  If necessary, tell the MDS we want to write to
2135 * a larger offset.
2136 */
2137static void check_max_size(struct inode *inode, loff_t endoff)
2138{
2139        struct ceph_inode_info *ci = ceph_inode(inode);
2140        int check = 0;
2141
2142        /* do we need to explicitly request a larger max_size? */
2143        spin_lock(&ci->i_ceph_lock);
2144        if ((endoff >= ci->i_max_size ||
2145             endoff > (inode->i_size << 1)) &&
2146            endoff > ci->i_wanted_max_size) {
2147                dout("write %p at large endoff %llu, req max_size\n",
2148                     inode, endoff);
2149                ci->i_wanted_max_size = endoff;
2150                check = 1;
2151        }
2152        spin_unlock(&ci->i_ceph_lock);
2153        if (check)
2154                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2155}
2156
2157/*
2158 * Wait for caps, and take cap references.  If we can't get a WR cap
2159 * due to a small max_size, make sure we check_max_size (and possibly
2160 * ask the mds) so we don't get hung up indefinitely.
2161 */
2162int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2163                  loff_t endoff)
2164{
2165        int check_max, ret, err;
2166
2167retry:
2168        if (endoff > 0)
2169                check_max_size(&ci->vfs_inode, endoff);
2170        check_max = 0;
2171        err = 0;
2172        ret = wait_event_interruptible(ci->i_cap_wq,
2173                                       try_get_cap_refs(ci, need, want,
2174                                                        got, endoff,
2175                                                        &check_max, &err));
2176        if (err)
2177                ret = err;
2178        if (check_max)
2179                goto retry;
2180        return ret;
2181}
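
    /*
     * An illustrative caller, modeled loosely on the read path in
     * fs/ceph/file.c (abridged, not copied verbatim): ask for the caps
     * we need, perform the I/O, then drop the references we were
     * granted:
     *
     *        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD,
     *                            CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
     *                            &got, -1);
     *        if (ret < 0)
     *                return ret;
     *        ... do the read, honoring whatever "got" allows ...
     *        ceph_put_cap_refs(ci, got);
     *
     * An endoff of -1 means there is no write offset to check against
     * max_size.
     */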
2182
2183/*
2184 * Take cap refs.  The caller must already hold at least one ref on
2185 * the caps in question; otherwise this cannot be assumed safe.
2186 */
2187void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2188{
2189        spin_lock(&ci->i_ceph_lock);
2190        __take_cap_refs(ci, caps);
2191        spin_unlock(&ci->i_ceph_lock);
2192}
2193
2194/*
2195 * Release cap refs.
2196 *
2197 * If we released the last ref on any given cap, call ceph_check_caps
2198 * to release (or schedule a release).
2199 *
2200 * If we are releasing a WR cap (from a sync write), finalize any affected
2201 * cap_snap, and wake up any waiters.
2202 */
2203void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2204{
2205        struct inode *inode = &ci->vfs_inode;
2206        int last = 0, put = 0, flushsnaps = 0, wake = 0;
2207        struct ceph_cap_snap *capsnap;
2208
2209        spin_lock(&ci->i_ceph_lock);
2210        if (had & CEPH_CAP_PIN)
2211                --ci->i_pin_ref;
2212        if (had & CEPH_CAP_FILE_RD)
2213                if (--ci->i_rd_ref == 0)
2214                        last++;
2215        if (had & CEPH_CAP_FILE_CACHE)
2216                if (--ci->i_rdcache_ref == 0)
2217                        last++;
2218        if (had & CEPH_CAP_FILE_BUFFER) {
2219                if (--ci->i_wb_ref == 0) {
2220                        last++;
2221                        put++;
2222                }
2223                dout("put_cap_refs %p wb %d -> %d (?)\n",
2224                     inode, ci->i_wb_ref+1, ci->i_wb_ref);
2225        }
2226        if (had & CEPH_CAP_FILE_WR)
2227                if (--ci->i_wr_ref == 0) {
2228                        last++;
2229                        if (!list_empty(&ci->i_cap_snaps)) {
2230                                capsnap = list_first_entry(&ci->i_cap_snaps,
2231                                                     struct ceph_cap_snap,
2232                                                     ci_item);
2233                                if (capsnap->writing) {
2234                                        capsnap->writing = 0;
2235                                        flushsnaps =
2236                                                __ceph_finish_cap_snap(ci,
2237                                                                       capsnap);
2238                                        wake = 1;
2239                                }
2240                        }
2241                }
2242        spin_unlock(&ci->i_ceph_lock);
2243
2244        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2245             last ? " last" : "", put ? " put" : "");
2246
2247        if (last && !flushsnaps)
2248                ceph_check_caps(ci, 0, NULL);
2249        else if (flushsnaps)
2250                ceph_flush_snaps(ci);
2251        if (wake)
2252                wake_up_all(&ci->i_cap_wq);
2253        if (put)
2254                iput(inode);
2255}
2256
2257/*
2258 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2259 * context.  Adjust per-snap dirty page accounting as appropriate.
2260 * Once all dirty data for a cap_snap is flushed, flush snapped file
2261 * metadata back to the MDS.  If we dropped the last ref, call
2262 * ceph_check_caps.
2263 */
2264void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2265                                struct ceph_snap_context *snapc)
2266{
2267        struct inode *inode = &ci->vfs_inode;
2268        int last = 0;
2269        int complete_capsnap = 0;
2270        int drop_capsnap = 0;
2271        int found = 0;
2272        struct ceph_cap_snap *capsnap = NULL;
2273
2274        spin_lock(&ci->i_ceph_lock);
2275        ci->i_wrbuffer_ref -= nr;
2276        last = !ci->i_wrbuffer_ref;
2277
2278        if (ci->i_head_snapc == snapc) {
2279                ci->i_wrbuffer_ref_head -= nr;
2280                if (ci->i_wrbuffer_ref_head == 0 &&
2281                    ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
2282                        BUG_ON(!ci->i_head_snapc);
2283                        ceph_put_snap_context(ci->i_head_snapc);
2284                        ci->i_head_snapc = NULL;
2285                }
2286                dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2287                     inode,
2288                     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2289                     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2290                     last ? " LAST" : "");
2291        } else {
2292                list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2293                        if (capsnap->context == snapc) {
2294                                found = 1;
2295                                break;
2296                        }
2297                }
2298                BUG_ON(!found);
2299                capsnap->dirty_pages -= nr;
2300                if (capsnap->dirty_pages == 0) {
2301                        complete_capsnap = 1;
2302                        if (capsnap->dirty == 0)
2303                                /* cap writeback completed before we created
2304                                 * the cap_snap; no FLUSHSNAP is needed */
2305                                drop_capsnap = 1;
2306                }
2307                dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2308                     " snap %lld %d/%d -> %d/%d %s%s%s\n",
2309                     inode, capsnap, capsnap->context->seq,
2310                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2311                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2312                     last ? " (wrbuffer last)" : "",
2313                     complete_capsnap ? " (complete capsnap)" : "",
2314                     drop_capsnap ? " (drop capsnap)" : "");
2315                if (drop_capsnap) {
2316                        ceph_put_snap_context(capsnap->context);
2317                        list_del(&capsnap->ci_item);
2318                        list_del(&capsnap->flushing_item);
2319                        ceph_put_cap_snap(capsnap);
2320                }
2321        }
2322
2323        spin_unlock(&ci->i_ceph_lock);
2324
2325        if (last) {
2326                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2327                iput(inode);
2328        } else if (complete_capsnap) {
2329                ceph_flush_snaps(ci);
2330                wake_up_all(&ci->i_cap_wq);
2331        }
2332        if (drop_capsnap)
2333                iput(inode);
2334}
2335
2336/*
2337 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2338 * actually be a revocation if it specifies a smaller cap set.)
2339 *
2340 * caller holds s_mutex and i_ceph_lock, we drop both.
2341 *
2342 * check_caps values computed below (the function itself returns void):
2343 *  0 - ok, nothing further to do
2344 *  1 - check_caps on auth cap only (writeback)
2345 *  2 - check_caps on all caps (ack revoke)
2346 */
2347static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2348                             struct ceph_mds_session *session,
2349                             struct ceph_cap *cap,
2350                             struct ceph_buffer *xattr_buf)
2351                __releases(ci->i_ceph_lock)
2352{
2353        struct ceph_inode_info *ci = ceph_inode(inode);
2354        int mds = session->s_mds;
2355        int seq = le32_to_cpu(grant->seq);
2356        int newcaps = le32_to_cpu(grant->caps);
2357        int issued, implemented, used, wanted, dirty;
2358        u64 size = le64_to_cpu(grant->size);
2359        u64 max_size = le64_to_cpu(grant->max_size);
2360        struct timespec mtime, atime, ctime;
2361        int check_caps = 0;
2362        int wake = 0;
2363        int writeback = 0;
2364        int revoked_rdcache = 0;
2365        int queue_invalidate = 0;
2366
2367        dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2368             inode, cap, mds, seq, ceph_cap_string(newcaps));
2369        dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2370                inode->i_size);
2371
2372        /*
2373         * If CACHE is being revoked, and we have no dirty buffers,
2374         * try to invalidate (once).  (If there are dirty buffers, we
2375         * will invalidate _after_ writeback.)
2376         */
2377        if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2378            (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2379            !ci->i_wrbuffer_ref) {
2380                if (try_nonblocking_invalidate(inode) == 0) {
2381                        revoked_rdcache = 1;
2382                } else {
2383                        /* there were locked pages.. invalidate later
2384                           in a separate thread. */
2385                        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2386                                queue_invalidate = 1;
2387                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
2388                        }
2389                }
2390        }
2391
2392        /* side effects now are allowed */
2393
2394        issued = __ceph_caps_issued(ci, &implemented);
2395        issued |= implemented | __ceph_caps_dirty(ci);
2396
2397        cap->cap_gen = session->s_cap_gen;
2398
2399        __check_cap_issue(ci, cap, newcaps);
2400
2401        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2402                inode->i_mode = le32_to_cpu(grant->mode);
2403                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2404                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2405                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2406                     from_kuid(&init_user_ns, inode->i_uid),
2407                     from_kgid(&init_user_ns, inode->i_gid));
2408        }
2409
2410        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2411                set_nlink(inode, le32_to_cpu(grant->nlink));
2412
2413        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2414                int len = le32_to_cpu(grant->xattr_len);
2415                u64 version = le64_to_cpu(grant->xattr_version);
2416
2417                if (version > ci->i_xattrs.version) {
2418                        dout(" got new xattrs v%llu on %p len %d\n",
2419                             version, inode, len);
2420                        if (ci->i_xattrs.blob)
2421                                ceph_buffer_put(ci->i_xattrs.blob);
2422                        ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2423                        ci->i_xattrs.version = version;
2424                }
2425        }
2426
2427        /* size/ctime/mtime/atime? */
2428        ceph_fill_file_size(inode, issued,
2429                            le32_to_cpu(grant->truncate_seq),
2430                            le64_to_cpu(grant->truncate_size), size);
2431        ceph_decode_timespec(&mtime, &grant->mtime);
2432        ceph_decode_timespec(&atime, &grant->atime);
2433        ceph_decode_timespec(&ctime, &grant->ctime);
2434        ceph_fill_file_time(inode, issued,
2435                            le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2436                            &atime);
2437
2438        /* max size increase? */
2439        if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2440                dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2441                ci->i_max_size = max_size;
2442                if (max_size >= ci->i_wanted_max_size) {
2443                        ci->i_wanted_max_size = 0;  /* reset */
2444                        ci->i_requested_max_size = 0;
2445                }
2446                wake = 1;
2447        }
2448
2449        /* check cap bits */
2450        wanted = __ceph_caps_wanted(ci);
2451        used = __ceph_caps_used(ci);
2452        dirty = __ceph_caps_dirty(ci);
2453        dout(" my wanted = %s, used = %s, dirty %s\n",
2454             ceph_cap_string(wanted),
2455             ceph_cap_string(used),
2456             ceph_cap_string(dirty));
2457        if (wanted != le32_to_cpu(grant->wanted)) {
2458                dout("mds wanted %s -> %s\n",
2459                     ceph_cap_string(le32_to_cpu(grant->wanted)),
2460                     ceph_cap_string(wanted));
2461                /* imported cap may not have correct mds_wanted */
2462                if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
2463                        check_caps = 1;
2464        }
2465
2466        cap->seq = seq;
2467
2468        /* file layout may have changed */
2469        ci->i_layout = grant->layout;
2470
2471        /* revocation, grant, or no-op? */
2472        if (cap->issued & ~newcaps) {
2473                int revoking = cap->issued & ~newcaps;
2474
2475                dout("revocation: %s -> %s (revoking %s)\n",
2476                     ceph_cap_string(cap->issued),
2477                     ceph_cap_string(newcaps),
2478                     ceph_cap_string(revoking));
2479                if (revoking & used & CEPH_CAP_FILE_BUFFER)
2480                        writeback = 1;  /* initiate writeback; will delay ack */
2481                else if (revoking == CEPH_CAP_FILE_CACHE &&
2482                         (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2483                         queue_invalidate)
2484                        ; /* do nothing yet, invalidation will be queued */
2485                else if (cap == ci->i_auth_cap)
2486                        check_caps = 1; /* check auth cap only */
2487                else
2488                        check_caps = 2; /* check all caps */
2489                cap->issued = newcaps;
2490                cap->implemented |= newcaps;
2491        } else if (cap->issued == newcaps) {
2492                dout("caps unchanged: %s -> %s\n",
2493                     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2494        } else {
2495                dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2496                     ceph_cap_string(newcaps));
2497                /* is a non-auth MDS revoking the newly granted caps? */
2498                if (cap == ci->i_auth_cap &&
2499                    __ceph_caps_revoking_other(ci, cap, newcaps))
2500                    check_caps = 2;
2501
2502                cap->issued = newcaps;
2503                cap->implemented |= newcaps; /* add bits only, to
2504                                              * avoid stepping on a
2505                                              * pending revocation */
2506                wake = 1;
2507        }
2508        BUG_ON(cap->issued & ~cap->implemented);
2509
2510        spin_unlock(&ci->i_ceph_lock);
2511        if (writeback)
2512                /*
2513                 * queue inode for writeback: we can't actually call
2514                 * filemap_write_and_wait, etc. from message handler
2515                 * context.
2516                 */
2517                ceph_queue_writeback(inode);
2518        if (queue_invalidate)
2519                ceph_queue_invalidate(inode);
2520        if (wake)
2521                wake_up_all(&ci->i_cap_wq);
2522
2523        if (check_caps == 1)
2524                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2525                                session);
2526        else if (check_caps == 2)
2527                ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2528        else
2529                mutex_unlock(&session->s_mutex);
2530}
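
    /*
     * To summarize the revocation handling above: if the revoked caps
     * cover dirty buffers, we kick off writeback and ack only later;
     * if just FILE_CACHE is going away, we rely on the queued (or
     * already completed) invalidate; otherwise we fall through to
     * ceph_check_caps(), which sends the ack once nothing still uses
     * the revoked bits.
     */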
2531
2532/*
2533 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2534 * MDS has been safely committed.
2535 */
2536static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2537                                 struct ceph_mds_caps *m,
2538                                 struct ceph_mds_session *session,
2539                                 struct ceph_cap *cap)
2540        __releases(ci->i_ceph_lock)
2541{
2542        struct ceph_inode_info *ci = ceph_inode(inode);
2543        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2544        unsigned seq = le32_to_cpu(m->seq);
2545        int dirty = le32_to_cpu(m->dirty);
2546        int cleaned = 0;
2547        int drop = 0;
2548        int i;
2549
2550        for (i = 0; i < CEPH_CAP_BITS; i++)
2551                if ((dirty & (1 << i)) &&
2552                    flush_tid == ci->i_cap_flush_tid[i])
2553                        cleaned |= 1 << i;
2554
2555        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2556             " flushing %s -> %s\n",
2557             inode, session->s_mds, seq, ceph_cap_string(dirty),
2558             ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2559             ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2560
2561        if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2562                goto out;
2563
2564        ci->i_flushing_caps &= ~cleaned;
2565
2566        spin_lock(&mdsc->cap_dirty_lock);
2567        if (ci->i_flushing_caps == 0) {
2568                list_del_init(&ci->i_flushing_item);
2569                if (!list_empty(&session->s_cap_flushing))
2570                        dout(" mds%d still flushing cap on %p\n",
2571                             session->s_mds,
2572                             &list_entry(session->s_cap_flushing.next,
2573                                         struct ceph_inode_info,
2574                                         i_flushing_item)->vfs_inode);
2575                mdsc->num_cap_flushing--;
2576                wake_up_all(&mdsc->cap_flushing_wq);
2577                dout(" inode %p now !flushing\n", inode);
2578
2579                if (ci->i_dirty_caps == 0) {
2580                        dout(" inode %p now clean\n", inode);
2581                        BUG_ON(!list_empty(&ci->i_dirty_item));
2582                        drop = 1;
2583                        if (ci->i_wrbuffer_ref_head == 0) {
2584                                BUG_ON(!ci->i_head_snapc);
2585                                ceph_put_snap_context(ci->i_head_snapc);
2586                                ci->i_head_snapc = NULL;
2587                        }
2588                } else {
2589                        BUG_ON(list_empty(&ci->i_dirty_item));
2590                }
2591        }
2592        spin_unlock(&mdsc->cap_dirty_lock);
2593        wake_up_all(&ci->i_cap_wq);
2594
2595out:
2596        spin_unlock(&ci->i_ceph_lock);
2597        if (drop)
2598                iput(inode);
2599}
2600
2601/*
2602 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
2603 * throw away our cap_snap.
2604 *
2605 * Caller holds s_mutex.
2606 */
2607static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2608                                     struct ceph_mds_caps *m,
2609                                     struct ceph_mds_session *session)
2610{
2611        struct ceph_inode_info *ci = ceph_inode(inode);
2612        u64 follows = le64_to_cpu(m->snap_follows);
2613        struct ceph_cap_snap *capsnap;
2614        int drop = 0;
2615
2616        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2617             inode, ci, session->s_mds, follows);
2618
2619        spin_lock(&ci->i_ceph_lock);
2620        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2621                if (capsnap->follows == follows) {
2622                        if (capsnap->flush_tid != flush_tid) {
2623                                dout(" cap_snap %p follows %lld tid %lld !="
2624                                     " %lld\n", capsnap, follows,
2625                                     flush_tid, capsnap->flush_tid);
2626                                break;
2627                        }
2628                        WARN_ON(capsnap->dirty_pages || capsnap->writing);
2629                        dout(" removing %p cap_snap %p follows %lld\n",
2630                             inode, capsnap, follows);
2631                        ceph_put_snap_context(capsnap->context);
2632                        list_del(&capsnap->ci_item);
2633                        list_del(&capsnap->flushing_item);
2634                        ceph_put_cap_snap(capsnap);
2635                        drop = 1;
2636                        break;
2637                } else {
2638                        dout(" skipping cap_snap %p follows %lld\n",
2639                             capsnap, capsnap->follows);
2640                }
2641        }
2642        spin_unlock(&ci->i_ceph_lock);
2643        if (drop)
2644                iput(inode);
2645}
2646
2647/*
2648 * Handle TRUNC from MDS, indicating file truncation.
2649 *
2650 * caller holds s_mutex.
2651 */
2652static void handle_cap_trunc(struct inode *inode,
2653                             struct ceph_mds_caps *trunc,
2654                             struct ceph_mds_session *session)
2655        __releases(ci->i_ceph_lock)
2656{
2657        struct ceph_inode_info *ci = ceph_inode(inode);
2658        int mds = session->s_mds;
2659        int seq = le32_to_cpu(trunc->seq);
2660        u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2661        u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2662        u64 size = le64_to_cpu(trunc->size);
2663        int implemented = 0;
2664        int dirty = __ceph_caps_dirty(ci);
2665        int issued = __ceph_caps_issued(ci, &implemented);
2666        int queue_trunc = 0;
2667
2668        issued |= implemented | dirty;
2669
2670        dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2671             inode, mds, seq, truncate_size, truncate_seq);
2672        queue_trunc = ceph_fill_file_size(inode, issued,
2673                                          truncate_seq, truncate_size, size);
2674        spin_unlock(&ci->i_ceph_lock);
2675
2676        if (queue_trunc)
2677                ceph_queue_vmtruncate(inode);
2678}
2679
/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
                              struct ceph_mds_session *session,
                              int *open_target_sessions)
{
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
        unsigned mseq = le32_to_cpu(ex->migrate_seq);
        struct ceph_cap *cap = NULL, *t;
        struct rb_node *p;
        int remember = 1;

        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
             inode, ci, mds, mseq);

        spin_lock(&ci->i_ceph_lock);

        /* make sure we haven't seen a higher mseq */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                t = rb_entry(p, struct ceph_cap, ci_node);
                if (ceph_seq_cmp(t->mseq, mseq) > 0) {
                        dout(" higher mseq on cap from mds%d\n",
                             t->session->s_mds);
                        remember = 0;
                }
                if (t->session->s_mds == mds)
                        cap = t;
        }

        if (cap) {
                if (remember) {
                        /* make note */
                        ci->i_cap_exporting_mds = mds;
                        ci->i_cap_exporting_mseq = mseq;
                        ci->i_cap_exporting_issued = cap->issued;

                        /*
                         * make sure we have open sessions with all possible
                         * export targets, so that we get the matching IMPORT
                         */
                        *open_target_sessions = 1;

                        /*
                         * we can't flush dirty caps for which we've seen the
                         * EXPORT but not yet the IMPORT
                         */
                        spin_lock(&mdsc->cap_dirty_lock);
                        if (!list_empty(&ci->i_dirty_item)) {
                                dout(" moving %p to cap_dirty_migrating\n",
                                     inode);
                                list_move(&ci->i_dirty_item,
                                          &mdsc->cap_dirty_migrating);
                        }
                        spin_unlock(&mdsc->cap_dirty_lock);
                }
                __ceph_remove_cap(cap);
        }
        /* else, we already released it */

        spin_unlock(&ci->i_ceph_lock);
}

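/*
 * Aside: the mseq ordering above relies on ceph_seq_cmp(), which
 * compares sequence numbers by signed distance so that u32 wraparound
 * is handled; roughly (paraphrased from the libceph headers):
 *
 *      static inline int ceph_seq_cmp(u32 a, u32 b)
 *      {
 *              return (__s32)a - (__s32)b;
 *      }
 *
 * so a "later" migration still orders correctly across overflow.
 */
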
/*
 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
 * clean them up.
 *
 * caller holds s_mutex.
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
                              struct inode *inode, struct ceph_mds_caps *im,
                              struct ceph_mds_session *session,
                              void *snaptrace, int snaptrace_len)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
        unsigned issued = le32_to_cpu(im->caps);
        unsigned wanted = le32_to_cpu(im->wanted);
        unsigned seq = le32_to_cpu(im->seq);
        unsigned mseq = le32_to_cpu(im->migrate_seq);
        u64 realmino = le64_to_cpu(im->realm);
        u64 cap_id = le64_to_cpu(im->cap_id);

        if (ci->i_cap_exporting_mds >= 0 &&
            ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
                dout("handle_cap_import inode %p ci %p mds%d mseq %d"
                     " - cleared exporting from mds%d\n",
                     inode, ci, mds, mseq,
                     ci->i_cap_exporting_mds);
                ci->i_cap_exporting_issued = 0;
                ci->i_cap_exporting_mseq = 0;
                ci->i_cap_exporting_mds = -1;

                spin_lock(&mdsc->cap_dirty_lock);
                if (!list_empty(&ci->i_dirty_item)) {
                        dout(" moving %p back to cap_dirty\n", inode);
                        list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        } else {
                dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
                     inode, ci, mds, mseq);
        }

        /*
         * update the snap trace with snap_rwsem held for write, then
         * downgrade to read: ceph_add_cap() only needs read access,
         * but does need the realm from the trace to be in place first
         */
        down_write(&mdsc->snap_rwsem);
        ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
                               false);
        downgrade_write(&mdsc->snap_rwsem);
        ceph_add_cap(inode, session, cap_id, -1,
                     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
                     NULL /* no caps context */);
        kick_flushing_inode_caps(mdsc, session, inode);
        up_read(&mdsc->snap_rwsem);

        /* make sure we re-request max_size, if necessary */
        spin_lock(&ci->i_ceph_lock);
        ci->i_wanted_max_size = 0;  /* reset */
        ci->i_requested_max_size = 0;
        spin_unlock(&ci->i_ceph_lock);
}

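/*
 * Note: kick_flushing_inode_caps() above resends any cap flushes that
 * were still in flight when the cap migrated, so dirty metadata noted
 * before the EXPORT is not left stranded on the old session.
 */
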
/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
void ceph_handle_caps(struct ceph_mds_session *session,
                      struct ceph_msg *msg)
{
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        int mds = session->s_mds;
        int op;
        u32 seq, mseq;
        struct ceph_vino vino;
        u64 cap_id;
        u64 size, max_size;
        u64 tid;
        void *snaptrace;
        size_t snaptrace_len;
        void *flock;
        u32 flock_len;
        int open_target_sessions = 0;

        dout("handle_caps from mds%d\n", mds);

        /* decode */
        tid = le64_to_cpu(msg->hdr.tid);
        if (msg->front.iov_len < sizeof(*h))
                goto bad;
        h = msg->front.iov_base;
        op = le32_to_cpu(h->op);
        vino.ino = le64_to_cpu(h->ino);
        vino.snap = CEPH_NOSNAP;
        cap_id = le64_to_cpu(h->cap_id);
        seq = le32_to_cpu(h->seq);
        mseq = le32_to_cpu(h->migrate_seq);
        size = le64_to_cpu(h->size);
        max_size = le64_to_cpu(h->max_size);

        snaptrace = h + 1;
        snaptrace_len = le32_to_cpu(h->snap_trace_len);

        if (le16_to_cpu(msg->hdr.version) >= 2) {
                void *p, *end;

                p = snaptrace + snaptrace_len;
                end = msg->front.iov_base + msg->front.iov_len;
                ceph_decode_32_safe(&p, end, flock_len, bad);
                flock = p;
        } else {
                flock = NULL;
                flock_len = 0;
        }

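        /*
         * For reference, the message front decoded above is laid out as
         * follows (the flock blob exists only for hdr version >= 2):
         *
         *      struct ceph_mds_caps h;            fixed-size header
         *      u8   snaptrace[snap_trace_len];    snap realm trace
         *      le32 flock_len;                    v2+ only
         *      u8   flock[flock_len];             v2+ only
         */
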
        mutex_lock(&session->s_mutex);
        session->s_seq++;
        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
             (unsigned)seq);

        if (op == CEPH_CAP_OP_IMPORT)
                ceph_add_cap_releases(mdsc, session);

        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
             vino.snap, inode);
        if (!inode) {
                dout(" i don't have ino %llx\n", vino.ino);

                if (op == CEPH_CAP_OP_IMPORT)
                        __queue_cap_release(session, vino.ino, cap_id,
                                            mseq, seq);
                goto flush_cap_releases;
        }
        ci = ceph_inode(inode);

        /* these will work even if we don't have a cap yet */
        switch (op) {
        case CEPH_CAP_OP_FLUSHSNAP_ACK:
                handle_cap_flushsnap_ack(inode, tid, h, session);
                goto done;

        case CEPH_CAP_OP_EXPORT:
                handle_cap_export(inode, h, session, &open_target_sessions);
                goto done;

        case CEPH_CAP_OP_IMPORT:
                handle_cap_import(mdsc, inode, h, session,
                                  snaptrace, snaptrace_len);
                /* fall through: process the IMPORT as a grant below */
        }

        /* the rest require a cap */
        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                dout(" no cap on %p ino %llx.%llx from mds%d\n",
                     inode, ceph_ino(inode), ceph_snap(inode), mds);
                spin_unlock(&ci->i_ceph_lock);
                goto flush_cap_releases;
        }

        /* note that each of these drops i_ceph_lock for us */
        switch (op) {
        case CEPH_CAP_OP_REVOKE:
        case CEPH_CAP_OP_GRANT:
        case CEPH_CAP_OP_IMPORT:
                handle_cap_grant(inode, h, session, cap, msg->middle);
                goto done_unlocked;

        case CEPH_CAP_OP_FLUSH_ACK:
                handle_cap_flush_ack(inode, tid, h, session, cap);
                break;

        case CEPH_CAP_OP_TRUNC:
                handle_cap_trunc(inode, h, session);
                break;

        default:
                spin_unlock(&ci->i_ceph_lock);
                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
                       ceph_cap_op_name(op));
        }

        goto done;

flush_cap_releases:
        /*
         * send any full release message to try to move things
         * along for the mds (who clearly thinks we still have this
         * cap).
         */
        ceph_add_cap_releases(mdsc, session);
        ceph_send_cap_releases(mdsc, session);

done:
        mutex_unlock(&session->s_mutex);
done_unlocked:
        /* the grant path arrives here with s_mutex already dropped */
        if (inode)
                iput(inode);
        if (open_target_sessions)
                ceph_mdsc_open_export_target_sessions(mdsc, session);
        return;

bad:
        pr_err("ceph_handle_caps: corrupt message\n");
        ceph_msg_dump(msg);
        return;
}

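/*
 * For context, this handler is reached from the MDS client's message
 * dispatch (in mds_client.c); a minimal sketch, from memory:
 *
 *      case CEPH_MSG_CLIENT_CAPS:
 *              ceph_handle_caps(s, msg);
 *              break;
 */
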
/*
 * Delayed work handler: process the tail of the delayed cap release
 * LRU list.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
        struct ceph_inode_info *ci;
        int flags = CHECK_CAPS_NODELAY;

        dout("check_delayed_caps\n");
        while (1) {
                spin_lock(&mdsc->cap_delay_lock);
                if (list_empty(&mdsc->cap_delay_list))
                        break;
                ci = list_first_entry(&mdsc->cap_delay_list,
                                      struct ceph_inode_info,
                                      i_cap_delay_list);
                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
                    time_before(jiffies, ci->i_hold_caps_max))
                        break;
                list_del_init(&ci->i_cap_delay_list);
                spin_unlock(&mdsc->cap_delay_lock);
                dout("check_delayed_caps on %p\n", &ci->vfs_inode);
                ceph_check_caps(ci, flags, NULL);
        }
        spin_unlock(&mdsc->cap_delay_lock);
}

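/*
 * This is driven by the mdsc's periodic delayed work; a hedged sketch
 * of the caller (the real version lives in mds_client.c and also
 * renews sessions, trims caps, etc.):
 *
 *      static void delayed_work(struct work_struct *work)
 *      {
 *              struct ceph_mds_client *mdsc = ...;   (container_of)
 *
 *              ceph_check_delayed_caps(mdsc);
 *              ...
 *              re-arm the timer
 *      }
 */
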
/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
        struct ceph_inode_info *ci;
        struct inode *inode;

        dout("flush_dirty_caps\n");
        spin_lock(&mdsc->cap_dirty_lock);
        while (!list_empty(&mdsc->cap_dirty)) {
                ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
                                      i_dirty_item);
                inode = &ci->vfs_inode;
                /* pin the inode before dropping the lock to check caps */
                ihold(inode);
                dout("flush_dirty_caps %p\n", inode);
                spin_unlock(&mdsc->cap_dirty_lock);
                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
                iput(inode);
                spin_lock(&mdsc->cap_dirty_lock);
        }
        spin_unlock(&mdsc->cap_dirty_lock);
        dout("flush_dirty_caps done\n");
}

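/*
 * The loop above is an instance of a common pattern for walking a
 * spinlock-protected list when the per-entry work may sleep; a generic
 * sketch with hypothetical names:
 *
 *      spin_lock(&lock);
 *      while (!list_empty(&head)) {
 *              item = list_first_entry(&head, struct foo, node);
 *              hold_ref(item);         pin: entry may be freed or
 *              spin_unlock(&lock);     relisted once the lock drops
 *              do_blocking_work(item);
 *              put_ref(item);
 *              spin_lock(&lock);
 *      }
 *      spin_unlock(&lock);
 */
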
/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
        struct inode *inode = &ci->vfs_inode;
        int last = 0;

        spin_lock(&ci->i_ceph_lock);
        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
        if (--ci->i_nr_by_mode[fmode] == 0)
                last++;
        spin_unlock(&ci->i_ceph_lock);

        if (last && ci->i_vino.snap == CEPH_NOSNAP)
                ceph_check_caps(ci, 0, NULL);
}

/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * drop.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
                              int mds, int drop, int unless, int force)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *cap;
        struct ceph_mds_request_release *rel = *p;
        int used, dirty;
        int ret = 0;

        spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);

        dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
             inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
             ceph_cap_string(unless));

        /* only drop unused, clean caps */
        drop &= ~(used | dirty);

        cap = __get_cap_for_mds(ci, mds);
        if (cap && __cap_is_valid(cap)) {
                if (force ||
                    ((cap->issued & drop) &&
                     (cap->issued & unless) == 0)) {
                        if ((cap->issued & drop) &&
                            (cap->issued & unless) == 0) {
                                /* a real drop: update our cap state */
                                int wanted = __ceph_caps_wanted(ci);
                                if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
                                        wanted |= cap->mds_wanted;
                                dout("encode_inode_release %p cap %p "
                                     "%s -> %s, wanted %s -> %s\n", inode, cap,
                                     ceph_cap_string(cap->issued),
                                     ceph_cap_string(cap->issued & ~drop),
                                     ceph_cap_string(cap->mds_wanted),
                                     ceph_cap_string(wanted));

                                cap->issued &= ~drop;
                                cap->implemented &= ~drop;
                                cap->mds_wanted = wanted;
                        } else {
                                /* forced record only; cap state untouched */
                                dout("encode_inode_release %p cap %p %s"
                                     " (force)\n", inode, cap,
                                     ceph_cap_string(cap->issued));
                        }

                        rel->ino = cpu_to_le64(ceph_ino(inode));
                        rel->cap_id = cpu_to_le64(cap->cap_id);
                        rel->seq = cpu_to_le32(cap->seq);
                        rel->issue_seq = cpu_to_le32(cap->issue_seq);
                        rel->mseq = cpu_to_le32(cap->mseq);
                        rel->caps = cpu_to_le32(cap->issued);
                        rel->wanted = cpu_to_le32(cap->mds_wanted);
                        rel->dname_len = 0;
                        rel->dname_seq = 0;
                        *p += sizeof(*rel);
                        ret = 1;
                } else {
                        dout("encode_inode_release %p cap %p %s\n",
                             inode, cap, ceph_cap_string(cap->issued));
                }
        }
        spin_unlock(&ci->i_ceph_lock);
        return ret;
}

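/*
 * For reference, the record encoded above has this shape (paraphrasing
 * struct ceph_mds_request_release from the ceph wire headers):
 *
 *      struct ceph_mds_request_release {
 *              __le64 ino, cap_id;           ino and unique cap id
 *              __le32 caps, wanted;          new issued, wanted
 *              __le32 seq, issue_seq, mseq;
 *              __le32 dname_seq;             if releasing a dentry lease,
 *              __le32 dname_len;             the name bytes follow
 *      } __attribute__ ((packed));
 */
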
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
                               int mds, int drop, int unless)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct ceph_mds_request_release *rel = *p;
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int force = 0;
        int ret;

        /*
         * force a record for the directory caps if we have a dentry lease.
         * this is racy (we can't take i_ceph_lock and d_lock together), but
         * it doesn't have to be perfect; the mds will revoke anything we
         * don't release.
         */
        spin_lock(&dentry->d_lock);
        if (di->lease_session && di->lease_session->s_mds == mds)
                force = 1;
        spin_unlock(&dentry->d_lock);

        ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

        spin_lock(&dentry->d_lock);
        if (ret && di->lease_session && di->lease_session->s_mds == mds) {
                dout("encode_dentry_release %p mds%d seq %d\n",
                     dentry, mds, (int)di->lease_seq);
                rel->dname_len = cpu_to_le32(dentry->d_name.len);
                memcpy(*p, dentry->d_name.name, dentry->d_name.len);
                *p += dentry->d_name.len;
                rel->dname_seq = cpu_to_le32(di->lease_seq);
                __ceph_mdsc_drop_dentry_lease(dentry);
        }
        spin_unlock(&dentry->d_lock);
        return ret;
}

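/*
 * Resulting buffer layout when a dentry lease is piggy-backed (a sketch
 * based on the code above):
 *
 *      struct ceph_mds_request_release rel;   dname_len/dname_seq set
 *      char name[dname_len];                  dentry name, not
 *                                             NUL-terminated
 */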