linux/fs/ceph/inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */
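/*
 * Editor's note (a sketch of the identity scheme, from the helpers
 * below): an inode's cluster-wide identity is its ceph_vino, the
 * (ino, snap) pair.  The VFS i_ino is only derived from that pair via
 * ceph_vino_to_ino() and can collide when ino_t is narrower than
 * 64 bits, which is presumably why iget5_locked() matches on the full
 * vino with ceph_ino_compare() rather than on i_ino alone.
 */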

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
        ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
        inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
        return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
        struct inode *inode;
        ino_t t = ceph_vino_to_ino(vino);

        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
                unlock_new_inode(inode);
        }

        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
        return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
        };
        struct inode *inode = ceph_get_inode(parent->i_sb, vino);
        struct ceph_inode_info *ci = ceph_inode(inode);

        BUG_ON(!S_ISDIR(parent->i_mode));
        if (IS_ERR(inode))
                return inode;
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
        inode->i_op = &ceph_dir_iops;
        inode->i_fop = &ceph_dir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
}

const struct inode_operations ceph_file_iops = {
        .permission = ceph_permission,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
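/*
 * (Frag encoding, per include/linux/ceph/ceph_frag.h: a frag is a
 * 32-bit word whose upper 8 bits give the number of significant
 * prefix bits and whose lower 24 bits hold the value those bits must
 * match, so ceph_frag_make(0, 0) covers the entire hash range and a
 * frag split by n has 2^n children.)
 */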

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
        int c;

        p = &ci->i_fragtree.rb_node;
        while (*p) {
                parent = *p;
                frag = rb_entry(parent, struct ceph_inode_frag, node);
                c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return frag;
        }

        frag = kmalloc(sizeof(*frag), GFP_NOFS);
        if (!frag) {
                pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
                       "frag %x\n", &ci->vfs_inode,
                       ceph_vinop(&ci->vfs_inode), f);
                return ERR_PTR(-ENOMEM);
        }
        frag->frag = f;
        frag->split_by = 0;
        frag->mds = -1;
        frag->ndist = 0;

        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);

        dout("get_or_create_frag added %llx.%llx frag %x\n",
             ceph_vinop(&ci->vfs_inode), f);
        return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
        struct rb_node *n = ci->i_fragtree.rb_node;

        while (n) {
                struct ceph_inode_frag *frag =
                        rb_entry(n, struct ceph_inode_frag, node);
                int c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return frag;
        }
        return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
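/*
 * For example: starting from the root frag, a frag with split_by == 1
 * has two children covering the lower and upper halves of its hash
 * range; each iteration below descends into whichever child contains
 * @v, stopping at a leaf (no tree entry, or split_by == 0).
 */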
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                     struct ceph_inode_frag *pfrag,
                     int *found)
{
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
        u32 n;

        if (found)
                *found = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        while (1) {
                WARN_ON(!ceph_frag_contains_value(t, v));
                frag = __ceph_find_frag(ci, t);
                if (!frag)
                        break; /* t is a leaf */
                if (frag->split_by == 0) {
                        if (pfrag)
                                memcpy(pfrag, frag, sizeof(*pfrag));
                        if (found)
                                *found = 1;
                        break;
                }

                /* choose child */
                nway = 1 << frag->split_by;
                dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
                                t = n;
                                break;
                        }
                }
                BUG_ON(i == nway);
        }
        dout("choose_frag(%x) = %x\n", v, t);

        mutex_unlock(&ci->i_fragtree_mutex);
        return t;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragments in the tree ONLY if ndist > 0.  (Otherwise, only
 * branches/splits are included in i_fragtree.)
 */
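/*
 * (auth is the MDS currently authoritative for the fragment; ndist
 * and dist[] name the MDS ranks holding replicas, which lets us
 * direct requests at a replica when one exists.)
 */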
static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
        int ndist = le32_to_cpu(dirinfo->ndist);
        int i;
        int err = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        if (ndist == 0) {
                /* no delegation info needed. */
                frag = __ceph_find_frag(ci, id);
                if (!frag)
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
                        dout("fill_dirfrag removed %llx.%llx frag %x"
                             " (no ref)\n", ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
                        dout("fill_dirfrag cleared %llx.%llx frag %x"
                             " referral\n", ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
                goto out;
        }

        /* find/add this frag to store mds delegation info */
        frag = __get_or_create_frag(ci, id);
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
                pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
                       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }

        frag->mds = mds;
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
        dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
             ceph_vinop(inode), frag->frag, frag->ndist);

out:
        mutex_unlock(&ci->i_fragtree_mutex);
        return err;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
        struct ceph_inode_info *ci;
        int i;

        ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
        if (!ci)
                return NULL;

        dout("alloc_inode %p\n", &ci->vfs_inode);

        spin_lock_init(&ci->i_ceph_lock);

        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
        atomic_set(&ci->i_release_count, 1);
        atomic_set(&ci->i_complete_count, 0);
        ci->i_symlink = NULL;

        memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);

        ci->i_xattrs.blob = NULL;
        ci->i_xattrs.prealloc_blob = NULL;
        ci->i_xattrs.dirty = false;
        ci->i_xattrs.index = RB_ROOT;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.version = 0;
        ci->i_xattrs.index_version = 0;

        ci->i_caps = RB_ROOT;
        ci->i_auth_cap = NULL;
        ci->i_dirty_caps = 0;
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
        ci->i_cap_flush_seq = 0;
        ci->i_cap_flush_last_tid = 0;
        memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
        INIT_LIST_HEAD(&ci->i_cap_delay_list);
        ci->i_cap_exporting_mds = 0;
        ci->i_cap_exporting_mseq = 0;
        ci->i_cap_exporting_issued = 0;
        INIT_LIST_HEAD(&ci->i_cap_snaps);
        ci->i_head_snapc = NULL;
        ci->i_snap_caps = 0;

        for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
                ci->i_nr_by_mode[i] = 0;

        mutex_init(&ci->i_truncate_mutex);
        ci->i_truncate_seq = 0;
        ci->i_truncate_size = 0;
        ci->i_truncate_pending = 0;

        ci->i_max_size = 0;
        ci->i_reported_size = 0;
        ci->i_wanted_max_size = 0;
        ci->i_requested_max_size = 0;

        ci->i_pin_ref = 0;
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
        ci->i_wb_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;

        INIT_LIST_HEAD(&ci->i_unsafe_writes);
        INIT_LIST_HEAD(&ci->i_unsafe_dirops);
        spin_lock_init(&ci->i_unsafe_lock);

        ci->i_snap_realm = NULL;
        INIT_LIST_HEAD(&ci->i_snap_realm_item);
        INIT_LIST_HEAD(&ci->i_snap_flush_item);

        INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
        INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

        INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

        ceph_fscache_inode_init(ci);

        return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);

        kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;

        dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

        ceph_fscache_unregister_inode_cookie(ci);

        ceph_queue_caps_release(inode);

        /*
         * we may still have a snap_realm reference if there are stray
         * caps in i_cap_exporting_issued or i_snap_caps.
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                dout(" dropping residual ref to snap realm %p\n", realm);
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }

        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
                kfree(frag);
        }

        __ceph_destroy_xattrs(ci);
        if (ci->i_xattrs.blob)
                ceph_buffer_put(ci->i_xattrs.blob);
        if (ci->i_xattrs.prealloc_blob)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);

        call_rcu(&inode->i_rcu, ceph_i_callback);
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or the MDS may have more up-to-date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
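/*
 * For example: truncate() shrinks the file but bumps truncate_seq, so
 * the newer-seq (smaller) size from the MDS overrides our larger
 * cached i_size; without the seq bump, a shrink could never win
 * against the "same seq, size only grows" rule below.
 */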
int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;

        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
                inode->i_size = size;
                inode->i_blocks = (size + (1<<9) - 1) >> 9;
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
                        dout("truncate_seq %u -> %u\n",
                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;

                        /* the MDS should have revoked these caps */
                        WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
                                               CEPH_CAP_FILE_RD |
                                               CEPH_CAP_FILE_WR |
                                               CEPH_CAP_FILE_LAZYIO));
                        /*
                         * If we hold FILE_CACHE or FILE_BUFFER caps, or
                         * if the file is mmapped or open (and so may
                         * have cached pages anyway), we must queue a
                         * truncate to invalidate them.
                         */
                        if ((issued & (CEPH_CAP_FILE_CACHE|
                                       CEPH_CAP_FILE_BUFFER)) ||
                            mapping_mapped(inode->i_mapping) ||
                            __ceph_caps_file_wanted(ci)) {
                                ci->i_truncate_pending++;
                                queue_trunc = 1;
                        }
                }
        }
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
            ci->i_truncate_size != truncate_size) {
                dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
                     truncate_size);
                ci->i_truncate_size = truncate_size;
        }

        if (queue_trunc)
                ceph_fscache_invalidate(inode);

        return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec *ctime,
                         struct timespec *mtime, struct timespec *atime)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int warn = 0;

        if (issued & (CEPH_CAP_FILE_EXCL|
                      CEPH_CAP_FILE_WR|
                      CEPH_CAP_FILE_BUFFER|
                      CEPH_CAP_AUTH_EXCL|
                      CEPH_CAP_XATTR_EXCL)) {
                if (timespec_compare(ctime, &inode->i_ctime) > 0) {
                        dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
                             inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode->i_ctime = *ctime;
                }
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
                        dout("mtime %ld.%09ld -> %ld.%09ld "
                             "tw %d -> %d\n",
                             inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                             mtime->tv_sec, mtime->tv_nsec,
                             ci->i_time_warp_seq, (int)time_warp_seq);

                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else if (time_warp_seq == ci->i_time_warp_seq) {
                        /* nobody did utimes(); take the max */
                        if (timespec_compare(mtime, &inode->i_mtime) > 0) {
                                dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_mtime.tv_sec,
                                     inode->i_mtime.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode->i_mtime = *mtime;
                        }
                        if (timespec_compare(atime, &inode->i_atime) > 0) {
                                dout("atime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_atime.tv_sec,
                                     inode->i_atime.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode->i_atime = *atime;
                        }
                } else if (issued & CEPH_CAP_FILE_EXCL) {
                        /* we did a utimes(); ignore mds values */
                } else {
                        warn = 1;
                }
        } else {
                /* we have no write|excl caps; whatever the MDS says is true */
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
                        inode->i_ctime = *ctime;
                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else {
                        warn = 1;
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
                dout("%p mds time_warp_seq %llu < %u\n",
                     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
                      struct ceph_mds_session *session,
                      unsigned long ttl_from, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i;
        int issued = 0, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
        struct ceph_inode_frag *frag;
        struct rb_node *rb_node;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;

        dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);

        /*
         * prealloc xattr data, if it looks like we'll need it.  only
         * if len > 4 (meaning there are actually xattrs; the first 4
         * bytes are the xattr count).
         */
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
                        pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
                               iinfo->xattr_len);
        }

        spin_lock(&ci->i_ceph_lock);

        /*
         * provided version will be odd if inode value is projected,
         * even if stable.  skip the update if we have newer stable
         * info (ours>=theirs, e.g. due to racing mds replies), unless
         * we are getting projected (unstable) info (in which case the
         * version is odd, and we want ours>theirs).
         *   us   them
         *   2    2     skip
         *   3    2     skip
         *   3    3     update
         */
        if (le64_to_cpu(info->version) > 0 &&
            (ci->i_version & ~1) >= le64_to_cpu(info->version))
                goto no_change;

        issued = __ceph_caps_issued(ci, &implemented);
        issued |= implemented | __ceph_caps_dirty(ci);

        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);

        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
                inode->i_mode = le32_to_cpu(info->mode);
                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
                     from_kuid(&init_user_ns, inode->i_uid),
                     from_kgid(&init_user_ns, inode->i_gid));
        }

        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
                set_nlink(inode, le32_to_cpu(info->nlink));

        /* be careful with mtime, atime, size */
        ceph_decode_timespec(&atime, &info->atime);
        ceph_decode_timespec(&mtime, &info->mtime);
        ceph_decode_timespec(&ctime, &info->ctime);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          le32_to_cpu(info->truncate_seq),
                                          le64_to_cpu(info->truncate_size),
                                          le64_to_cpu(info->size));
        ceph_fill_file_time(inode, issued,
                            le32_to_cpu(info->time_warp_seq),
                            &ctime, &mtime, &atime);

        /* only update max_size on auth cap */
        if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
            ci->i_max_size != le64_to_cpu(info->max_size)) {
                dout("max_size %lld -> %llu\n", ci->i_max_size,
                     le64_to_cpu(info->max_size));
                ci->i_max_size = le64_to_cpu(info->max_size);
        }

        ci->i_layout = info->layout;
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

        /* xattrs */
        /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
                               iinfo->xattr_data, iinfo->xattr_len);
                ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
                xattr_blob = NULL;
        }

        inode->i_mapping->a_ops = &ceph_aops;
        inode->i_mapping->backing_dev_info =
                &ceph_sb_to_client(inode->i_sb)->backing_dev_info;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFBLK:
        case S_IFCHR:
        case S_IFSOCK:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                inode->i_op = &ceph_file_iops;
                break;
        case S_IFREG:
                inode->i_op = &ceph_file_iops;
                inode->i_fop = &ceph_file_fops;
                break;
        case S_IFLNK:
                inode->i_op = &ceph_symlink_iops;
                if (!ci->i_symlink) {
                        u32 symlen = iinfo->symlink_len;
                        char *sym;

                        spin_unlock(&ci->i_ceph_lock);

                        err = -EINVAL;
                        if (WARN_ON(symlen != inode->i_size))
                                goto out;

                        err = -ENOMEM;
                        sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
                        if (!sym)
                                goto out;

                        spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
                                kfree(sym); /* lost a race */
                }
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
                inode->i_fop = &ceph_dir_fops;

                ci->i_dir_layout = iinfo->dir_layout;

                ci->i_files = le64_to_cpu(info->files);
                ci->i_subdirs = le64_to_cpu(info->subdirs);
                ci->i_rbytes = le64_to_cpu(info->rbytes);
                ci->i_rfiles = le64_to_cpu(info->rfiles);
                ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
                ceph_decode_timespec(&ci->i_rctime, &info->rctime);
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
                       ceph_vinop(inode), inode->i_mode);
        }

        /* set dir completion flag? */
        if (S_ISDIR(inode->i_mode) &&
            ci->i_files == 0 && ci->i_subdirs == 0 &&
            ceph_snap(inode) == CEPH_NOSNAP &&
            (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
            (issued & CEPH_CAP_FILE_EXCL) == 0 &&
            !__ceph_dir_is_complete(ci)) {
                dout(" marking %p complete (empty)\n", inode);
                __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
                ci->i_max_offset = 2;
        }
no_change:
        spin_unlock(&ci->i_ceph_lock);

        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);

        /* populate frag tree */
        /* FIXME: move me up, if/when version reflects fragtree changes */
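        /*
         * Both the rbtree (iterated in frag order) and the MDS's split
         * list are sorted, so the loop below is a two-pointer merge:
         * local frags the MDS no longer reports are erased, matches
         * are updated in place, and new splits are inserted (this
         * assumes the MDS sends splits in frag order).
         */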
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
        rb_node = rb_first(&ci->i_fragtree);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
                frag = NULL;
                while (rb_node) {
                        frag = rb_entry(rb_node, struct ceph_inode_frag, node);
                        if (ceph_frag_compare(frag->frag, id) >= 0) {
                                if (frag->frag != id)
                                        frag = NULL;
                                else
                                        rb_node = rb_next(rb_node);
                                break;
                        }
                        rb_node = rb_next(rb_node);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                        frag = NULL;
                }
                if (!frag) {
                        frag = __get_or_create_frag(ci, id);
                        if (IS_ERR(frag))
                                continue;
                }
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
        while (rb_node) {
                frag = rb_entry(rb_node, struct ceph_inode_frag, node);
                rb_node = rb_next(rb_node);
                rb_erase(&frag->node, &ci->i_fragtree);
                kfree(frag);
        }
        mutex_unlock(&ci->i_fragtree_mutex);

        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
                                     cap_fmode,
                                     le32_to_cpu(info->cap.caps),
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags,
                                     caps_reservation);
                } else {
                        spin_lock(&ci->i_ceph_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
                        spin_unlock(&ci->i_ceph_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
                           ceph_vinop(inode));
                __ceph_get_fmode(ci, cap_fmode);
        }

        /* update delegation info? */
        if (dirinfo)
                ceph_fill_dirfrag(inode, dirinfo);

        err = 0;

out:
        if (xattr_blob)
                ceph_buffer_put(xattr_blob);
        return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
                                struct ceph_mds_reply_lease *lease,
                                struct ceph_mds_session *session,
                                unsigned long from_time)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        unsigned long duration = le32_to_cpu(lease->duration_ms);
        unsigned long ttl = from_time + (duration * HZ) / 1000;
        unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;

        /* only track leases on regular dentries */
        if (dentry->d_op != &ceph_dentry_ops)
                return;

        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
             dentry, duration, ttl);

        /* make lease_rdcache_gen match directory */
        dir = dentry->d_parent->d_inode;
        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

        if (duration == 0)
                goto out_unlock;

        if (di->lease_gen == session->s_cap_gen &&
            time_before(ttl, dentry->d_time))
                goto out_unlock;  /* we already have a newer lease. */

        if (di->lease_session && di->lease_session != session)
                goto out_unlock;

        ceph_dentry_lru_touch(dentry);

        if (!di->lease_session)
                di->lease_session = ceph_get_mds_session(session);
        di->lease_gen = session->s_cap_gen;
        di->lease_seq = le32_to_cpu(lease->seq);
        di->lease_renew_after = half_ttl;
        di->lease_renew_from = 0;
        dentry->d_time = ttl;
out_unlock:
        spin_unlock(&dentry->d_lock);
        return;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dir->d_inode;
        struct ceph_inode_info *ci;
        struct ceph_dentry_info *di;

        BUG_ON(!inode);

        ci = ceph_inode(inode);
        di = ceph_dentry(dn);

        spin_lock(&ci->i_ceph_lock);
        if (!__ceph_dir_is_complete(ci)) {
                spin_unlock(&ci->i_ceph_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
        spin_unlock(&ci->i_ceph_lock);

        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
        list_move(&dn->d_u.d_child, &dir->d_subdirs);
        dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
             dn->d_u.d_child.prev, dn->d_u.d_child.next);
        spin_unlock(&dn->d_lock);
        spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
                                    bool *prehash, bool set_offset)
{
        struct dentry *realdn;

        BUG_ON(dn->d_inode);

        /* dn must be unhashed */
        if (!d_unhashed(dn))
                d_drop(dn);
        realdn = d_materialise_unique(dn, in);
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
                if (prehash)
                        *prehash = false; /* don't rehash on error */
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
                     "inode %p ino %llx.%llx\n",
                     dn, d_count(dn),
                     realdn, d_count(realdn),
                     realdn->d_inode, ceph_vinop(realdn->d_inode));
                dput(dn);
                dn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));
                dout("dn %p attached to %p ino %llx.%llx\n",
                     dn, dn->d_inode, ceph_vinop(dn->d_inode));
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
        if (set_offset)
                ceph_set_dentry_offset(dn);
out:
        return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *         a directory inode along with a dentry,
 *  and/or a target inode.
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                    struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;

        dout("fill_trace %p is_dentry %d is_target %d\n", req,
             rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
        /*
         * Debugging hook:
         *
         * If we resend completed ops to a recovering mds, we get no
         * trace.  Since that is very rare, pretend this is the case
         * to ensure the 'no trace' handlers in the callers behave.
         *
         * Fill in inodes unconditionally to avoid breaking cap
         * invariants.
         */
        if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
                pr_info("fill_trace faking empty trace on %lld %s\n",
                        req->r_tid, ceph_mds_op_name(rinfo->head->op));
                if (rinfo->head->is_dentry) {
                        rinfo->head->is_dentry = 0;
                        err = fill_inode(req->r_locked_dir,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1);
                }
                if (rinfo->head->is_target) {
                        rinfo->head->is_target = 0;
                        ininfo = rinfo->targeti.in;
                        vino.ino = le64_to_cpu(ininfo->ino);
                        vino.snap = le64_to_cpu(ininfo->snapid);
                        in = ceph_get_inode(sb, vino);
                        err = fill_inode(in, &rinfo->targeti, NULL,
                                         session, req->r_request_started,
                                         req->r_fmode);
                        iput(in);
                }
        }
#endif

        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
                dout("fill_trace reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_locked_dir)
                        ceph_invalidate_dir_request(req);
                return 0;
        }

        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;

                if (dir) {
                        err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1,
                                         &req->r_caps_reservation);
                        if (err < 0)
                                return err;
                } else {
                        WARN_ON_ONCE(1);
                }
        }

        if (rinfo->head->is_target) {
                vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

                in = ceph_get_inode(sb, vino);
                if (IS_ERR(in)) {
                        err = PTR_ERR(in);
                        goto done;
                }
                req->r_target_inode = in;

                err = fill_inode(in, &rinfo->targeti, NULL,
                                session, req->r_request_started,
                                (le32_to_cpu(rinfo->head->result) == 0) ?
                                req->r_fmode : -1,
                                &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                                in, ceph_vinop(in));
                        goto done;
                }
        }

        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            req->r_locked_dir &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
                 * unlink               : linked -> null
                 */
                struct inode *dir = req->r_locked_dir;
                struct dentry *dn = req->r_dentry;
                bool have_dir_cap, have_lease;

                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(dn->d_parent->d_inode != dir);
                BUG_ON(ceph_ino(dir) !=
                       le64_to_cpu(rinfo->diri.in->ino));
                BUG_ON(ceph_snap(dir) !=
                       le64_to_cpu(rinfo->diri.in->snapid));

                /* do we have a lease on the whole dir? */
                have_dir_cap =
                        (le32_to_cpu(rinfo->diri.in->cap.caps) &
                         CEPH_CAP_FILE_SHARED);

                /* do we have a dn lease? */
                have_lease = have_dir_cap ||
                        le32_to_cpu(rinfo->dlease->duration_ms);
                if (!have_lease)
                        dout("fill_trace  no dentry lease or dir cap\n");

                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);

                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);

                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);

                        /*
                         * d_move() puts the renamed dentry at the end of
                         * d_subdirs.  We need to assign it an appropriate
                         * directory offset so we can behave when dir is
                         * complete.
                         */
                        ceph_set_dentry_offset(req->r_old_dentry);
                        dout("dn %p gets new offset %lld\n", req->r_old_dentry,
                             ceph_dentry(req->r_old_dentry)->offset);

                        dn = req->r_old_dentry;  /* use old_dentry */
                }

                /* null dentry? */
                if (!rinfo->head->is_target) {
                        dout("fill_trace null dentry\n");
                        if (dn->d_inode) {
                                dout("d_delete %p\n", dn);
                                d_delete(dn);
                        } else {
                                dout("d_instantiate %p NULL\n", dn);
                                d_instantiate(dn, NULL);
                                if (have_lease && d_unhashed(dn))
                                        d_rehash(dn);
                                update_dentry_lease(dn, rinfo->dlease,
                                                    session,
                                                    req->r_request_started);
                        }
                        goto done;
                }

                /* attach proper inode */
                if (!dn->d_inode) {
                        ihold(in);
                        dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
                } else if (dn->d_inode && dn->d_inode != in) {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, dn->d_inode, ceph_vinop(dn->d_inode),
                             ceph_vinop(in));
                        have_lease = false;
                }

                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
        } else if (!req->r_aborted &&
                   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP)) {
                struct dentry *dn = req->r_dentry;

                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
                BUG_ON(!req->r_locked_dir);
                BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                dout(" linking snapped dir %p to dn %p\n", in, dn);
                ihold(in);
                dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
        }
done:
        dout("fill_trace done err=%d\n", err);
        return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
                                           struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        int i, err = 0;

        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;
                struct inode *in;
                int rc;

                vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
                vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

                in = ceph_get_inode(req->r_dentry->d_sb, vino);
                if (IS_ERR(in)) {
                        err = PTR_ERR(in);
                        dout("new_inode badness got %d\n", err);
                        continue;
                }
                rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
                                req->r_request_started, -1,
                                &req->r_caps_reservation);
                if (rc < 0) {
                        pr_err("fill_inode badness on %p got %d\n", in, rc);
                        err = rc;
                        continue;
                }
        }

        return err;
}

1235int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1236                             struct ceph_mds_session *session)
1237{
1238        struct dentry *parent = req->r_dentry;
1239        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1240        struct qstr dname;
1241        struct dentry *dn;
1242        struct inode *in;
1243        int err = 0, ret, i;
1244        struct inode *snapdir = NULL;
1245        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1246        struct ceph_dentry_info *di;
1247        u64 r_readdir_offset = req->r_readdir_offset;
1248        u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1249
1250        if (rinfo->dir_dir &&
1251            le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1252                dout("readdir_prepopulate got new frag %x -> %x\n",
1253                     frag, le32_to_cpu(rinfo->dir_dir->frag));
1254                frag = le32_to_cpu(rinfo->dir_dir->frag);
1255                if (ceph_frag_is_leftmost(frag))
1256                        r_readdir_offset = 2;
1257                else
1258                        r_readdir_offset = 0;
1259        }
1260
1261        if (req->r_aborted)
1262                return readdir_prepopulate_inodes_only(req, session);
1263
1264        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1265                snapdir = ceph_get_snapdir(parent->d_inode);
1266                parent = d_find_alias(snapdir);
1267                dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1268                     rinfo->dir_nr, parent);
1269        } else {
1270                dout("readdir_prepopulate %d items under dn %p\n",
1271                     rinfo->dir_nr, parent);
1272                if (rinfo->dir_dir)
1273                        ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1274        }
1275
1276        /* FIXME: release caps/leases if error occurs */
1277        for (i = 0; i < rinfo->dir_nr; i++) {
1278                struct ceph_vino vino;
1279
1280                dname.name = rinfo->dir_dname[i];
1281                dname.len = rinfo->dir_dname_len[i];
1282                dname.hash = full_name_hash(dname.name, dname.len);
1283
1284                vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1285                vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1286
1287retry_lookup:
1288                dn = d_lookup(parent, &dname);
1289                dout("d_lookup on parent=%p name=%.*s got %p\n",
1290                     parent, dname.len, dname.name, dn);
1291
1292                if (!dn) {
1293                        dn = d_alloc(parent, &dname);
1294                        dout("d_alloc %p '%.*s' = %p\n", parent,
1295                             dname.len, dname.name, dn);
1296                        if (dn == NULL) {
1297                                dout("d_alloc badness\n");
1298                                err = -ENOMEM;
1299                                goto out;
1300                        }
1301                        ret = ceph_init_dentry(dn);
1302                        if (ret < 0) {
1303                                dput(dn);
1304                                err = ret;
1305                                goto out;
1306                        }
1307                } else if (dn->d_inode &&
1308                           (ceph_ino(dn->d_inode) != vino.ino ||
1309                            ceph_snap(dn->d_inode) != vino.snap)) {
1310                        dout(" dn %p points to wrong inode %p\n",
1311                             dn, dn->d_inode);
1312                        d_delete(dn);
1313                        dput(dn);
1314                        goto retry_lookup;
1315                } else {
1316                        /* reorder parent's d_subdirs */
1317                        spin_lock(&parent->d_lock);
1318                        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
1319                        list_move(&dn->d_u.d_child, &parent->d_subdirs);
1320                        spin_unlock(&dn->d_lock);
1321                        spin_unlock(&parent->d_lock);
1322                }
1323
1324                /* inode */
1325                if (dn->d_inode) {
1326                        in = dn->d_inode;
1327                } else {
1328                        in = ceph_get_inode(parent->d_sb, vino);
1329                        if (IS_ERR(in)) {
1330                                dout("new_inode badness\n");
1331                                d_drop(dn);
1332                                dput(dn);
1333                                err = PTR_ERR(in);
1334                                goto out;
1335                        }
1336                }
1337
1338                if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1339                               req->r_request_started, -1,
1340                               &req->r_caps_reservation) < 0) {
1341                        pr_err("fill_inode badness on %p\n", in);
1342                        if (!dn->d_inode)
1343                                iput(in);
1344                        d_drop(dn);
1345                        goto next_item;
1346                }
1347
1348                if (!dn->d_inode) {
1349                        dn = splice_dentry(dn, in, NULL, false);
1350                        if (IS_ERR(dn)) {
1351                                err = PTR_ERR(dn);
1352                                dn = NULL;
1353                                goto next_item;
1354                        }
1355                }
1356
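                    /*
                     * Record this entry's readdir position:
                     * ceph_make_fpos() packs the dirfrag into the high
                     * bits of the file offset and the index within the
                     * frag into the low bits.
                     */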
1357                di = dn->d_fsdata;
1358                di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
1359
1360                update_dentry_lease(dn, rinfo->dir_dlease[i],
1361                                    req->r_session,
1362                                    req->r_request_started);
1363next_item:
1364                if (dn)
1365                        dput(dn);
1366        }
1367        if (err == 0)
1368                req->r_did_prepopulate = true;
1369
1370out:
1371        if (snapdir) {
1372                iput(snapdir);
1373                dput(parent);
1374        }
1375        dout("readdir_prepopulate done\n");
1376        return err;
1377}
1378
1379int ceph_inode_set_size(struct inode *inode, loff_t size)
1380{
1381        struct ceph_inode_info *ci = ceph_inode(inode);
1382        int ret = 0;
1383
1384        spin_lock(&ci->i_ceph_lock);
1385        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1386        inode->i_size = size;
1387        inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1388
1389        /* tell the MDS if we are approaching max_size */
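            /*
             * i.e. report once when size first crosses max_size/2
             * while the size we last reported was still below that
             * mark, so we don't spam the MDS on every extension.
             */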
1390        if ((size << 1) >= ci->i_max_size &&
1391            (ci->i_reported_size << 1) < ci->i_max_size)
1392                ret = 1;
1393
1394        spin_unlock(&ci->i_ceph_lock);
1395        return ret;
1396}
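    /*
     * A typical caller pattern (a sketch, not a verbatim call site):
     * when a write extends the file, report the new size if we have
     * crossed the halfway mark,
     *
     *	if (ceph_inode_set_size(inode, endoff))
     *		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
     *
     * so the MDS can grow max_size before writers block on it.
     */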
1397
1398/*
1399 * Write back inode data in a worker thread.  (This can't be done
1400 * in the message handler context.)
1401 */
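/*
 * We take an inode reference on behalf of the queued work; if
 * queue_work() reports the work was already pending, the earlier
 * queuing already holds that reference, so we drop ours.
 */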
1402void ceph_queue_writeback(struct inode *inode)
1403{
1404        ihold(inode);
1405        if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1406                       &ceph_inode(inode)->i_wb_work)) {
1407                dout("ceph_queue_writeback %p\n", inode);
1408        } else {
1409                dout("ceph_queue_writeback %p failed\n", inode);
1410                iput(inode);
1411        }
1412}
1413
1414static void ceph_writeback_work(struct work_struct *work)
1415{
1416        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1417                                                  i_wb_work);
1418        struct inode *inode = &ci->vfs_inode;
1419
1420        dout("writeback %p\n", inode);
1421        filemap_fdatawrite(&inode->i_data);
1422        iput(inode);
1423}
1424
1425/*
1426 * queue an async invalidation
1427 */
1428void ceph_queue_invalidate(struct inode *inode)
1429{
1430        ihold(inode);
1431        if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1432                       &ceph_inode(inode)->i_pg_inv_work)) {
1433                dout("ceph_queue_invalidate %p\n", inode);
1434        } else {
1435                dout("ceph_queue_invalidate %p failed\n", inode);
1436                iput(inode);
1437        }
1438}
1439
1440/*
1441 * Invalidate inode pages in a worker thread.  (This can't be done
1442 * in the message handler context.)
1443 */
1444static void ceph_invalidate_work(struct work_struct *work)
1445{
1446        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1447                                                  i_pg_inv_work);
1448        struct inode *inode = &ci->vfs_inode;
1449        u32 orig_gen;
1450        int check = 0;
1451
1452        mutex_lock(&ci->i_truncate_mutex);
1453        spin_lock(&ci->i_ceph_lock);
1454        dout("invalidate_pages %p gen %d revoking %d\n", inode,
1455             ci->i_rdcache_gen, ci->i_rdcache_revoking);
1456        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1457                /* never mind! */
1458                spin_unlock(&ci->i_ceph_lock);
1459                mutex_unlock(&ci->i_truncate_mutex);
1460                goto out;
1461        }
1462        orig_gen = ci->i_rdcache_gen;
1463        spin_unlock(&ci->i_ceph_lock);
1464
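            /*
             * Drop the page cache with i_ceph_lock released; if either
             * generation counter changes meanwhile, the recheck below
             * treats this pass as having raced and does nothing more.
             */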
1465        truncate_inode_pages(inode->i_mapping, 0);
1466
1467        spin_lock(&ci->i_ceph_lock);
1468        if (orig_gen == ci->i_rdcache_gen &&
1469            orig_gen == ci->i_rdcache_revoking) {
1470                dout("invalidate_pages %p gen %d successful\n", inode,
1471                     ci->i_rdcache_gen);
1472                ci->i_rdcache_revoking--;
1473                check = 1;
1474        } else {
1475                dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1476                     inode, orig_gen, ci->i_rdcache_gen,
1477                     ci->i_rdcache_revoking);
1478        }
1479        spin_unlock(&ci->i_ceph_lock);
1480        mutex_unlock(&ci->i_truncate_mutex);
1481
1482        if (check)
1483                ceph_check_caps(ci, 0, NULL);
1484out:
1485        iput(inode);
1486}
1487
1488
1489/*
1490 * Called by trunc_wq.
1491 *
1492 * Truncation runs in a worker thread so callers need not block.
1493 */
1494static void ceph_vmtruncate_work(struct work_struct *work)
1495{
1496        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1497                                                  i_vmtruncate_work);
1498        struct inode *inode = &ci->vfs_inode;
1499
1500        dout("vmtruncate_work %p\n", inode);
1501        __ceph_do_pending_vmtruncate(inode);
1502        iput(inode);
1503}
1504
1505/*
1506 * Queue an async vmtruncate.  If we fail to queue work, we will handle
1507 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1508 */
1509void ceph_queue_vmtruncate(struct inode *inode)
1510{
1511        struct ceph_inode_info *ci = ceph_inode(inode);
1512
1513        ihold(inode);
1514
1515        if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1516                       &ci->i_vmtruncate_work)) {
1517                dout("ceph_queue_vmtruncate %p\n", inode);
1518        } else {
1519                dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1520                     inode, ci->i_truncate_pending);
1521                iput(inode);
1522        }
1523}
1524
1525/*
1526 * Make sure any pending truncation is applied before doing anything
1527 * that may depend on it.
1528 */
1529void __ceph_do_pending_vmtruncate(struct inode *inode)
1530{
1531        struct ceph_inode_info *ci = ceph_inode(inode);
1532        u64 to;
1533        int wrbuffer_refs, finish = 0;
1534
1535        mutex_lock(&ci->i_truncate_mutex);
1536retry:
1537        spin_lock(&ci->i_ceph_lock);
1538        if (ci->i_truncate_pending == 0) {
1539                dout("__do_pending_vmtruncate %p none pending\n", inode);
1540                spin_unlock(&ci->i_ceph_lock);
1541                mutex_unlock(&ci->i_truncate_mutex);
1542                return;
1543        }
1544
1545        /*
1546         * make sure any dirty snapped pages are flushed before we
1547         * possibly truncate them... so write AND block!
1548         */
1549        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1550                dout("__do_pending_vmtruncate %p flushing snaps first\n",
1551                     inode);
1552                spin_unlock(&ci->i_ceph_lock);
1553                filemap_write_and_wait_range(&inode->i_data, 0,
1554                                             inode->i_sb->s_maxbytes);
1555                goto retry;
1556        }
1557
1558        /* there should be no reader or writer */
1559        WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1560
1561        to = ci->i_truncate_size;
1562        wrbuffer_refs = ci->i_wrbuffer_ref;
1563        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1564             ci->i_truncate_pending, to);
1565        spin_unlock(&ci->i_ceph_lock);
1566
1567        truncate_inode_pages(inode->i_mapping, to);
1568
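            /*
             * i_truncate_size may have moved again while we truncated
             * with the lock dropped; if so, loop and truncate again.
             */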
1569        spin_lock(&ci->i_ceph_lock);
1570        if (to == ci->i_truncate_size) {
1571                ci->i_truncate_pending = 0;
1572                finish = 1;
1573        }
1574        spin_unlock(&ci->i_ceph_lock);
1575        if (!finish)
1576                goto retry;
1577
1578        mutex_unlock(&ci->i_truncate_mutex);
1579
1580        if (wrbuffer_refs == 0)
1581                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1582
1583        wake_up_all(&ci->i_cap_wq);
1584}
1585
1586/*
1587 * symlinks
1588 */
1589static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1590{
1591        struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1592        nd_set_link(nd, ci->i_symlink);
1593        return NULL;
1594}
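    /*
     * i_symlink is filled in when the inode is instantiated and lives
     * as long as the inode itself, so there is nothing to release
     * after the walk (hence the NULL cookie and no .put_link below).
     */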
1595
1596static const struct inode_operations ceph_symlink_iops = {
1597        .readlink = generic_readlink,
1598        .follow_link = ceph_sym_follow_link,
1599        .setattr = ceph_setattr,
1600        .getattr = ceph_getattr,
1601        .setxattr = ceph_setxattr,
1602        .getxattr = ceph_getxattr,
1603        .listxattr = ceph_listxattr,
1604        .removexattr = ceph_removexattr,
1605};
1606
1607/*
1608 * setattr
1609 */
1610int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1611{
1612        struct inode *inode = dentry->d_inode;
1613        struct ceph_inode_info *ci = ceph_inode(inode);
1614        struct inode *parent_inode;
1615        const unsigned int ia_valid = attr->ia_valid;
1616        struct ceph_mds_request *req;
1617        struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1618        int issued;
1619        int release = 0, dirtied = 0;
1620        int mask = 0;
1621        int err = 0;
1622        int inode_dirty_flags = 0;
1623
1624        if (ceph_snap(inode) != CEPH_NOSNAP)
1625                return -EROFS;
1626
1627        err = inode_change_ok(inode, attr);
1628        if (err != 0)
1629                return err;
1630
1631        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1632                                       USE_AUTH_MDS);
1633        if (IS_ERR(req))
1634                return PTR_ERR(req);
1635
1636        spin_lock(&ci->i_ceph_lock);
1637        issued = __ceph_caps_issued(ci, NULL);
1638        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1639
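            /*
             * General pattern below: if we hold the relevant EXCL cap,
             * apply the change locally and mark that cap dirty;
             * otherwise encode it into the MDS request and note which
             * SHARED caps to release.  (Timestamps may also be updated
             * in place under CEPH_CAP_FILE_WR when they only move
             * forward.)
             */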
1640        if (ia_valid & ATTR_UID) {
1641                dout("setattr %p uid %d -> %d\n", inode,
1642                     from_kuid(&init_user_ns, inode->i_uid),
1643                     from_kuid(&init_user_ns, attr->ia_uid));
1644                if (issued & CEPH_CAP_AUTH_EXCL) {
1645                        inode->i_uid = attr->ia_uid;
1646                        dirtied |= CEPH_CAP_AUTH_EXCL;
1647                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1648                           !uid_eq(attr->ia_uid, inode->i_uid)) {
1649                        req->r_args.setattr.uid = cpu_to_le32(
1650                                from_kuid(&init_user_ns, attr->ia_uid));
1651                        mask |= CEPH_SETATTR_UID;
1652                        release |= CEPH_CAP_AUTH_SHARED;
1653                }
1654        }
1655        if (ia_valid & ATTR_GID) {
1656                dout("setattr %p gid %d -> %d\n", inode,
1657                     from_kgid(&init_user_ns, inode->i_gid),
1658                     from_kgid(&init_user_ns, attr->ia_gid));
1659                if (issued & CEPH_CAP_AUTH_EXCL) {
1660                        inode->i_gid = attr->ia_gid;
1661                        dirtied |= CEPH_CAP_AUTH_EXCL;
1662                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1663                           !gid_eq(attr->ia_gid, inode->i_gid)) {
1664                        req->r_args.setattr.gid = cpu_to_le32(
1665                                from_kgid(&init_user_ns, attr->ia_gid));
1666                        mask |= CEPH_SETATTR_GID;
1667                        release |= CEPH_CAP_AUTH_SHARED;
1668                }
1669        }
1670        if (ia_valid & ATTR_MODE) {
1671                dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1672                     attr->ia_mode);
1673                if (issued & CEPH_CAP_AUTH_EXCL) {
1674                        inode->i_mode = attr->ia_mode;
1675                        dirtied |= CEPH_CAP_AUTH_EXCL;
1676                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1677                           attr->ia_mode != inode->i_mode) {
1678                        req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1679                        mask |= CEPH_SETATTR_MODE;
1680                        release |= CEPH_CAP_AUTH_SHARED;
1681                }
1682        }
1683
1684        if (ia_valid & ATTR_ATIME) {
1685                dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1686                     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1687                     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1688                if (issued & CEPH_CAP_FILE_EXCL) {
1689                        ci->i_time_warp_seq++;
1690                        inode->i_atime = attr->ia_atime;
1691                        dirtied |= CEPH_CAP_FILE_EXCL;
1692                } else if ((issued & CEPH_CAP_FILE_WR) &&
1693                           timespec_compare(&inode->i_atime,
1694                                            &attr->ia_atime) < 0) {
1695                        inode->i_atime = attr->ia_atime;
1696                        dirtied |= CEPH_CAP_FILE_WR;
1697                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1698                           !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1699                        ceph_encode_timespec(&req->r_args.setattr.atime,
1700                                             &attr->ia_atime);
1701                        mask |= CEPH_SETATTR_ATIME;
1702                        release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1703                                CEPH_CAP_FILE_WR;
1704                }
1705        }
1706        if (ia_valid & ATTR_MTIME) {
1707                dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1708                     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1709                     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1710                if (issued & CEPH_CAP_FILE_EXCL) {
1711                        ci->i_time_warp_seq++;
1712                        inode->i_mtime = attr->ia_mtime;
1713                        dirtied |= CEPH_CAP_FILE_EXCL;
1714                } else if ((issued & CEPH_CAP_FILE_WR) &&
1715                           timespec_compare(&inode->i_mtime,
1716                                            &attr->ia_mtime) < 0) {
1717                        inode->i_mtime = attr->ia_mtime;
1718                        dirtied |= CEPH_CAP_FILE_WR;
1719                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1720                           !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1721                        ceph_encode_timespec(&req->r_args.setattr.mtime,
1722                                             &attr->ia_mtime);
1723                        mask |= CEPH_SETATTR_MTIME;
1724                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1725                                CEPH_CAP_FILE_WR;
1726                }
1727        }
1728        if (ia_valid & ATTR_SIZE) {
1729                dout("setattr %p size %lld -> %lld\n", inode,
1730                     inode->i_size, attr->ia_size);
1731                if (attr->ia_size > inode->i_sb->s_maxbytes) {
1732                        err = -EINVAL;
1733                        goto out;
1734                }
1735                if ((issued & CEPH_CAP_FILE_EXCL) &&
1736                    attr->ia_size > inode->i_size) {
1737                        inode->i_size = attr->ia_size;
1738                        inode->i_blocks =
1739                                (attr->ia_size + (1 << 9) - 1) >> 9;
1740                        inode->i_ctime = attr->ia_ctime;
1741                        ci->i_reported_size = attr->ia_size;
1742                        dirtied |= CEPH_CAP_FILE_EXCL;
1743                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1744                           attr->ia_size != inode->i_size) {
1745                        req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1746                        req->r_args.setattr.old_size =
1747                                cpu_to_le64(inode->i_size);
1748                        mask |= CEPH_SETATTR_SIZE;
1749                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1750                                CEPH_CAP_FILE_WR;
1751                }
1752        }
1753
1754        /* these do nothing on their own; see the special cases below */
1755        if (ia_valid & ATTR_CTIME) {
1756                bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1757                                         ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1758                dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1759                     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1760                     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1761                     only ? "ctime only" : "ignored");
1762                inode->i_ctime = attr->ia_ctime;
1763                if (only) {
1764                        /*
1765                         * if the kernel wants to dirty ctime but
1766                         * nothing else, we need to choose a cap to
1767                         * dirty under, or do an almost-no-op setattr
1768                         */
1769                        if (issued & CEPH_CAP_AUTH_EXCL)
1770                                dirtied |= CEPH_CAP_AUTH_EXCL;
1771                        else if (issued & CEPH_CAP_FILE_EXCL)
1772                                dirtied |= CEPH_CAP_FILE_EXCL;
1773                        else if (issued & CEPH_CAP_XATTR_EXCL)
1774                                dirtied |= CEPH_CAP_XATTR_EXCL;
1775                        else
1776                                mask |= CEPH_SETATTR_CTIME;
1777                }
1778        }
1779        if (ia_valid & ATTR_FILE)
1780                dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1781
1782        if (dirtied) {
1783                inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
1784                inode->i_ctime = CURRENT_TIME;
1785        }
1786
1787        release &= issued;
1788        spin_unlock(&ci->i_ceph_lock);
1789
1790        if (inode_dirty_flags)
1791                __mark_inode_dirty(inode, inode_dirty_flags);
1792
1793        if (mask) {
1794                req->r_inode = inode;
1795                ihold(inode);
1796                req->r_inode_drop = release;
1797                req->r_args.setattr.mask = cpu_to_le32(mask);
1798                req->r_num_caps = 1;
1799                parent_inode = ceph_get_dentry_parent_inode(dentry);
1800                err = ceph_mdsc_do_request(mdsc, parent_inode, req);
1801                iput(parent_inode);
1802        }
1803        dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1804             ceph_cap_string(dirtied), mask);
1805
1806        ceph_mdsc_put_request(req);
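            /* a size change may leave a pending vmtruncate (queued by
             * the reply handling); apply it before we return */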
1807        if (mask & CEPH_SETATTR_SIZE)
1808                __ceph_do_pending_vmtruncate(inode);
1809        return err;
1810out:
1811        spin_unlock(&ci->i_ceph_lock);
1812        ceph_mdsc_put_request(req);
1813        return err;
1814}
1815
1816/*
1817 * Verify that we have a lease on the given mask.  If not,
1818 * do a getattr against an MDS.
1819 */
1820int ceph_do_getattr(struct inode *inode, int mask)
1821{
1822        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1823        struct ceph_mds_client *mdsc = fsc->mdsc;
1824        struct ceph_mds_request *req;
1825        int err;
1826
1827        if (ceph_snap(inode) == CEPH_SNAPDIR) {
1828                dout("do_getattr inode %p SNAPDIR\n", inode);
1829                return 0;
1830        }
1831
1832        dout("do_getattr inode %p mask %s mode 0%o\n",
                 inode, ceph_cap_string(mask), inode->i_mode);
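            /* if the caps we hold already cover 'mask', the cached
             * metadata is still valid and no MDS round trip is needed */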
1833        if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1834                return 0;
1835
1836        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1837        if (IS_ERR(req))
1838                return PTR_ERR(req);
1839        req->r_inode = inode;
1840        ihold(inode);
1841        req->r_num_caps = 1;
1842        req->r_args.getattr.mask = cpu_to_le32(mask);
1843        err = ceph_mdsc_do_request(mdsc, NULL, req);
1844        ceph_mdsc_put_request(req);
1845        dout("do_getattr result=%d\n", err);
1846        return err;
1847}
1848
1849
1850/*
1851 * Check inode permissions.  We first make sure our cached AUTH
1852 * metadata (uid/gid/mode) is valid, then call the generic handler.
1853 */
1854int ceph_permission(struct inode *inode, int mask)
1855{
1856        int err;
1857
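            /*
             * MAY_NOT_BLOCK means we are in RCU-walk mode and must not
             * sleep; the getattr below can block on the MDS, so bail
             * out and let the VFS retry in ref-walk mode.
             */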
1858        if (mask & MAY_NOT_BLOCK)
1859                return -ECHILD;
1860
1861        err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1862
1863        if (!err)
1864                err = generic_permission(inode, mask);
1865        return err;
1866}
1867
1868/*
1869 * Get all attributes.  Hopefully someday we'll have a statlite()
1870 * and can limit the fields we require to be accurate.
1871 */
1872int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1873                 struct kstat *stat)
1874{
1875        struct inode *inode = dentry->d_inode;
1876        struct ceph_inode_info *ci = ceph_inode(inode);
1877        int err;
1878
1879        err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1880        if (!err) {
1881                generic_fillattr(inode, stat);
1882                stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
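                    /* expose the snap id as the device so the same ino
                     * in different snapshots stays distinguishable */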
1883                if (ceph_snap(inode) != CEPH_NOSNAP)
1884                        stat->dev = ceph_snap(inode);
1885                else
1886                        stat->dev = 0;
1887                if (S_ISDIR(inode->i_mode)) {
1888                        if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
1889                                                RBYTES))
1890                                stat->size = ci->i_rbytes;
1891                        else
1892                                stat->size = ci->i_files + ci->i_subdirs;
1893                        stat->blocks = 0;
1894                        stat->blksize = 65536;
1895                }
1896        }
1897        return err;
1898}
1899