linux/fs/ceph/file.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                /* First file open request creates the cookie, we want to keep
                 * this cookie around for the lifetime of the inode so as not
                 * to have to worry about fscache register / revoke / operation
                 * races.
                 *
                 * Also, if we know the operation is going to invalidate data
                 * (non readonly) just nuke the cache right away.
                 */
                ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
                if ((fmode & CEPH_FILE_MODE_WR))
                        ceph_fscache_invalidate(inode);
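                /* fall through: regular files share the setup below with dirs */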
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
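                /* readdir offsets 0 and 1 are reserved for "." and ".." */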
        cf->next_offset = 2;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        struct inode *parent_inode = NULL;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        if (flags & (O_CREAT|O_TRUNC))
                parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        iput(parent_inode);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode,
                     int *opened)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        int err;

        dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return err;

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        }
        req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        if (err)
                goto out_err;

        err = ceph_handle_snapdir(req, dentry, err);
        if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_unhashed(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_err;
        if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        *opened |= FILE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open, opened);
        }

out_err:
        ceph_mdsc_put_request(req);
        dout("atomic_open result=%d\n", err);
        return err;
}

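/*
 * Release a file: drop our fmode reference on the inode and free the
 * per-file state allocated in ceph_init_file().
 */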
int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        dput(cf->dentry);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof, bool o_direct,
                        unsigned long buf_align)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len, left;
        int io_align, page_align;
        int pages_left;
        int read;
        struct page **page_pos;
        int ret;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;
        io_align = off & ~PAGE_MASK;

more:
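        /*
         * For O_DIRECT the data alignment within the first page follows
         * the user buffer (buf_align), adjusted by how far previous
         * passes advanced pos; otherwise it follows the file offset.
         */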
        if (o_direct)
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
        else
                page_align = pos & ~PAGE_MASK;
        this_len = left;
        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
        if (ret == -ENOENT)
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        if (ret >= 0) {
                int didpages;
                if (was_short && (pos + ret < inode->i_size)) {
                        u64 tmp = min(this_len - ret,
                                        inode->i_size - pos - ret);
                        dout(" zero gap %llu to %llu\n",
                                pos + ret, pos + ret + tmp);
                        ceph_zero_page_vector_range(page_align + read + ret,
                                                        tmp, pages);
                        ret += tmp;
                }

                didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;
                /* hit a stripe boundary and need to continue */
                if (left && hit_stripe && pos < inode->i_size)
                        goto more;
        }

        if (read > 0) {
                ret = read;
                /* did we bounce off eof? */
                if (pos + left > inode->i_size)
                        *checkeof = 1;
        }

        dout("striped_read returns %d\n", ret);
        return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
                              unsigned len, loff_t *poff, int *checkeof)
{
        struct inode *inode = file_inode(file);
        struct page **pages;
        u64 off = *poff;
        int num_pages, ret;

        dout("sync_read on file %p %llu~%u %s\n", file, off, len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_DIRECT) {
                num_pages = calc_pages_for((unsigned long)data, len);
                pages = ceph_get_direct_page_vector(data, num_pages, true);
        } else {
                num_pages = calc_pages_for(off, len);
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        }
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret < 0)
                goto done;

        ret = striped_read(inode, off, len, pages, num_pages, checkeof,
                           file->f_flags & O_DIRECT,
                           (unsigned long)data & ~PAGE_MASK);

        if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
                ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
        if (ret >= 0)
                *poff = off + ret;

done:
        if (file->f_flags & O_DIRECT)
                ceph_put_page_vector(pages, num_pages, true);
        else
                ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
                unsafe ? "un" : "");
        if (unsafe) {
                ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                spin_lock(&ci->i_unsafe_lock);
                list_add_tail(&req->r_unsafe_item,
                              &ci->i_unsafe_writes);
                spin_unlock(&ci->i_unsafe_lock);
        } else {
                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_item);
                spin_unlock(&ci->i_unsafe_lock);
                ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
        }
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
                               size_t left, loff_t pos, loff_t *ppos)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_snap_context *snapc;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        int num_ops = 1;
        struct page **pages;
        int num_pages;
        u64 len;
        int written = 0;
        int flags;
        int check_caps = 0;
        int page_align, io_align;
        unsigned long buf_align;
        int ret;
        struct timespec mtime = CURRENT_TIME;
        bool own_pages = false;

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u %s\n", file, pos,
             (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + left) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
        if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
                flags |= CEPH_OSD_FLAG_ACK;
        else
                num_ops++;      /* Also include a 'startsync' command. */

        /*
         * we may need to do multiple writes here if we span an object
         * boundary.  this isn't atomic, unfortunately.  :(
         */
more:
        io_align = pos & ~PAGE_MASK;
        buf_align = (unsigned long)data & ~PAGE_MASK;
        len = left;

        snapc = ci->i_snap_realm->cached_context;
        vino = ceph_vino(inode);
        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    vino, pos, &len, num_ops,
                                    CEPH_OSD_OP_WRITE, flags, snapc,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* write from beginning of first page, regardless of io alignment */
        page_align = file->f_flags & O_DIRECT ? buf_align : io_align;
        num_pages = calc_pages_for(page_align, len);
        if (file->f_flags & O_DIRECT) {
                pages = ceph_get_direct_page_vector(data, num_pages, false);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                /*
                 * throw out any page cache pages in this range. this
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
                                           (pos+len) | (PAGE_CACHE_SIZE-1));
        } else {
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
                ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                if ((file->f_flags & O_SYNC) == 0) {
                        /* get a second commit callback */
                        req->r_unsafe_callback = ceph_sync_write_unsafe;
                        req->r_inode = inode;
                        own_pages = true;
                }
        }
        osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
                                        false, own_pages);

        /* BUG_ON(vino.snap != CEPH_NOSNAP); */
        ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret)
                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

        if (file->f_flags & O_DIRECT)
                ceph_put_page_vector(pages, num_pages, false);
        else if (file->f_flags & O_SYNC)
                ceph_release_page_vector(pages, num_pages);

out:
        ceph_osdc_put_request(req);
        if (ret == 0) {
                pos += len;
                written += len;
                left -= len;
                data += len;
                if (left)
                        goto more;

                ret = written;
                *ppos = pos;
                if (pos > i_size_read(inode))
                        check_caps = ceph_inode_set_size(inode, pos);
                if (check_caps)
                        ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
                                        NULL);
        } else if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        loff_t *ppos = &iocb->ki_pos;
        size_t len = iov->iov_len;
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        void __user *base = iov->iov_base;
        ssize_t ret;
        int want, got = 0;
        int checkeof = 0, read = 0;

        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
        if (ret < 0)
                goto out;
        dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (fi->flags & CEPH_F_SYNC))
                /* hmm, this isn't really async... */
                ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
        else
                ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);

        if (checkeof && ret >= 0) {
                int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

                /* hit EOF or hole? */
                if (statret == 0 && *ppos < inode->i_size) {
                        dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
                        read += ret;
                        base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }
        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write... _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                       unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        ssize_t count, written = 0;
        int err, want, got;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        mutex_lock(&inode->i_mutex);

        err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
        if (err)
                goto out;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = file->f_mapping->backing_dev_info;

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = file_remove_suid(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
                err = -ENOSPC;
                goto out;
        }

        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, count, inode->i_size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        got = 0;
        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
        if (err < 0)
                goto out;

        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {
                mutex_unlock(&inode->i_mutex);
                written = ceph_sync_write(file, iov->iov_base, count,
                                          pos, &iocb->ki_pos);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u"
                                " got EOLDSNAPC, retrying\n",
                                inode, ceph_vinop(inode),
                                pos, (unsigned)iov->iov_len);
                        mutex_lock(&inode->i_mutex);
                        goto retry_snap;
                }
        } else {
                /*
                 * No need to acquire the i_truncate_mutex: the MDS
                 * revokes Fwb caps before sending a truncate message
                 * to us, and we cannot hold the Fwb cap while a
                 * vmtruncate is pending, so write and vmtruncate
                 * cannot run at the same time.
                 */
                written = generic_file_buffered_write(iocb, iov, nr_segs,
                                                      pos, &iocb->ki_pos,
                                                      count, 0);
                mutex_unlock(&inode->i_mutex);
        }

        if (written >= 0) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (written >= 0 &&
            ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
             ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                err = vfs_fsync_range(file, pos, pos + written - 1, 1);
                if (err < 0)
                        written = err;
        }

        goto out_unlocked;

out:
        mutex_unlock(&inode->i_mutex);
out_unlocked:
        current->backing_dev_info = NULL;
        return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        mutex_lock(&inode->i_mutex);

        if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
        }

        switch (whence) {
        case SEEK_END:
                offset += inode->i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
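        /*
         * We do not track holes: the whole file is treated as data,
         * so SEEK_DATA succeeds at any offset before EOF and
         * SEEK_HOLE reports the hole at i_size.
         */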
        case SEEK_DATA:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                offset = inode->i_size;
                break;
        }

        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
        mutex_unlock(&inode->i_mutex);
        return offset;
}

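/*
 * Zero a byte range within a single cached page, waiting for any
 * writeback in progress so we do not race with it.
 */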
static inline void ceph_zero_partial_page(
        struct inode *inode, loff_t offset, unsigned size)
{
        struct page *page;
        pgoff_t index = offset >> PAGE_CACHE_SHIFT;

        page = find_lock_page(inode->i_mapping, index);
        if (page) {
                wait_on_page_writeback(page);
                zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
                unlock_page(page);
                page_cache_release(page);
        }
}

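/*
 * Zero a range of the page cache: partial pages at either end are
 * zeroed in place; whole pages in the middle are truncated away.
 */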
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                                      loff_t length)
{
        loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
        if (offset < nearly) {
                loff_t size = nearly - offset;
                if (length < size)
                        size = length;
                ceph_zero_partial_page(inode, offset, size);
                offset += size;
                length -= size;
        }
        if (length >= PAGE_CACHE_SIZE) {
                loff_t size = round_down(length, PAGE_CACHE_SIZE);
                truncate_pagecache_range(inode, offset, offset + size - 1);
                offset += size;
                length -= size;
        }
        if (length)
                ceph_zero_partial_page(inode, offset, length);
}

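/*
 * Zero a range within a single RADOS object.  A NULL length means
 * drop the whole object (DELETE, or TRUNCATE when offset is 0);
 * otherwise issue a ZERO op.  *length is trimmed by the osd client
 * to the extent that fits within one object.
 */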
static int ceph_zero_partial_object(struct inode *inode,
                                    loff_t offset, loff_t *length)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        int ret = 0;
        loff_t zero = 0;
        int op;

        if (!length) {
                op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
                length = &zero;
        } else {
                op = CEPH_OSD_OP_ZERO;
        }

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                        ceph_vino(inode),
                                        offset, length,
                                        1, op,
                                        CEPH_OSD_FLAG_WRITE |
                                        CEPH_OSD_FLAG_ONDISK,
                                        NULL, 0, 0, false);
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
                goto out;
        }

        ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
                                &inode->i_mtime);

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
                if (ret == -ENOENT)
                        ret = 0;
        }
        ceph_osdc_put_request(req);

out:
        return ret;
}

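/*
 * Zero a file range object by object: partial objects at either edge
 * of the range get ZERO ops, while every object in a whole stripe
 * period in the middle is dropped outright.
 */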
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
        s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
        s32 object_size = ceph_file_layout_object_size(ci->i_layout);
        u64 object_set_size = (u64) object_size * stripe_count;
        u64 nearly, t;

        /* round offset up to next period boundary */
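        /* do_div() divides t in place and returns the remainder */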
        nearly = offset + object_set_size - 1;
        t = nearly;
        nearly -= do_div(t, object_set_size);

        while (length && offset < nearly) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        while (length >= object_set_size) {
                int i;
                loff_t pos = offset;
                for (i = 0; i < stripe_count; ++i) {
                        ret = ceph_zero_partial_object(inode, pos, NULL);
                        if (ret < 0)
                                return ret;
                        pos += stripe_unit;
                }
                offset += object_set_size;
                length -= object_set_size;
        }
        while (length) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        return ret;
}

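/*
 * fallocate: punch holes by zeroing the page cache and the backing
 * objects, or extend i_size for an allocating request (no space is
 * actually reserved on the OSDs).  Fwb caps are held throughout.
 */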
static long ceph_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t length)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
        int want, got = 0;
        int dirty;
        int ret = 0;
        loff_t endoff = 0;
        loff_t size;

        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;

        if (IS_SWAPFILE(inode))
                return -ETXTBSY;

        mutex_lock(&inode->i_mutex);

        if (ceph_snap(inode) != CEPH_NOSNAP) {
                ret = -EROFS;
                goto unlock;
        }

        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
                !(mode & FALLOC_FL_PUNCH_HOLE)) {
                ret = -ENOSPC;
                goto unlock;
        }

        size = i_size_read(inode);
        if (!(mode & FALLOC_FL_KEEP_SIZE))
                endoff = offset + length;

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;

        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
        if (ret < 0)
                goto unlock;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                if (offset < size)
                        ceph_zero_pagecache_range(inode, offset, length);
                ret = ceph_zero_objects(inode, offset, length);
        } else if (endoff > size) {
                truncate_pagecache_range(inode, size, -1);
                if (ceph_inode_set_size(inode, endoff))
                        ceph_check_caps(ceph_inode(inode),
                                CHECK_CAPS_AUTHONLY, NULL);
        }

        if (!ret) {
                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        ceph_put_cap_refs(ci, got);
unlock:
        mutex_unlock(&inode->i_mutex);
        return ret;
}

const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = ceph_aio_read,
        .aio_write = ceph_aio_write,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl   = ceph_ioctl,
        .fallocate      = ceph_fallocate,
};