linux/fs/nfs/callback_proc.c
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif

__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
                             struct cb_getattrres *res,
                             struct cb_process_state *cps)
{
        struct nfs_delegation *delegation;
        struct nfs_inode *nfsi;
        struct inode *inode;

        res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
                goto out;

        res->bitmap[0] = res->bitmap[1] = 0;
        res->status = htonl(NFS4ERR_BADHANDLE);

        dprintk_rcu("NFS: GETATTR callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        inode = nfs_delegation_find_inode(cps->clp, &args->fh);
        if (inode == NULL)
                goto out;
        nfsi = NFS_I(inode);
        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
                goto out_iput;
        res->size = i_size_read(inode);
        res->change_attr = delegation->change_attr;
        if (nfsi->npages != 0)
                res->change_attr++;
        res->ctime = inode->i_ctime;
        res->mtime = inode->i_mtime;
        res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
                args->bitmap[0];
        res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
                args->bitmap[1];
        res->status = 0;
out_iput:
        rcu_read_unlock();
        iput(inode);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
        return res->status;
}
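
/*
 * Worked example (illustrative only, not part of the upstream file): the
 * reply above is limited to the change attribute, the file size, ctime and
 * mtime, masked by what the server asked for.  If the server's GETATTR
 * bitmap requests FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE |
 * FATTR4_WORD0_FILEID, the encoded response advertises only CHANGE and
 * SIZE, since FILEID is not an attribute this callback reports.
 */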

__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
                            struct cb_process_state *cps)
{
        struct inode *inode;
        __be32 res;

        res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
                goto out;

        dprintk_rcu("NFS: RECALL callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        res = htonl(NFS4ERR_BADHANDLE);
        inode = nfs_delegation_find_inode(cps->clp, &args->fh);
        if (inode == NULL)
                goto out;
        /* Set up a helper thread to actually return the delegation */
        switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
        case 0:
                res = 0;
                break;
        case -ENOENT:
                res = htonl(NFS4ERR_BAD_STATEID);
                break;
        default:
                res = htonl(NFS4ERR_RESOURCE);
        }
        iput(inode);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
        return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh)
{
        struct nfs_server *server;
        struct inode *ino;
        struct pnfs_layout_hdr *lo;

        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry(lo, &server->layouts, plh_layouts) {
                        if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
                                continue;
                        ino = igrab(lo->plh_inode);
                        if (!ino)
                                continue;
                        spin_lock(&ino->i_lock);
                        /* Is this layout in the process of being freed? */
                        if (NFS_I(ino)->layout != lo) {
                                spin_unlock(&ino->i_lock);
                                iput(ino);
                                continue;
                        }
                        pnfs_get_layout_hdr(lo);
                        spin_unlock(&ino->i_lock);
                        return lo;
                }
        }

        return NULL;
}

static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
        lo = get_layout_by_fh_locked(clp, fh);
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        return lo;
}
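
/*
 * Illustrative sketch (not part of the upstream file): the reference
 * discipline described in the comment above get_layout_by_fh_locked().
 * A successful lookup returns a layout header with an extra reference and
 * an igrab()'d inode, and the caller must drop both, exactly as
 * initiate_file_draining() below does.  The helper name here is
 * hypothetical.
 */
#if 0
static void layout_lookup_usage_sketch(struct nfs_client *clp,
                                       struct nfs_fh *fh)
{
        struct pnfs_layout_hdr *lo;
        struct inode *ino;

        lo = get_layout_by_fh(clp, fh);
        if (!lo)
                return;                 /* no layout for this filehandle */
        ino = lo->plh_inode;

        /* ... act on the layout, typically under ino->i_lock ... */

        pnfs_put_layout_hdr(lo);        /* drop the lookup's layout ref */
        iput(ino);                      /* drop the igrab() inode ref */
}
#endif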

static u32 initiate_file_draining(struct nfs_client *clp,
                                  struct cb_layoutrecallargs *args)
{
        struct inode *ino;
        struct pnfs_layout_hdr *lo;
        u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
        LIST_HEAD(free_me_list);

        lo = get_layout_by_fh(clp, &args->cbl_fh);
        if (!lo)
                return NFS4ERR_NOMATCHING_LAYOUT;

        ino = lo->plh_inode;
        spin_lock(&ino->i_lock);
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
            pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
                                        &args->cbl_range))
                rv = NFS4ERR_DELAY;
        else
                rv = NFS4ERR_NOMATCHING_LAYOUT;
        pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&free_me_list);
        pnfs_put_layout_hdr(lo);
        iput(ino);
        return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
                                  struct cb_layoutrecallargs *args)
{
        struct nfs_server *server;
        struct pnfs_layout_hdr *lo;
        struct inode *ino;
        u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
        struct pnfs_layout_hdr *tmp;
        LIST_HEAD(recall_list);
        LIST_HEAD(free_me_list);
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if ((args->cbl_recall_type == RETURN_FSID) &&
                    memcmp(&server->fsid, &args->cbl_fsid,
                           sizeof(struct nfs_fsid)))
                        continue;

                list_for_each_entry(lo, &server->layouts, plh_layouts) {
                        ino = igrab(lo->plh_inode);
                        /* igrab() returns NULL if the inode is being evicted */
                        if (!ino)
                                continue;
                        spin_lock(&ino->i_lock);
                        /* Is this layout in the process of being freed? */
                        if (NFS_I(ino)->layout != lo) {
                                spin_unlock(&ino->i_lock);
                                iput(ino);
                                continue;
                        }
                        pnfs_get_layout_hdr(lo);
                        spin_unlock(&ino->i_lock);
                        BUG_ON(!list_empty(&lo->plh_bulk_recall));
                        list_add(&lo->plh_bulk_recall, &recall_list);
                }
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        list_for_each_entry_safe(lo, tmp,
                                 &recall_list, plh_bulk_recall) {
                ino = lo->plh_inode;
                spin_lock(&ino->i_lock);
                set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                if (pnfs_mark_matching_lsegs_invalid(lo, &free_me_list, &range))
                        rv = NFS4ERR_DELAY;
                list_del_init(&lo->plh_bulk_recall);
                spin_unlock(&ino->i_lock);
                pnfs_free_lseg_list(&free_me_list);
                pnfs_put_layout_hdr(lo);
                iput(ino);
        }
        return rv;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
                                    struct cb_layoutrecallargs *args)
{
        u32 res;

        dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
        if (args->cbl_recall_type == RETURN_FILE)
                res = initiate_file_draining(clp, args);
        else
                res = initiate_bulk_draining(clp, args);
        dprintk("%s returning %i\n", __func__, res);
        return res;
}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
                                  void *dummy, struct cb_process_state *cps)
{
        u32 res;

        dprintk("%s: -->\n", __func__);

        if (cps->clp)
                res = do_callback_layoutrecall(cps->clp, args);
        else
                res = NFS4ERR_OP_NOT_IN_SESSION;

        dprintk("%s: exit with status = %d\n", __func__, res);
        return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
        struct cb_layoutrecallargs args;

        /* Pretend we got a CB_LAYOUTRECALL(ALL) */
        memset(&args, 0, sizeof(args));
        args.cbl_recall_type = RETURN_ALL;
        /* FIXME we ignore errors, what should we do? */
        do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
                                  void *dummy, struct cb_process_state *cps)
{
        int i;
        __be32 res = 0;
        struct nfs_client *clp = cps->clp;
        struct nfs_server *server = NULL;

        dprintk("%s: -->\n", __func__);

        if (!clp) {
                res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
                goto out;
        }

        for (i = 0; i < args->ndevs; i++) {
                struct cb_devicenotifyitem *dev = &args->devs[i];

                if (!server ||
                    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
                        rcu_read_lock();
                        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                                if (server->pnfs_curr_ld &&
                                    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
                                        rcu_read_unlock();
                                        goto found;
                                }
                        rcu_read_unlock();
                        dprintk("%s: layout type %u not found\n",
                                __func__, dev->cbd_layout_type);
                        continue;
                }

        found:
                if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE)
                        dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
                                "deleting instead\n", __func__);
                nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
        }

out:
        kfree(args->devs);
        dprintk("%s: exit with status = %u\n",
                __func__, be32_to_cpu(res));
        return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
{
        struct nfs4_slot *slot;

        dprintk("%s enter. slotid %d seqid %d\n",
                __func__, args->csa_slotid, args->csa_sequenceid);

        if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
                return htonl(NFS4ERR_BADSLOT);

        slot = tbl->slots + args->csa_slotid;
        dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);

        /* Normal */
        if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
                slot->seq_nr++;
                goto out_ok;
        }

        /* Replay */
        if (args->csa_sequenceid == slot->seq_nr) {
                dprintk("%s seqid %d is a replay\n",
                        __func__, args->csa_sequenceid);
                /* Signal process_op to set this error on next op */
                if (args->csa_cachethis == 0)
                        return htonl(NFS4ERR_RETRY_UNCACHED_REP);

                /* The ca_maxresponsesize_cached is 0 with no DRC */
                else if (args->csa_cachethis == 1)
                        return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
        }

        /* Wraparound */
        if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
                slot->seq_nr = 1;
                goto out_ok;
        }

        /* Misordered request */
        return htonl(NFS4ERR_SEQ_MISORDERED);
out_ok:
        tbl->highest_used_slotid = args->csa_slotid;
        return htonl(NFS4_OK);
}
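
/*
 * Illustrative sketch (not part of the upstream file): the outcomes
 * validate_seqid() produces for a backchannel slot that last saw sequence
 * number 5, plus the 32-bit wraparound case.  The function name and the
 * concrete values are made up for the example.
 */
#if 0
static void validate_seqid_example(struct nfs4_slot_table *tbl)
{
        struct cb_sequenceargs args = { .csa_slotid = 0 };

        tbl->slots[0].seq_nr = 5;

        args.csa_sequenceid = 6;        /* seq_nr + 1: NFS4_OK, seq_nr -> 6 */
        validate_seqid(tbl, &args);

        args.csa_sequenceid = 6;        /* replay: RETRY_UNCACHED_REP or
                                         * REP_TOO_BIG_TO_CACHE, depending on
                                         * args.csa_cachethis */
        validate_seqid(tbl, &args);

        args.csa_sequenceid = 9;        /* neither next nor replay:
                                         * NFS4ERR_SEQ_MISORDERED */
        validate_seqid(tbl, &args);

        tbl->slots[0].seq_nr = 0xffffffff;
        args.csa_sequenceid = 1;        /* wraparound: NFS4_OK, seq_nr = 1 */
        validate_seqid(tbl, &args);
}
#endif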

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
                                  uint32_t nrclists,
                                  struct referring_call_list *rclists)
{
        bool status = 0;
        int i, j;
        struct nfs4_session *session;
        struct nfs4_slot_table *tbl;
        struct referring_call_list *rclist;
        struct referring_call *ref;

        /*
         * XXX When client trunking is implemented, this becomes
         * a session lookup from within the loop
         */
        session = clp->cl_session;
        tbl = &session->fc_slot_table;

        for (i = 0; i < nrclists; i++) {
                rclist = &rclists[i];
                if (memcmp(session->sess_id.data,
                           rclist->rcl_sessionid.data,
                           NFS4_MAX_SESSIONID_LEN) != 0)
                        continue;

                for (j = 0; j < rclist->rcl_nrefcalls; j++) {
                        ref = &rclist->rcl_refcalls[j];

                        dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
                                "slotid %u\n", __func__,
                                ((u32 *)&rclist->rcl_sessionid.data)[0],
                                ((u32 *)&rclist->rcl_sessionid.data)[1],
                                ((u32 *)&rclist->rcl_sessionid.data)[2],
                                ((u32 *)&rclist->rcl_sessionid.data)[3],
                                ref->rc_sequenceid, ref->rc_slotid);

                        spin_lock(&tbl->slot_tbl_lock);
                        status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
                                  tbl->slots[ref->rc_slotid].seq_nr ==
                                        ref->rc_sequenceid);
                        spin_unlock(&tbl->slot_tbl_lock);
                        if (status)
                                goto out;
                }
        }

out:
        return status;
}
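
/*
 * Example (illustrative only, not part of the upstream file): if a
 * CB_SEQUENCE carries a referring call list for this session naming the
 * triple (slotid 3, seqid 17), and forechannel slot 3 is still marked used
 * with seq_nr 17, then the reply to the original call has not been
 * processed here yet; referring_call_exists() returns true and
 * nfs4_callback_sequence() below answers NFS4ERR_DELAY so the server
 * retries the callback later.
 */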

__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                              struct cb_sequenceres *res,
                              struct cb_process_state *cps)
{
        struct nfs4_slot_table *tbl;
        struct nfs_client *clp;
        int i;
        __be32 status = htonl(NFS4ERR_BADSESSION);

        clp = nfs4_find_client_sessionid(cps->net, args->csa_addr, &args->csa_sessionid);
        if (clp == NULL)
                goto out;

        tbl = &clp->cl_session->bc_slot_table;

        spin_lock(&tbl->slot_tbl_lock);
        /* state manager is resetting the session */
        if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
                spin_unlock(&tbl->slot_tbl_lock);
                status = htonl(NFS4ERR_DELAY);
                /* Return NFS4ERR_BADSESSION if we're draining the session
                 * in order to reset it.
                 */
                if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                        status = htonl(NFS4ERR_BADSESSION);
                goto out;
        }

        status = validate_seqid(&clp->cl_session->bc_slot_table, args);
        spin_unlock(&tbl->slot_tbl_lock);
        if (status)
                goto out;

        cps->slotid = args->csa_slotid;

        /*
         * Check for pending referring calls.  If a match is found, a
         * related callback was received before the response to the original
         * call.
         */
        if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
                status = htonl(NFS4ERR_DELAY);
                goto out;
        }

        memcpy(&res->csr_sessionid, &args->csa_sessionid,
               sizeof(res->csr_sessionid));
        res->csr_sequenceid = args->csa_sequenceid;
        res->csr_slotid = args->csa_slotid;
        res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
        res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;

out:
        cps->clp = clp; /* put in nfs4_callback_compound */
        for (i = 0; i < args->csa_nrclists; i++)
                kfree(args->csa_rclists[i].rcl_refcalls);
        kfree(args->csa_rclists);

        if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
                cps->drc_status = status;
                status = 0;
        } else
                res->csr_status = status;

        dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
                ntohl(status), ntohl(res->csr_status));
        return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
        return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
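
/*
 * Example (illustrative only, not part of the upstream file): a
 * craa_type_mask with just the RCA4_TYPE_MASK_RDATA_DLG and
 * RCA4_TYPE_MASK_WDATA_DLG bits set passes this check, while a mask with
 * any bit outside RCA4_TYPE_MASK_ALL set makes nfs4_callback_recallany()
 * below fail with NFS4ERR_INVAL.
 */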

__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
                               struct cb_process_state *cps)
{
        __be32 status;
        fmode_t flags = 0;

        status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* set in cb_sequence */
                goto out;

        dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        status = cpu_to_be32(NFS4ERR_INVAL);
        if (!validate_bitmap_values(args->craa_type_mask))
                goto out;

        status = cpu_to_be32(NFS4_OK);
        if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
                     &args->craa_type_mask))
                flags = FMODE_READ;
        if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
                     &args->craa_type_mask))
                flags |= FMODE_WRITE;
        if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
                     &args->craa_type_mask))
                pnfs_recall_all_layouts(cps->clp);
        if (flags)
                nfs_expire_all_delegation_types(cps->clp, flags);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
                                struct cb_process_state *cps)
{
        struct nfs4_slot_table *fc_tbl;
        __be32 status;

        status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* set in cb_sequence */
                goto out;

        dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
                args->crsa_target_max_slots);

        fc_tbl = &cps->clp->cl_session->fc_slot_table;

        status = htonl(NFS4ERR_BAD_HIGH_SLOT);
        if (args->crsa_target_max_slots > fc_tbl->max_slots ||
            args->crsa_target_max_slots < 1)
                goto out;

        status = htonl(NFS4_OK);
        if (args->crsa_target_max_slots == fc_tbl->max_slots)
                goto out;

        fc_tbl->target_max_slots = args->crsa_target_max_slots;
        nfs41_handle_recall_slot(cps->clp);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
}
#endif /* CONFIG_NFS_V4_1 */