linux/fs/nfsd/nfscache.c
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY        NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE      64

static struct hlist_head *      cache_hash;
static struct list_head         lru_head;
static struct kmem_cache        *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int             max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int             maskbits;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int             num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int             payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int             drc_mem_usage;

/* longest hash chain seen */
static unsigned int             longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int             longest_chain_cachesize;

static int      nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void     cache_cleaner_func(struct work_struct *unused);
static int      nfsd_reply_cache_shrink(struct shrinker *shrink,
                                        struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
        .shrink = nfsd_reply_cache_shrink,
        .seeks  = 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the numbers above give a rough maximum for the amount of memory
 * used, in kilobytes.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages - totalhigh_pages;

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}
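
/*
 * Worked example for the table above: with 1GB of low memory and 4KB
 * pages (PAGE_SHIFT == 12), low_pages = 262144, int_sqrt(262144) = 512,
 * and 16 * 512 = 8192; shifting left by PAGE_SHIFT - 10 = 2 gives the
 * 32768-entry limit listed for 1GB.
 */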

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
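
/*
 * For example, the 32768-entry limit from the 1GB row above yields
 * 32768 / 64 = 512 hash buckets (already a power of two).
 */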

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
        struct svc_cacherep     *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_LIST_HEAD(&rp->c_lru);
                INIT_HLIST_NODE(&rp->c_hash);
        }
        return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (!hlist_unhashed(&rp->c_hash))
                hlist_del(&rp->c_hash);
        list_del(&rp->c_lru);
        --num_drc_entries;
        drc_mem_usage -= sizeof(*rp);
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
        spin_lock(&cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
        unsigned int hashsize;

        INIT_LIST_HEAD(&lru_head);
        max_drc_entries = nfsd_cache_size_limit();
        num_drc_entries = 0;
        hashsize = nfsd_hashsize(max_drc_entries);
        maskbits = ilog2(hashsize);

        register_shrinker(&nfsd_reply_cache_shrinker);
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
        if (!drc_slab)
                goto out_nomem;

        cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
        if (!cache_hash)
                goto out_nomem;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        nfsd_reply_cache_shutdown();
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
        struct svc_cacherep     *rp;

        unregister_shrinker(&nfsd_reply_cache_shrinker);
        cancel_delayed_work_sync(&cache_cleaner);

        while (!list_empty(&lru_head)) {
                rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
                nfsd_reply_cache_free_locked(rp);
        }

        kfree(cache_hash);
        cache_hash = NULL;

        if (drc_slab) {
                kmem_cache_destroy(drc_slab);
                drc_slab = NULL;
        }
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &lru_head);
        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
        hlist_del_init(&rp->c_hash);
        hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}

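/*
 * An entry is considered expired only when it is not currently in progress
 * and its timestamp is more than RC_EXPIRE jiffies old.
 */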
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
        return rp->c_state != RC_INPROG &&
               time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
        struct svc_cacherep *rp, *tmp;

        list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
                if (!nfsd_cache_entry_expired(rp) &&
                    num_drc_entries <= max_drc_entries)
                        break;
                nfsd_reply_cache_free_locked(rp);
        }

        /*
         * Conditionally rearm the job. If we cleaned out the list, then
         * cancel any pending run (since there won't be any work to do).
         * Otherwise, we rearm the job or modify the existing one to run in
         * RC_EXPIRE since we just ran the pruner.
         */
        if (list_empty(&lru_head))
                cancel_delayed_work(&cache_cleaner);
        else
                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
        spin_lock(&cache_lock);
        prune_cache_entries();
        spin_unlock(&cache_lock);
}

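/*
 * Shrinker callback: when asked to scan, prune expired and excess entries;
 * in either case report the current number of cached entries.
 */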
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned int num;

        spin_lock(&cache_lock);
        if (sc->nr_to_scan)
                prune_cache_entries();
        num = num_drc_entries;
        spin_unlock(&cache_lock);

        return num;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

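/*
 * Return true if the request matches the cached entry: the XID, procedure,
 * protocol, version, argument length and client address/port must all match,
 * and so must the checksum of the NFS payload. A mismatch on the checksum
 * alone is counted separately in payload_misses.
 */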
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
        /* Check RPC header info first */
        if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
            rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
            rqstp->rq_arg.len != rp->c_len ||
            !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
            rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
                return false;

        /* compare checksum of NFS data */
        if (csum != rp->c_csum) {
                ++payload_misses;
                return false;
        }

        return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
        struct svc_cacherep     *rp, *ret = NULL;
        struct hlist_head       *rh;
        unsigned int            entries = 0;

        rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
        hlist_for_each_entry(rp, rh, c_hash) {
                ++entries;
                if (nfsd_cache_match(rqstp, csum, rp)) {
                        ret = rp;
                        break;
                }
        }

        /* tally hash chain length stats */
        if (entries > longest_chain) {
                longest_chain = entries;
                longest_chain_cachesize = num_drc_entries;
        } else if (entries == longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                longest_chain_cachesize = min(longest_chain_cachesize,
                                                num_drc_entries);
        }

        return ret;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, drop the cache_lock, allocate a new one, and
 * then search again in case one was inserted while this thread didn't hold
 * the lock.
 */
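/*
 * Returns RC_DOIT when the caller should go ahead and process the request,
 * RC_DROPIT when the request should be dropped (a duplicate is already in
 * progress or was answered very recently), and RC_REPLY when the cached
 * reply has already been copied into rqstp->rq_res.
 */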
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct svc_cacherep     *rp, *found;
        __be32                  xid = rqstp->rq_xid;
        u32                     proto = rqstp->rq_prot,
                                vers = rqstp->rq_vers,
                                proc = rqstp->rq_proc;
        __wsum                  csum;
        unsigned long           age;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry. First, try to reuse the entry at the front of
         * the LRU if it has expired or the cache is over its limit; if so,
         * also prune the rest of the LRU list.
         */
        spin_lock(&cache_lock);
        if (!list_empty(&lru_head)) {
                rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
                if (nfsd_cache_entry_expired(rp) ||
                    num_drc_entries >= max_drc_entries) {
                        lru_put_end(rp);
                        prune_cache_entries();
                        goto search_cache;
                }
        }

        /* No expired ones available, allocate a new one. */
        spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
        spin_lock(&cache_lock);
        if (likely(rp)) {
                ++num_drc_entries;
                drc_mem_usage += sizeof(*rp);
        }

search_cache:
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
                if (likely(rp))
                        nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }

        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                goto out;
        }

        /*
         * We're keeping the one we just allocated. Are we now over the
         * limit? Prune one off the tip of the LRU in trade for the one we
         * just allocated if so.
         */
        if (num_drc_entries >= max_drc_entries)
                nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
                                                struct svc_cacherep, c_lru));

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_len = rqstp->rq_arg.len;
        rp->c_csum = csum;

        hash_refile(rp);
        lru_put_end(rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
 out:
        spin_unlock(&cache_lock);
        return rtn;

found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        lru_put_end(rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!rqstp->rq_secure && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(rp);
        }

        goto out;
}

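/*
 * Typical use by the request dispatcher (a sketch only; nfsd_dispatch() is
 * the authoritative caller, and "reply_start" below is just a placeholder
 * for a pointer to the first word of the encoded NFS reply):
 *
 *      switch (nfsd_cache_lookup(rqstp)) {
 *      case RC_DROPIT:
 *              return 0;               drop the request
 *      case RC_REPLY:
 *              return 1;               send the cached reply in rq_res
 *      case RC_DOIT:
 *              break;                  process the call normally
 *      }
 *      ... run the procedure and encode the reply into rq_res ...
 *      nfsd_cache_update(rqstp, rqstp->rq_cachetype, reply_start);
 */
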
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
        int             len;
        size_t          bufsize = 0;

        if (!rp)
                return;

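        /*
         * Length of the reply body, in 32-bit XDR words, measured from the
         * status word (statp) to the end of the head iovec.
         */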
        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(rp);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(rp);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(rp);
                return;
        }
        spin_lock(&cache_lock);
        drc_mem_usage += bufsize;
        lru_put_end(rp);
        rp->c_secure = rqstp->rq_secure;
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec     *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
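/*
 * seq_file backend for the reply cache stats file ("reply_cache_stats")
 * exposed via the nfsd filesystem, which is typically mounted at
 * /proc/fs/nfsd.
 */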
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        spin_lock(&cache_lock);
        seq_printf(m, "max entries:           %u\n", max_drc_entries);
        seq_printf(m, "num entries:           %u\n", num_drc_entries);
        seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
        seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses:        %u\n", payload_misses);
        seq_printf(m, "longest chain len:     %u\n", longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
        spin_unlock(&cache_lock);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_reply_cache_stats_show, NULL);
}