linux/net/sunrpc/svc.c
   1/*
   2 * linux/net/sunrpc/svc.c
   3 *
   4 * High-level RPC service routines
   5 *
   6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
   7 *
   8 * Multiple thread pools and NUMAisation
   9 * Copyright (c) 2006 Silicon Graphics, Inc.
  10 * by Greg Banks <gnb@melbourne.sgi.com>
  11 */
  12
  13#include <linux/linkage.h>
  14#include <linux/sched.h>
  15#include <linux/errno.h>
  16#include <linux/net.h>
  17#include <linux/in.h>
  18#include <linux/mm.h>
  19#include <linux/interrupt.h>
  20#include <linux/module.h>
  21#include <linux/kthread.h>
  22#include <linux/slab.h>
  23#include <linux/nsproxy.h>
  24
  25#include <linux/sunrpc/types.h>
  26#include <linux/sunrpc/xdr.h>
  27#include <linux/sunrpc/stats.h>
  28#include <linux/sunrpc/svcsock.h>
  29#include <linux/sunrpc/clnt.h>
  30#include <linux/sunrpc/bc_xprt.h>
  31
  32#define RPCDBG_FACILITY RPCDBG_SVCDSP
  33
  34static void svc_unregister(const struct svc_serv *serv, struct net *net);
  35
  36#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
  37
  38/*
  39 * Mode for mapping cpus to pools.
  40 */
  41enum {
  42        SVC_POOL_AUTO = -1,     /* choose one of the others */
  43        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
  44                                 * (legacy & UP mode) */
  45        SVC_POOL_PERCPU,        /* one pool per cpu */
  46        SVC_POOL_PERNODE        /* one pool per numa node */
  47};
  48#define SVC_POOL_DEFAULT        SVC_POOL_GLOBAL
  49
  50/*
  51 * Structure for mapping cpus to pools and vice versa.
   52 * Set up once during sunrpc initialisation.
  53 */
  54static struct svc_pool_map {
  55        int count;                      /* How many svc_servs use us */
  56        int mode;                       /* Note: int not enum to avoid
  57                                         * warnings about "enumeration value
  58                                         * not handled in switch" */
  59        unsigned int npools;
  60        unsigned int *pool_to;          /* maps pool id to cpu or node */
  61        unsigned int *to_pool;          /* maps cpu or node to pool id */
  62} svc_pool_map = {
  63        .count = 0,
  64        .mode = SVC_POOL_DEFAULT
  65};
  66static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
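/*
 * Editor's illustration (not part of the original file): on a hypothetical
 * two-node machine with cpus 0-1 on node 0 and cpus 2-3 on node 1, the
 * arrays end up as
 *
 *	SVC_POOL_PERCPU:   to_pool[] = { 0, 1, 2, 3 }, pool_to[] = { 0, 1, 2, 3 }
 *	SVC_POOL_PERNODE:  to_pool[] = { 0, 1 },       pool_to[] = { 0, 1 }
 *
 * i.e. to_pool[] is indexed by cpu (or node) id and yields a pool id, and
 * pool_to[] is the inverse mapping.
 */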
  67
  68static int
  69param_set_pool_mode(const char *val, struct kernel_param *kp)
  70{
  71        int *ip = (int *)kp->arg;
  72        struct svc_pool_map *m = &svc_pool_map;
  73        int err;
  74
  75        mutex_lock(&svc_pool_map_mutex);
  76
  77        err = -EBUSY;
  78        if (m->count)
  79                goto out;
  80
  81        err = 0;
  82        if (!strncmp(val, "auto", 4))
  83                *ip = SVC_POOL_AUTO;
  84        else if (!strncmp(val, "global", 6))
  85                *ip = SVC_POOL_GLOBAL;
  86        else if (!strncmp(val, "percpu", 6))
  87                *ip = SVC_POOL_PERCPU;
  88        else if (!strncmp(val, "pernode", 7))
  89                *ip = SVC_POOL_PERNODE;
  90        else
  91                err = -EINVAL;
  92
  93out:
  94        mutex_unlock(&svc_pool_map_mutex);
  95        return err;
  96}
  97
  98static int
  99param_get_pool_mode(char *buf, struct kernel_param *kp)
 100{
 101        int *ip = (int *)kp->arg;
 102
 103        switch (*ip)
 104        {
 105        case SVC_POOL_AUTO:
 106                return strlcpy(buf, "auto", 20);
 107        case SVC_POOL_GLOBAL:
 108                return strlcpy(buf, "global", 20);
 109        case SVC_POOL_PERCPU:
 110                return strlcpy(buf, "percpu", 20);
 111        case SVC_POOL_PERNODE:
 112                return strlcpy(buf, "pernode", 20);
 113        default:
 114                return sprintf(buf, "%d", *ip);
 115        }
 116}
 117
 118module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
 119                 &svc_pool_map.mode, 0644);
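/*
 * Editor's note (usage sketch): since the parameter is registered with
 * perm 0644 it can be set on the kernel command line
 * ("sunrpc.pool_mode=pernode"), as a module option
 * ("modprobe sunrpc pool_mode=pernode"), or, while no pooled service is
 * running, via
 *
 *	echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once svc_pool_map.count is non-zero, param_set_pool_mode() above
 * returns -EBUSY.
 */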
 120
 121/*
 122 * Detect best pool mapping mode heuristically,
 123 * according to the machine's topology.
 124 */
 125static int
 126svc_pool_map_choose_mode(void)
 127{
 128        unsigned int node;
 129
 130        if (nr_online_nodes > 1) {
 131                /*
 132                 * Actually have multiple NUMA nodes,
 133                 * so split pools on NUMA node boundaries
 134                 */
 135                return SVC_POOL_PERNODE;
 136        }
 137
 138        node = first_online_node;
 139        if (nr_cpus_node(node) > 2) {
 140                /*
 141                 * Non-trivial SMP, or CONFIG_NUMA on
 142                 * non-NUMA hardware, e.g. with a generic
 143                 * x86_64 kernel on Xeons.  In this case we
 144                 * want to divide the pools on cpu boundaries.
 145                 */
 146                return SVC_POOL_PERCPU;
 147        }
 148
 149        /* default: one global pool */
 150        return SVC_POOL_GLOBAL;
 151}
 152
 153/*
 154 * Allocate the to_pool[] and pool_to[] arrays.
 155 * Returns 0 on success or an errno.
 156 */
 157static int
 158svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
 159{
 160        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
 161        if (!m->to_pool)
 162                goto fail;
 163        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
 164        if (!m->pool_to)
 165                goto fail_free;
 166
 167        return 0;
 168
 169fail_free:
 170        kfree(m->to_pool);
 171        m->to_pool = NULL;
 172fail:
 173        return -ENOMEM;
 174}
 175
 176/*
 177 * Initialise the pool map for SVC_POOL_PERCPU mode.
 178 * Returns number of pools or <0 on error.
 179 */
 180static int
 181svc_pool_map_init_percpu(struct svc_pool_map *m)
 182{
 183        unsigned int maxpools = nr_cpu_ids;
 184        unsigned int pidx = 0;
 185        unsigned int cpu;
 186        int err;
 187
 188        err = svc_pool_map_alloc_arrays(m, maxpools);
 189        if (err)
 190                return err;
 191
 192        for_each_online_cpu(cpu) {
 193                BUG_ON(pidx > maxpools);
 194                m->to_pool[cpu] = pidx;
 195                m->pool_to[pidx] = cpu;
 196                pidx++;
 197        }
 198        /* cpus brought online later all get mapped to pool0, sorry */
 199
 200        return pidx;
  201}
 202
 203
 204/*
 205 * Initialise the pool map for SVC_POOL_PERNODE mode.
 206 * Returns number of pools or <0 on error.
 207 */
 208static int
 209svc_pool_map_init_pernode(struct svc_pool_map *m)
 210{
 211        unsigned int maxpools = nr_node_ids;
 212        unsigned int pidx = 0;
 213        unsigned int node;
 214        int err;
 215
 216        err = svc_pool_map_alloc_arrays(m, maxpools);
 217        if (err)
 218                return err;
 219
 220        for_each_node_with_cpus(node) {
 221                /* some architectures (e.g. SN2) have cpuless nodes */
 222                BUG_ON(pidx > maxpools);
 223                m->to_pool[node] = pidx;
 224                m->pool_to[pidx] = node;
 225                pidx++;
 226        }
 227        /* nodes brought online later all get mapped to pool0, sorry */
 228
 229        return pidx;
 230}
 231
 232
 233/*
 234 * Add a reference to the global map of cpus to pools (and
 235 * vice versa).  Initialise the map if we're the first user.
 236 * Returns the number of pools.
 237 */
 238static unsigned int
 239svc_pool_map_get(void)
 240{
 241        struct svc_pool_map *m = &svc_pool_map;
 242        int npools = -1;
 243
 244        mutex_lock(&svc_pool_map_mutex);
 245
 246        if (m->count++) {
 247                mutex_unlock(&svc_pool_map_mutex);
 248                return m->npools;
 249        }
 250
 251        if (m->mode == SVC_POOL_AUTO)
 252                m->mode = svc_pool_map_choose_mode();
 253
 254        switch (m->mode) {
 255        case SVC_POOL_PERCPU:
 256                npools = svc_pool_map_init_percpu(m);
 257                break;
 258        case SVC_POOL_PERNODE:
 259                npools = svc_pool_map_init_pernode(m);
 260                break;
 261        }
 262
 263        if (npools < 0) {
 264                /* default, or memory allocation failure */
 265                npools = 1;
 266                m->mode = SVC_POOL_GLOBAL;
 267        }
 268        m->npools = npools;
 269
 270        mutex_unlock(&svc_pool_map_mutex);
 271        return m->npools;
 272}
 273
 274
 275/*
 276 * Drop a reference to the global map of cpus to pools.
 277 * When the last reference is dropped, the map data is
 278 * freed; this allows the sysadmin to change the pool
 279 * mode using the pool_mode module option without
 280 * rebooting or re-loading sunrpc.ko.
 281 */
 282static void
 283svc_pool_map_put(void)
 284{
 285        struct svc_pool_map *m = &svc_pool_map;
 286
 287        mutex_lock(&svc_pool_map_mutex);
 288
 289        if (!--m->count) {
 290                kfree(m->to_pool);
 291                m->to_pool = NULL;
 292                kfree(m->pool_to);
 293                m->pool_to = NULL;
 294                m->npools = 0;
 295        }
 296
 297        mutex_unlock(&svc_pool_map_mutex);
 298}
 299
 300
 301static int svc_pool_map_get_node(unsigned int pidx)
 302{
 303        const struct svc_pool_map *m = &svc_pool_map;
 304
 305        if (m->count) {
 306                if (m->mode == SVC_POOL_PERCPU)
 307                        return cpu_to_node(m->pool_to[pidx]);
 308                if (m->mode == SVC_POOL_PERNODE)
 309                        return m->pool_to[pidx];
 310        }
 311        return NUMA_NO_NODE;
 312}
 313/*
 314 * Set the given thread's cpus_allowed mask so that it
 315 * will only run on cpus in the given pool.
 316 */
 317static inline void
 318svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 319{
 320        struct svc_pool_map *m = &svc_pool_map;
 321        unsigned int node = m->pool_to[pidx];
 322
 323        /*
 324         * The caller checks for sv_nrpools > 1, which
 325         * implies that we've been initialized.
 326         */
 327        BUG_ON(m->count == 0);
 328
 329        switch (m->mode) {
 330        case SVC_POOL_PERCPU:
 331        {
 332                set_cpus_allowed_ptr(task, cpumask_of(node));
 333                break;
 334        }
 335        case SVC_POOL_PERNODE:
 336        {
 337                set_cpus_allowed_ptr(task, cpumask_of_node(node));
 338                break;
 339        }
 340        }
 341}
 342
 343/*
 344 * Use the mapping mode to choose a pool for a given CPU.
 345 * Used when enqueueing an incoming RPC.  Always returns
 346 * a non-NULL pool pointer.
 347 */
 348struct svc_pool *
 349svc_pool_for_cpu(struct svc_serv *serv, int cpu)
 350{
 351        struct svc_pool_map *m = &svc_pool_map;
 352        unsigned int pidx = 0;
 353
 354        /*
 355         * An uninitialised map happens in a pure client when
 356         * lockd is brought up, so silently treat it the
 357         * same as SVC_POOL_GLOBAL.
 358         */
 359        if (svc_serv_is_pooled(serv)) {
 360                switch (m->mode) {
 361                case SVC_POOL_PERCPU:
 362                        pidx = m->to_pool[cpu];
 363                        break;
 364                case SVC_POOL_PERNODE:
 365                        pidx = m->to_pool[cpu_to_node(cpu)];
 366                        break;
 367                }
 368        }
 369        return &serv->sv_pools[pidx % serv->sv_nrpools];
 370}
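/*
 * Editor's sketch (not part of the original file): roughly how a transport
 * enqueue path would use svc_pool_for_cpu() to hand incoming work to the
 * pool matching the current cpu.  The "ready" list head stands in for the
 * transport's real queueing and is an assumption for illustration only.
 */
static void example_enqueue(struct svc_serv *serv, struct list_head *ready)
{
	struct svc_pool *pool;
	int cpu;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(serv, cpu);	/* never returns NULL */
	put_cpu();

	spin_lock_bh(&pool->sp_lock);
	list_add_tail(ready, &pool->sp_sockets);
	spin_unlock_bh(&pool->sp_lock);
}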
 371
 372int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
 373{
 374        int err;
 375
 376        err = rpcb_create_local(net);
 377        if (err)
 378                return err;
 379
 380        /* Remove any stale portmap registrations */
 381        svc_unregister(serv, net);
 382        return 0;
 383}
 384EXPORT_SYMBOL_GPL(svc_rpcb_setup);
 385
 386void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
 387{
 388        svc_unregister(serv, net);
 389        rpcb_put_local(net);
 390}
 391EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
 392
 393static int svc_uses_rpcbind(struct svc_serv *serv)
 394{
 395        struct svc_program      *progp;
 396        unsigned int            i;
 397
 398        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
 399                for (i = 0; i < progp->pg_nvers; i++) {
 400                        if (progp->pg_vers[i] == NULL)
 401                                continue;
 402                        if (progp->pg_vers[i]->vs_hidden == 0)
 403                                return 1;
 404                }
 405        }
 406
 407        return 0;
 408}
 409
 410int svc_bind(struct svc_serv *serv, struct net *net)
 411{
 412        if (!svc_uses_rpcbind(serv))
 413                return 0;
 414        return svc_rpcb_setup(serv, net);
 415}
 416EXPORT_SYMBOL_GPL(svc_bind);
 417
 418/*
 419 * Create an RPC service
 420 */
 421static struct svc_serv *
 422__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 423             void (*shutdown)(struct svc_serv *serv, struct net *net))
 424{
 425        struct svc_serv *serv;
 426        unsigned int vers;
 427        unsigned int xdrsize;
 428        unsigned int i;
 429
 430        if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
 431                return NULL;
 432        serv->sv_name      = prog->pg_name;
 433        serv->sv_program   = prog;
 434        serv->sv_nrthreads = 1;
 435        serv->sv_stats     = prog->pg_stats;
 436        if (bufsize > RPCSVC_MAXPAYLOAD)
 437                bufsize = RPCSVC_MAXPAYLOAD;
 438        serv->sv_max_payload = bufsize? bufsize : 4096;
 439        serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
 440        serv->sv_shutdown  = shutdown;
 441        xdrsize = 0;
 442        while (prog) {
 443                prog->pg_lovers = prog->pg_nvers-1;
 444                for (vers=0; vers<prog->pg_nvers ; vers++)
 445                        if (prog->pg_vers[vers]) {
 446                                prog->pg_hivers = vers;
 447                                if (prog->pg_lovers > vers)
 448                                        prog->pg_lovers = vers;
 449                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
 450                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
 451                        }
 452                prog = prog->pg_next;
 453        }
 454        serv->sv_xdrsize   = xdrsize;
 455        INIT_LIST_HEAD(&serv->sv_tempsocks);
 456        INIT_LIST_HEAD(&serv->sv_permsocks);
 457        init_timer(&serv->sv_temptimer);
 458        spin_lock_init(&serv->sv_lock);
 459
 460        serv->sv_nrpools = npools;
 461        serv->sv_pools =
 462                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
 463                        GFP_KERNEL);
 464        if (!serv->sv_pools) {
 465                kfree(serv);
 466                return NULL;
 467        }
 468
 469        for (i = 0; i < serv->sv_nrpools; i++) {
 470                struct svc_pool *pool = &serv->sv_pools[i];
 471
 472                dprintk("svc: initialising pool %u for %s\n",
 473                                i, serv->sv_name);
 474
 475                pool->sp_id = i;
 476                INIT_LIST_HEAD(&pool->sp_threads);
 477                INIT_LIST_HEAD(&pool->sp_sockets);
 478                INIT_LIST_HEAD(&pool->sp_all_threads);
 479                spin_lock_init(&pool->sp_lock);
 480        }
 481
 482        if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
 483                serv->sv_shutdown = svc_rpcb_cleanup;
 484
 485        return serv;
 486}
 487
 488struct svc_serv *
 489svc_create(struct svc_program *prog, unsigned int bufsize,
 490           void (*shutdown)(struct svc_serv *serv, struct net *net))
 491{
 492        return __svc_create(prog, bufsize, /*npools*/1, shutdown);
 493}
 494EXPORT_SYMBOL_GPL(svc_create);
 495
 496struct svc_serv *
 497svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 498                  void (*shutdown)(struct svc_serv *serv, struct net *net),
 499                  svc_thread_fn func, struct module *mod)
 500{
 501        struct svc_serv *serv;
 502        unsigned int npools = svc_pool_map_get();
 503
 504        serv = __svc_create(prog, bufsize, npools, shutdown);
 505
 506        if (serv != NULL) {
 507                serv->sv_function = func;
 508                serv->sv_module = mod;
 509        }
 510
 511        return serv;
 512}
 513EXPORT_SYMBOL_GPL(svc_create_pooled);
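/*
 * Editor's sketch (not part of the original file): the call sequence a
 * pooled user such as nfsd typically follows.  The program, thread function
 * and thread count are placeholders, and error handling is abbreviated.
 */
static int example_start_pooled(struct svc_program *prog,
				svc_thread_fn threadfn, int nrthreads)
{
	struct svc_serv *serv;
	int error;

	serv = svc_create_pooled(prog, 0 /* default bufsize */, NULL,
				 threadfn, THIS_MODULE);
	if (serv == NULL)
		return -ENOMEM;

	/* pool == NULL: spread nrthreads round-robin over all pools */
	error = svc_set_num_threads(serv, NULL, nrthreads);

	/* drop the creator's reference; the new threads keep serv alive */
	svc_destroy(serv);
	return error;
}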
 514
 515void svc_shutdown_net(struct svc_serv *serv, struct net *net)
 516{
 517        /*
 518         * The set of xprts (contained in the sv_tempsocks and
 519         * sv_permsocks lists) is now constant, since it is modified
 520         * only by accepting new sockets (done by service threads in
 521         * svc_recv) or aging old ones (done by sv_temptimer), or
 522         * configuration changes (excluded by whatever locking the
 523         * caller is using--nfsd_mutex in the case of nfsd).  So it's
 524         * safe to traverse those lists and shut everything down:
 525         */
 526        svc_close_net(serv, net);
 527
 528        if (serv->sv_shutdown)
 529                serv->sv_shutdown(serv, net);
 530}
 531EXPORT_SYMBOL_GPL(svc_shutdown_net);
 532
 533/*
 534 * Destroy an RPC service. Should be called with appropriate locking to
 535 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 536 */
 537void
 538svc_destroy(struct svc_serv *serv)
 539{
 540        dprintk("svc: svc_destroy(%s, %d)\n",
 541                                serv->sv_program->pg_name,
 542                                serv->sv_nrthreads);
 543
 544        if (serv->sv_nrthreads) {
 545                if (--(serv->sv_nrthreads) != 0) {
 546                        svc_sock_update_bufs(serv);
 547                        return;
 548                }
 549        } else
 550                printk("svc_destroy: no threads for serv=%p!\n", serv);
 551
 552        del_timer_sync(&serv->sv_temptimer);
 553
 554        /*
  555         * The last user is gone, so every socket must already have been
  556         * destroyed by this point.  Check this.
 557         */
 558        BUG_ON(!list_empty(&serv->sv_permsocks));
 559        BUG_ON(!list_empty(&serv->sv_tempsocks));
 560
 561        cache_clean_deferred(serv);
 562
 563        if (svc_serv_is_pooled(serv))
 564                svc_pool_map_put();
 565
 566        kfree(serv->sv_pools);
 567        kfree(serv);
 568}
 569EXPORT_SYMBOL_GPL(svc_destroy);
 570
 571/*
 572 * Allocate an RPC server's buffer space.
 573 * We allocate pages and place them in rq_argpages.
 574 */
 575static int
 576svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 577{
 578        unsigned int pages, arghi;
 579
 580        /* bc_xprt uses fore channel allocated buffers */
 581        if (svc_is_backchannel(rqstp))
 582                return 1;
 583
 584        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
  585                                       * We assume each is at most one page
 586                                       */
 587        arghi = 0;
 588        BUG_ON(pages > RPCSVC_MAXPAGES);
 589        while (pages) {
 590                struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
 591                if (!p)
 592                        break;
 593                rqstp->rq_pages[arghi++] = p;
 594                pages--;
 595        }
 596        return pages == 0;
 597}
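/*
 * Editor's worked example (assuming 4 KiB pages and a 1 MiB maximum
 * payload): __svc_create() sets sv_max_mesg to
 * roundup(1048576 + 4096, 4096) = 1052672, so this function asks for
 * 1052672 / 4096 + 1 = 258 pages per thread, which stays within
 * RPCSVC_MAXPAGES and so does not trip the BUG_ON() above.
 */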
 598
 599/*
 600 * Release an RPC server buffer
 601 */
 602static void
 603svc_release_buffer(struct svc_rqst *rqstp)
 604{
 605        unsigned int i;
 606
 607        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
 608                if (rqstp->rq_pages[i])
 609                        put_page(rqstp->rq_pages[i]);
 610}
 611
 612struct svc_rqst *
 613svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 614{
 615        struct svc_rqst *rqstp;
 616
 617        rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
 618        if (!rqstp)
 619                goto out_enomem;
 620
 621        init_waitqueue_head(&rqstp->rq_wait);
 622
 623        serv->sv_nrthreads++;
 624        spin_lock_bh(&pool->sp_lock);
 625        pool->sp_nrthreads++;
 626        list_add(&rqstp->rq_all, &pool->sp_all_threads);
 627        spin_unlock_bh(&pool->sp_lock);
 628        rqstp->rq_server = serv;
 629        rqstp->rq_pool = pool;
 630
 631        rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
 632        if (!rqstp->rq_argp)
 633                goto out_thread;
 634
 635        rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
 636        if (!rqstp->rq_resp)
 637                goto out_thread;
 638
 639        if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
 640                goto out_thread;
 641
 642        return rqstp;
 643out_thread:
 644        svc_exit_thread(rqstp);
 645out_enomem:
 646        return ERR_PTR(-ENOMEM);
 647}
 648EXPORT_SYMBOL_GPL(svc_prepare_thread);
 649
 650/*
 651 * Choose a pool in which to create a new thread, for svc_set_num_threads
 652 */
 653static inline struct svc_pool *
 654choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
 655{
 656        if (pool != NULL)
 657                return pool;
 658
 659        return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
 660}
 661
 662/*
 663 * Choose a thread to kill, for svc_set_num_threads
 664 */
 665static inline struct task_struct *
 666choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
 667{
 668        unsigned int i;
 669        struct task_struct *task = NULL;
 670
 671        if (pool != NULL) {
 672                spin_lock_bh(&pool->sp_lock);
 673        } else {
 674                /* choose a pool in round-robin fashion */
 675                for (i = 0; i < serv->sv_nrpools; i++) {
 676                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
 677                        spin_lock_bh(&pool->sp_lock);
 678                        if (!list_empty(&pool->sp_all_threads))
 679                                goto found_pool;
 680                        spin_unlock_bh(&pool->sp_lock);
 681                }
 682                return NULL;
 683        }
 684
 685found_pool:
 686        if (!list_empty(&pool->sp_all_threads)) {
 687                struct svc_rqst *rqstp;
 688
 689                /*
 690                 * Remove from the pool->sp_all_threads list
 691                 * so we don't try to kill it again.
 692                 */
 693                rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
 694                list_del_init(&rqstp->rq_all);
 695                task = rqstp->rq_task;
 696        }
 697        spin_unlock_bh(&pool->sp_lock);
 698
 699        return task;
 700}
 701
 702/*
 703 * Create or destroy enough new threads to make the number
 704 * of threads the given number.  If `pool' is non-NULL, applies
 705 * only to threads in that pool, otherwise round-robins between
  706 * all pools.  Caller must ensure mutual exclusion between this and
 707 * server startup or shutdown.
 708 *
 709 * Destroying threads relies on the service threads filling in
 710 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 711 * has been created using svc_create_pooled().
 712 *
 713 * Based on code that used to be in nfsd_svc() but tweaked
 714 * to be pool-aware.
 715 */
 716int
 717svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 718{
 719        struct svc_rqst *rqstp;
 720        struct task_struct *task;
 721        struct svc_pool *chosen_pool;
 722        int error = 0;
 723        unsigned int state = serv->sv_nrthreads-1;
 724        int node;
 725
 726        if (pool == NULL) {
 727                /* The -1 assumes caller has done a svc_get() */
 728                nrservs -= (serv->sv_nrthreads-1);
 729        } else {
 730                spin_lock_bh(&pool->sp_lock);
 731                nrservs -= pool->sp_nrthreads;
 732                spin_unlock_bh(&pool->sp_lock);
 733        }
 734
 735        /* create new threads */
 736        while (nrservs > 0) {
 737                nrservs--;
 738                chosen_pool = choose_pool(serv, pool, &state);
 739
 740                node = svc_pool_map_get_node(chosen_pool->sp_id);
 741                rqstp = svc_prepare_thread(serv, chosen_pool, node);
 742                if (IS_ERR(rqstp)) {
 743                        error = PTR_ERR(rqstp);
 744                        break;
 745                }
 746
 747                __module_get(serv->sv_module);
 748                task = kthread_create_on_node(serv->sv_function, rqstp,
 749                                              node, serv->sv_name);
 750                if (IS_ERR(task)) {
 751                        error = PTR_ERR(task);
 752                        module_put(serv->sv_module);
 753                        svc_exit_thread(rqstp);
 754                        break;
 755                }
 756
 757                rqstp->rq_task = task;
 758                if (serv->sv_nrpools > 1)
 759                        svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
 760
 761                svc_sock_update_bufs(serv);
 762                wake_up_process(task);
 763        }
 764        /* destroy old threads */
 765        while (nrservs < 0 &&
 766               (task = choose_victim(serv, pool, &state)) != NULL) {
 767                send_sig(SIGINT, task, 1);
 768                nrservs++;
 769        }
 770
 771        return error;
 772}
 773EXPORT_SYMBOL_GPL(svc_set_num_threads);
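/*
 * Editor's note (sketch): the same entry point scales a service down.  A
 * caller holding the appropriate mutex can stop every worker thread with
 *
 *	svc_set_num_threads(serv, NULL, 0);
 *
 * which drives the loop above through choose_victim() and sends each
 * victim SIGINT.
 */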
 774
 775/*
 776 * Called from a server thread as it's exiting. Caller must hold the BKL or
 777 * the "service mutex", whichever is appropriate for the service.
 778 */
 779void
 780svc_exit_thread(struct svc_rqst *rqstp)
 781{
 782        struct svc_serv *serv = rqstp->rq_server;
 783        struct svc_pool *pool = rqstp->rq_pool;
 784
 785        svc_release_buffer(rqstp);
 786        kfree(rqstp->rq_resp);
 787        kfree(rqstp->rq_argp);
 788        kfree(rqstp->rq_auth_data);
 789
 790        spin_lock_bh(&pool->sp_lock);
 791        pool->sp_nrthreads--;
 792        list_del(&rqstp->rq_all);
 793        spin_unlock_bh(&pool->sp_lock);
 794
 795        kfree(rqstp);
 796
 797        /* Release the server */
 798        if (serv)
 799                svc_destroy(serv);
 800}
 801EXPORT_SYMBOL_GPL(svc_exit_thread);
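/*
 * Editor's sketch (not part of the original file): the general shape of a
 * thread function passed to svc_create_pooled() as sv_function.  svc_recv()
 * and svc_process() live in other sunrpc files; the loop and the error
 * handling here are illustrative only.
 */
static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;	/* allocated by svc_prepare_thread() */
	int err;

	while (!kthread_should_stop()) {
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EINTR)
			break;		/* e.g. SIGINT from svc_set_num_threads() */
		if (err < 0)
			continue;
		svc_process(rqstp);
	}

	svc_exit_thread(rqstp);		/* undoes svc_prepare_thread() accounting */
	return 0;
}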
 802
 803/*
 804 * Register an "inet" protocol family netid with the local
 805 * rpcbind daemon via an rpcbind v4 SET request.
 806 *
 807 * No netconfig infrastructure is available in the kernel, so
 808 * we map IP_ protocol numbers to netids by hand.
 809 *
 810 * Returns zero on success; a negative errno value is returned
 811 * if any error occurs.
 812 */
 813static int __svc_rpcb_register4(struct net *net, const u32 program,
                                const u32 ...

/*
 * [Remainder of the listing is garbled in this capture.  The surviving
 *  fragments correspond to the rpcbind registration and unregistration
 *  helpers (__svc_rpcb_register4() and friends, svc_register(),
 *  svc_unregister()), svc_printk(), the dispatch path (svc_process_common(),
 *  svc_process(), bc_svc_process()) and svc_max_payload().]
 */