linux/kernel/padata.c
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

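/*
 * The sequence number wraps at a per-instance multiple of the number of
 * cpus in use (see padata_init_pqueues()), so MAX_SEQ_NR stays safely
 * below INT_MAX. MAX_OBJ_NUM bounds the number of objects in flight per
 * parallel_data; padata_do_parallel() returns -EBUSY once it is reached.
 */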
#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
#define MAX_OBJ_NUM 1000

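/* Map a dense cpu index onto the matching cpu in the pcpu cpumask. */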
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}

static int padata_cpu_hash(struct padata_priv *padata)
{
        int cpu_index;
        struct parallel_data *pd;

        pd = padata->pd;

        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr mod. number of cpus in use.
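         *
         * Example: if cpumask.pcpu contains cpus {0, 2, 3}, the weight is
         * 3 and seq_nr 5 gives cpu_index 5 % 3 = 2, which
         * padata_index_to_cpu() maps to the third set bit, cpu 3.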
         */
        cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu);

        return padata_index_to_cpu(pd, cpu_index);
}

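/* Run the parallel callbacks of all objects queued on this cpu. */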
static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_parallel_queue *pqueue;
        LIST_HEAD(local_list);

        local_bh_disable();
        pqueue = container_of(parallel_work,
                              struct padata_parallel_queue, work);

        spin_lock(&pqueue->parallel.lock);
        list_replace_init(&pqueue->parallel.list, &local_list);
        spin_unlock(&pqueue->parallel.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->parallel(padata);
        }

        local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
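 *
 * A minimal caller sketch (struct my_request, my_parallel(), my_serial()
 * and my_cb_cpu are hypothetical; error handling elided):
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *	};
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, my_cb_cpu);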
 */
int padata_do_parallel(struct padata_instance *pinst,
                       struct padata_priv *padata, int cb_cpu)
{
        int target_cpu, err;
        struct padata_parallel_queue *queue;
        struct parallel_data *pd;

        rcu_read_lock_bh();

        pd = rcu_dereference(pinst->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;

        if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
                goto out;

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;

        if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
                goto out;

        err = 0;
        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = cb_cpu;

        if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
                atomic_set(&pd->seq_nr, -1);

        padata->seq_nr = atomic_inc_return(&pd->seq_nr);

        target_cpu = padata_cpu_hash(padata);
        queue = per_cpu_ptr(pd->pqueue, target_cpu);

        spin_lock(&queue->parallel.lock);
        list_add_tail(&padata->list, &queue->parallel.list);
        spin_unlock(&queue->parallel.lock);

        queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
        rcu_read_unlock_bh();

        return err;
}
EXPORT_SYMBOL(padata_do_parallel);

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
        int cpu, num_cpus;
        int next_nr, next_index;
        struct padata_parallel_queue *queue, *next_queue;
        struct padata_priv *padata;
        struct padata_list *reorder;

        num_cpus = cpumask_weight(pd->cpumask.pcpu);

        /*
         * Calculate the percpu reorder queue and the sequence
         * number of the next object.
         */
        next_nr = pd->processed;
        next_index = next_nr % num_cpus;
        cpu = padata_index_to_cpu(pd, next_index);
        next_queue = per_cpu_ptr(pd->pqueue, cpu);

        if (unlikely(next_nr > pd->max_seq_nr)) {
                next_nr = next_nr - pd->max_seq_nr - 1;
                next_index = next_nr % num_cpus;
                cpu = padata_index_to_cpu(pd, next_index);
                next_queue = per_cpu_ptr(pd->pqueue, cpu);
                pd->processed = 0;
        }

        padata = NULL;

        reorder = &next_queue->reorder;

        if (!list_empty(&reorder->list)) {
                padata = list_entry(reorder->list.next,
                                    struct padata_priv, list);

                BUG_ON(next_nr != padata->seq_nr);

                spin_lock(&reorder->lock);
                list_del_init(&padata->list);
                atomic_dec(&pd->reorder_objects);
                spin_unlock(&reorder->lock);

                pd->processed++;

                goto out;
        }

        queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
        if (queue->cpu_index == next_queue->cpu_index) {
                padata = ERR_PTR(-ENODATA);
                goto out;
        }

        padata = ERR_PTR(-EINPROGRESS);
out:
        return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_instance *pinst = pd->pinst;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time. Calculating in which percpu reorder
         * queue the next object will arrive takes some time. A spinlock
         * would be highly contended. Also it is not clear in which order
         * the objects arrive to the reorder queues. So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment. Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_get_next(pd);

                /*
                 * All reorder queues are empty, or the next object that needs
                 * serialization is parallel processed by another cpu and is
                 * still on its way to the cpu's reorder queue, nothing to
                 * do for now.
                 */
                if (!padata || PTR_ERR(padata) == -EINPROGRESS)
                        break;

                /*
                 * This cpu has to do the parallel processing of the next
                 * object. It's waiting in the cpu's parallelization queue,
                 * so exit immediately.
                 */
                if (PTR_ERR(padata) == -ENODATA) {
                        del_timer(&pd->timer);
                        spin_unlock_bh(&pd->lock);
                        return;
                }

                squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived to
         * the reorder queues in the meantime, we will be called again
         * from the timer function if no one else cares for it.
         */
        if (atomic_read(&pd->reorder_objects)
                        && !(pinst->flags & PADATA_RESET))
                mod_timer(&pd->timer, jiffies + HZ);
        else
                del_timer(&pd->timer);

        return;
}

static void padata_reorder_timer(unsigned long arg)
{
        struct parallel_data *pd = (struct parallel_data *)arg;

        padata_reorder(pd);
}

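/* Run the serial callbacks of all objects queued on this cpu. */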
static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                atomic_dec(&pd->refcnt);
        }
        local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
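 *
 * A typical parallel callback ends by handing the object back for
 * serialization; my_parallel() is a hypothetical sketch:
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		... do the cpu-intensive work here ...
 *		padata_do_serial(padata);
 *	}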
 */
void padata_do_serial(struct padata_priv *padata)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;

        pd = padata->pd;

        cpu = get_cpu();
        pqueue = per_cpu_ptr(pd->pqueue, cpu);

        spin_lock(&pqueue->reorder.lock);
        atomic_inc(&pd->reorder_objects);
        list_add_tail(&padata->list, &pqueue->reorder.list);
        spin_unlock(&pqueue->reorder.lock);

        put_cpu();

        padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct parallel_data *pd,
                                 const struct cpumask *pcpumask,
                                 const struct cpumask *cbcpumask)
{
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
                /* Free the mask that was allocated, not the one that failed. */
                free_cpumask_var(pd->cpumask.pcpu);
                return -ENOMEM;
        }

        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
        return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
        int cpu_index, num_cpus, cpu;
        struct padata_parallel_queue *pqueue;

        cpu_index = 0;
        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                pqueue->pd = pd;
                pqueue->cpu_index = cpu_index;
                cpu_index++;

                __padata_list_init(&pqueue->reorder);
                __padata_list_init(&pqueue->parallel);
                INIT_WORK(&pqueue->work, padata_parallel_worker);
                atomic_set(&pqueue->num_obj, 0);
        }

        num_cpus = cpumask_weight(pd->cpumask.pcpu);
        pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0;
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                                             const struct cpumask *pcpumask,
                                             const struct cpumask *cbcpumask)
{
        struct parallel_data *pd;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_pqueue;

        if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
                goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
        pd->pinst = pinst;
        spin_lock_init(&pd->lock);

        return pd;

err_free_squeue:
        free_percpu(pd->squeue);
err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}

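/* Free the cpumasks and percpu queues of a parallel_data structure. */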
static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->pqueue);
        free_percpu(pd->squeue);
        kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                flush_work(&pqueue->work);
        }

        del_timer_sync(&pd->timer);

        if (atomic_read(&pd->reorder_objects))
                padata_reorder(pd);

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                flush_work(&squeue->work);
        }

        BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();

        get_online_cpus();
        padata_flush_queues(pinst->pd);
        put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
                           struct parallel_data *pd_new)
{
        struct parallel_data *pd_old = pinst->pd;
        int notification_mask = 0;

        pinst->flags |= PADATA_RESET;

        rcu_assign_pointer(pinst->pd, pd_new);

        synchronize_rcu();

        if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
                notification_mask |= PADATA_CPU_PARALLEL;
        if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
                notification_mask |= PADATA_CPU_SERIAL;

        padata_flush_queues(pd_old);
        padata_free_pd(pd_old);

        if (notification_mask)
                blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
                                             notification_mask,
                                             &pd_new->cpumask);

        pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to the padata instance
 * @nblock: A pointer to the notifier block.
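 *
 * A callback sketch (my_cpumask_change() is hypothetical); val carries
 * the PADATA_CPU_* notification mask and data points to the new
 * struct padata_cpumask:
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_mask = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			... react to new_mask->pcpu ...
 *		return NOTIFY_OK;
 *	}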
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
                                     struct notifier_block *nblock)
{
        return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
                                                nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to the notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
                                       struct notifier_block *nblock)
{
        return blocking_notifier_chain_unregister(
                &pinst->cpumask_change_notifier,
                nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_active_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        struct parallel_data *pd;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                return -ENOMEM;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        padata_replace(pinst, pd);

        if (valid)
                __padata_start(pinst);

        return 0;
}

/**
 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                       one is used by parallel workers and the second one
 *                       by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
                        cpumask_var_t cbcpumask)
{
        int err;

        mutex_lock(&pinst->lock);
        get_online_cpus();

        err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);

        put_online_cpus();
        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_set_cpumasks);

/**
 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to
 *                      the value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
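 *
 * A usage sketch (pinst is assumed to exist; error handling elided):
 *
 *	cpumask_var_t mask;
 *
 *	alloc_cpumask_var(&mask, GFP_KERNEL);
 *	cpumask_copy(mask, cpu_online_mask);
 *	padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);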
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        mutex_lock(&pinst->lock);
        get_online_cpus();

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        put_online_cpus();
        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd;

        if (cpumask_test_cpu(cpu, cpu_active_mask)) {
                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return 0;
}

/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
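 *
 * For example, to add cpu 3 to both cpumasks:
 *
 *	padata_add_cpu(pinst, 3, PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL);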
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
        int err;

        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;

        mutex_lock(&pinst->lock);

        get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

        err = __padata_add_cpu(pinst, cpu);
        put_online_cpus();

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_add_cpu);

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd = NULL;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {

                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);
        }

        return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
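 *
 * For example, to remove cpu 3 from the serial cpumask only:
 *
 *	padata_remove_cpu(pinst, 3, PADATA_CPU_SERIAL);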
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
        int err;

        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;

        mutex_lock(&pinst->lock);

        get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

        err = __padata_remove_cpu(pinst, cpu);
        put_online_cpus();

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
        int err = 0;

        mutex_lock(&pinst->lock);

        if (pinst->flags & PADATA_INVALID)
                err = -EINVAL;

        __padata_start(pinst);

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_callback(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        int err;
        struct padata_instance *pinst;
        int cpu = (unsigned long)hcpu;

        pinst = container_of(nfb, struct padata_instance, cpu_notifier);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                err = __padata_add_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
                if (err)
                        return notifier_from_errno(err);
                break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                err = __padata_remove_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
                if (err)
                        return notifier_from_errno(err);
                break;

        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                __padata_remove_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
                /* Don't fall through and re-add the canceled cpu. */
                break;

        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                __padata_add_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
        }

        return NOTIFY_OK;
}
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

        padata_stop(pinst);
        padata_free_pd(pinst->pd);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        kfree(pinst);
}

#define kobj2pinst(_kobj)                                       \
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)                                      \
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);
        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
                               nr_cpu_ids);
        if (PAGE_SIZE - len < 2)
                len = -EINVAL;
        else
                len += sprintf(buf + len, "\n");

        mutex_unlock(&pinst->lock);
        return len;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)          \
        static struct padata_sysfs_entry _name##_attr =         \
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)               \
        static struct padata_sysfs_entry _name##_attr = \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        /* Check the store hook, not the show hook, before calling it. */
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_attrs = padata_default_attrs,
        .release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
        return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
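 *
 * A setup sketch (the workqueue name "my_wq" is hypothetical; error
 * handling elided):
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_wq", WQ_CPU_INTENSIVE, 1);
 *	pinst = padata_alloc(wq, cpu_online_mask, cpu_online_mask);
 *	padata_start(pinst);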
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
                                     const struct cpumask *pcpumask,
                                     const struct cpumask *cbcpumask)
{
        struct padata_instance *pinst;
        struct parallel_data *pd = NULL;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        get_online_cpus();
        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_inst;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_inst;
        }
        if (!padata_validate_cpumask(pinst, pcpumask) ||
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;

        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                goto err_free_masks;

        rcu_assign_pointer(pinst->pd, pd);

        pinst->wq = wq;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
        pinst->cpu_notifier.notifier_call = padata_cpu_callback;
        pinst->cpu_notifier.priority = 0;
        register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

        put_online_cpus();

        BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

        return pinst;

err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
        kfree(pinst);
        put_online_cpus();
err:
        return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);