/* linux/drivers/misc/sgi-gru/grukservices.c */
   1/*
   2 * SN Platform GRU Driver
   3 *
   4 *              KERNEL SERVICES THAT USE THE GRU
   5 *
   6 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
   7 *
   8 *  This program is free software; you can redistribute it and/or modify
   9 *  it under the terms of the GNU General Public License as published by
  10 *  the Free Software Foundation; either version 2 of the License, or
  11 *  (at your option) any later version.
  12 *
  13 *  This program is distributed in the hope that it will be useful,
  14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 *  GNU General Public License for more details.
  17 *
  18 *  You should have received a copy of the GNU General Public License
  19 *  along with this program; if not, write to the Free Software
  20 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  21 */
  22
  23#include <linux/kernel.h>
  24#include <linux/errno.h>
  25#include <linux/slab.h>
  26#include <linux/mm.h>
  27#include <linux/smp_lock.h>
  28#include <linux/spinlock.h>
  29#include <linux/device.h>
  30#include <linux/miscdevice.h>
  31#include <linux/proc_fs.h>
  32#include <linux/interrupt.h>
  33#include <linux/uaccess.h>
  34#include "gru.h"
  35#include "grulib.h"
  36#include "grutables.h"
  37#include "grukservices.h"
  38#include "gru_instructions.h"
  39#include <asm/uv/uv_hub.h>
  40
  41/*
  42 * Kernel GRU Usage
  43 *
  44 * The following is an interim algorithm for management of kernel GRU
  45 * resources. This will likely be replaced when we better understand the
  46 * kernel/user requirements.
  47 *
  48 * At boot time, the kernel permanently reserves a fixed number of
  49 * CBRs/DSRs for each cpu to use. The resources are all taken from
  50 * the GRU chiplet 1 on the blade. This leaves the full set of resources
  51 * of chiplet 0 available to be allocated to a single user.
  52 */
  53
  54/* Blade percpu resources PERMANENTLY reserved for kernel use */
  55#define GRU_NUM_KERNEL_CBR      1
  56#define GRU_NUM_KERNEL_DSR_BYTES 256
  57#define KERNEL_CTXNUM           15
  58
  59/* GRU instruction attributes for all instructions */
  60#define IMA                     IMA_CB_DELAY
  61
  62/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
  63#define __gru_cacheline_aligned__                               \
  64        __attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))
  65
  66#define MAGIC   0x1234567887654321UL
  67
  68/* Default retry count for GRU errors on kernel instructions */
  69#define EXCEPTION_RETRY_LIMIT   3
  70
  71/* Status of message queue sections */
  72#define MQS_EMPTY               0
  73#define MQS_FULL                1
  74#define MQS_NOOP                2
  75
  76/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
  77/* optimized for x86_64 */
/*
 * In-memory layout of a kernel message queue.  Placement is cache-line
 * deliberate (see the CL n notes): the head word updated by GRU mesq/AMO
 * operations on the send side lives in CL 0, receiver-side bookkeeping
 * in CL 1, and message storage starts at CL 2, so senders and the
 * receiver do not thrash the same cache lines.
 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__; /* CL 0 - queue head, updated on send */
	int			qlines;                         /* DW 1 - queue size in cache lines */
	long			hstatus[2];	/* per-half status words; set at create, see gru_free_message() */
	void			*next __gru_cacheline_aligned__;/* CL 1 - receiver's next-message cursor */
	void			*limit;		/* end of the usable message area */
	void			*start;		/* first message slot (== &data) */
	void			*start2;	/* first slot of the queue's second half */
	char			data ____cacheline_aligned;     /* CL 2 - first byte of message storage */
};
  88
  89/* First word in every message - used by mesq interface */
struct message_header {
	char	present;	/* MQS_xxx state of the first cache line */
	char	present2;	/* saved payload byte replaced by the 2nd-line present flag (2-line msgs) */
	char	lines;		/* message size in cache lines (1 or 2) */
	char	fill;		/* padding - unused */
};
  96
  97#define QLINES(mq)      ((mq) + offsetof(struct message_queue, qlines))
  98#define HSTATUS(mq, h)  ((mq) + offsetof(struct message_queue, hstatus[h]))
  99
/*
 * Reserve this cpu's permanently-allocated kernel CB and DSR space.
 *
 *	dsr_bytes - DSR bytes the caller needs; must not exceed the
 *		    per-cpu reservation (BUG otherwise)
 *	cb/dsr	  - returned pointers to the cpu's kernel CB / DSR
 *
 * Disables preemption so the cpu (and therefore the per-cpu resources)
 * cannot change under the caller; the matching gru_free_cpu_resources()
 * re-enables it.  Always returns 0.
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_base[uv_numa_blade_id()];	/* this blade's GRU state */
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}
 113
/*
 * Release resources taken by gru_get_cpu_resources().  The resources
 * are permanently reserved per-cpu, so there is nothing to free -
 * just re-enable preemption.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	preempt_enable();
}
 118
/*
 * Copy the extended exception detail of a CB into *excdet.
 * Reads the extended control block (CBE) associated with the CB.
 * Always returns 0.
 */
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);		/* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}
 133
 134char *gru_get_cb_exception_detail_str(int ret, void *cb,
 135                                      char *buf, int size)
 136{
 137        struct gru_control_block_status *gen = (void *)cb;
 138        struct control_block_extended_exc_detail excdet;
 139
 140        if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
 141                gru_get_cb_exception_detail(cb, &excdet);
 142                snprintf(buf, size,
 143                        "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
 144                        "excdet0 0x%lx, excdet1 0x%x",
 145                        gen, excdet.opc, excdet.exopc, excdet.ecause,
 146                        excdet.exceptdet0, excdet.exceptdet1);
 147        } else {
 148                snprintf(buf, size, "No exception");
 149        }
 150        return buf;
 151}
 152
/*
 * Spin until the CB leaves the ACTIVE state.  Returns the final
 * istatus (CBS_IDLE or CBS_EXCEPTION).  barrier() forces istatus to
 * be re-read from memory on every iteration since the field is
 * updated by the GRU hardware, not this cpu.
 */
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}
 161
/*
 * A CB took an exception.  Retry the instruction up to
 * EXCEPTION_RETRY_LIMIT times as long as the cause is one of the
 * software-retryable ones (EXCEPTION_RETRY_BITS).
 *
 * Returns CBS_IDLE if a retry eventually completed, CBS_EXCEPTION
 * otherwise.  Message-queue substatus failures are never retried
 * here - the mesq send path handles those itself.
 */
static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1)  {
		if (gru_get_cb_message_queue_substatus(cb))
			break;		/* mesq failure - caller's problem */
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;

		gru_get_cb_exception_detail(cb, &excdet);
		if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
			break;		/* cause is not retryable */
		if (retry-- == 0)
			break;		/* retries exhausted */
		gen->icmd = 1;		/* restart the instruction */
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}
 184
 185int gru_check_status_proc(void *cb)
 186{
 187        struct gru_control_block_status *gen = (void *)cb;
 188        int ret;
 189
 190        ret = gen->istatus;
 191        if (ret != CBS_EXCEPTION)
 192                return ret;
 193        return gru_retry_exception(cb);
 194
 195}
 196
 197int gru_wait_proc(void *cb)
 198{
 199        struct gru_control_block_status *gen = (void *)cb;
 200        int ret;
 201
 202        ret = gru_wait_idle_or_exception(gen);
 203        if (ret == CBS_EXCEPTION)
 204                ret = gru_retry_exception(cb);
 205
 206        return ret;
 207}
 208
/*
 * Fatal-error path: format the CB's exception details and panic the
 * system.  Does not return.
 */
void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}
 216
 217void gru_wait_abort_proc(void *cb)
 218{
 219        int ret;
 220
 221        ret = gru_wait_proc(cb);
 222        if (ret)
 223                gru_abort(ret, cb, "gru_wait_abort");
 224}
 225
 226
 227/*------------------------------ MESSAGE QUEUES -----------------------------*/
 228
 229/* Internal status . These are NOT returned to the user. */
 230#define MQIE_AGAIN              -1      /* try again */
 231
 232
 233/*
 234 * Save/restore the "present" flag that is in the second line of 2-line
 235 * messages
 236 */
 237static inline int get_present2(void *p)
 238{
 239        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
 240        return mhdr->present;
 241}
 242
 243static inline void restore_present2(void *p, int val)
 244{
 245        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
 246        mhdr->present = val;
 247}
 248
 249/*
 250 * Create a message queue.
 251 *      qlines - message queue size in cache lines. Includes 2-line header.
 252 */
 253int gru_create_message_queue(void *p, unsigned int bytes)
 254{
 255        struct message_queue *mq = p;
 256        unsigned int qlines;
 257
 258        qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
 259        memset(mq, 0, bytes);
 260        mq->start = &mq->data;
 261        mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
 262        mq->next = &mq->data;
 263        mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
 264        mq->qlines = qlines;
 265        mq->hstatus[0] = 0;
 266        mq->hstatus[1] = 1;
 267        mq->head = gru_mesq_head(2, qlines / 2 + 1);
 268        return 0;
 269}
 270EXPORT_SYMBOL_GPL(gru_create_message_queue);
 271
 272/*
 273 * Send a NOOP message to a message queue
 274 *      Returns:
 275 *               0 - if queue is full after the send. This is the normal case
 276 *                   but various races can change this.
 277 *              -1 - if mesq sent successfully but queue not full
 278 *              >0 - unexpected error. MQE_xxx returned
 279 */
static int send_noop_message(void *cb,
				unsigned long mq, void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	/* Borrow the caller's message buffer: overwrite its header with
	   a 1-line NOOP header, send, then restore before returning. */
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			/* Queue full after the NOOP - the expected outcome */
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			/* Data PUT was nacked; redo the store of the single
			   line at the head slot (head index << 6 == byte
			   offset, lines are 64 bytes) */
			STAT(mesq_noop_put_nacked);
			m = mq + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}
 332
 333/*
 334 * Handle a gru_mesq full.
 335 */
static int send_message_queue_full(void *cb,
			   unsigned long mq, void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines, save;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	/*
	 * Fetch "qlines" from the queue header. Since the queue may be
	 * in memory that can't be accessed using socket addresses, use
	 * the GRU to access the data. Use DSR space from the message.
	 */
	save = *(int *)mesg;	/* preserve the word the vload clobbers */
	gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	qlines = *(int *)mesg;
	*(int *)mesg = save;
	half = (limit != qlines);	/* index of the half to switch to */

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		/* Target half not yet released by the receiver */
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mq, mesg)) {
			/* NOOP failed or queue was not full - drop the lock */
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;	/* account for the NOOP just queued */
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If not successfully in swapping queue head, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
 406
 407
 408/*
 409 * Handle a gru_mesq failure. Some of these failures are software recoverable
 410 * or retryable.
 411 */
 412static int send_message_failure(void *cb,
 413                                unsigned long mq,
 414                                void *mesg,
 415                                int lines)
 416{
 417        int substatus, ret = 0;
 418        unsigned long m;
 419
 420        substatus = gru_get_cb_message_queue_substatus(cb);
 421        switch (substatus) {
 422        case CBSS_NO_ERROR:
 423                STAT(mesq_send_unexpected_error);
 424                ret = MQE_UNEXPECTED_CB_ERR;
 425                break;
 426        case CBSS_LB_OVERFLOWED:
 427                STAT(mesq_send_lb_overflow);
 428                ret = MQE_CONGESTION;
 429                break;
 430        case CBSS_QLIMIT_REACHED:
 431                STAT(mesq_send_qlimit_reached);
 432                ret = send_message_queue_full(cb, mq, mesg, lines);
 433                break;
 434        case CBSS_AMO_NACKED:
 435                STAT(mesq_send_amo_nacked);
 436                ret = MQE_CONGESTION;
 437                break;
 438        case CBSS_PUT_NACKED:
 439                STAT(mesq_send_put_nacked);
 440                m =mq + (gru_get_amo_value_head(cb) << 6);
 441                gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
 442                if (gru_wait(cb) == CBS_IDLE)
 443                        ret = MQE_OK;
 444                else
 445                        ret = MQE_UNEXPECTED_CB_ERR;
 446                break;
 447        default:
 448                BUG();
 449        }
 450        return ret;
 451}
 452
 453/*
 454 * Send a message to a message queue
 455 *      cb      GRU control block to use to send message
 456 *      mq      message queue
 *      mesg    message. Must be vaddr within a GSEG
 458 *      bytes   message size (<= 2 CL)
 459 */
int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);	/* 1 or 2 */
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	/* Stage the message in per-cpu DSR space - the GRU mesq
	   instruction sends from there */
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		/* Save the payload byte that the second line's present
		   flag will overwrite, then mark both lines present */
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	/* Resend as long as failures are software-recoverable */
	do {
		ret = MQE_OK;
		gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mq, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
 496
 497/*
 498 * Advance the receive pointer for the queue to the next message.
 499 */
void gru_free_message(void *rmq, void *mesg)
{
	struct message_queue *mq = rmq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	/* Mark the message's cache line(s) empty so the slot can be reused */
	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		/* Reached the end - wrap; the second half is now consumed */
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		/* Crossed the midpoint - the first half is now consumed */
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;	/* release that half to senders */
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
 526
 527/*
 528 * Get next message from message queue. Return NULL if no message
 529 * present. User must call next_message() to move to next message.
 530 *      rmq     message queue
 531 */
void *gru_get_next_message(void *rmq)
{
	struct message_queue *mq = rmq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(rmq, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;	/* second line not delivered yet */

	if (!present) {		/* relies on MQS_EMPTY == 0 */
		STAT(mesq_receive_none);
		return NULL;
	}

	/* Restore the payload byte the sender replaced with the
	   second-line present flag */
	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
 562
 563/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
 564
 565/*
 566 * Copy a block of data using the GRU resources
 567 */
 568int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
 569                                unsigned int bytes)
 570{
 571        void *cb;
 572        void *dsr;
 573        int ret;
 574
 575        STAT(copy_gpa);
 576        if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
 577                return MQE_BUG_NO_RESOURCES;
 578        gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
 579                  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
 580        ret = gru_wait(cb);
 581        gru_free_cpu_resources(cb, dsr);
 582        return ret;
 583}
 584EXPORT_SYMBOL_GPL(gru_copy_gpa);
 585
 586/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
 587/*      Temp - will delete after we gain confidence in the GRU          */
/* Scratch words for quicktest(); cacheline aligned so each is the sole
   occupant of its line when targeted by GRU vload/vstore */
static __cacheline_aligned unsigned long word0;
static __cacheline_aligned unsigned long word1;
 590
/*
 * Startup sanity check of the kernel GRU context: vload MAGIC from
 * word0 into DSR space, vstore it back out to word1, and verify the
 * round trip.  Any failure is fatal (BUG).  Runs only when the
 * GRU_QUICKLOOK option is set.  Always returns 0.
 */
static int quicktest(struct gru_state *gru)
{
	void *cb;
	void *ds;
	unsigned long *p;

	cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	p = ds;
	word0 = MAGIC;

	/* memory -> DSR */
	gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (*(unsigned long *)ds != MAGIC)
		BUG();
	/* DSR -> memory */
	gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (word0 != word1 || word0 != MAGIC) {
		printk
		    ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
		     gru->gs_gid, word1, MAGIC);
		BUG();		/* ZZZ should not be fatal */
	}

	return 0;
}
 621
 622
 623int gru_kservices_init(struct gru_state *gru)
 624{
 625        struct gru_blade_state *bs;
 626        struct gru_context_configuration_handle *cch;
 627        unsigned long cbr_map, dsr_map;
 628        int err, num, cpus_possible;
 629
 630        /*
 631         * Currently, resources are reserved ONLY on the second chiplet
 632         * on each blade. This leaves ALL resources on chiplet 0 available
 633         * for user code.
 634         */
 635        bs = gru->gs_blade;
 636        if (gru != &bs->bs_grus[1])
 637                return 0;
 638
 639        cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);
 640
 641        num = GRU_NUM_KERNEL_CBR * cpus_possible;
 642        cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
 643        gru->gs_reserved_cbrs += num;
 644
 645        num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
 646        dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
 647        gru->gs_reserved_dsr_bytes += num;
 648
 649        gru->gs_active_contexts++;
 650        __set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
 651        cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
 652
 653        bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
 654                                        KERNEL_CTXNUM, 0);
 655        bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
 656                                        KERNEL_CTXNUM, 0);
 657
 658        lock_cch_handle(cch);
 659        cch->tfm_fault_bit_enable = 0;
 660        cch->tlb_int_enable = 0;
 661        cch->tfm_done_bit_enable = 0;
 662        cch->unmap_enable = 1;
 663        err = cch_allocate(cch, 0, cbr_map, dsr_map);
 664        if (err) {
 665                gru_dbg(grudev,
 666                        "Unable to allocate kernel CCH: gru %d, err %d\n",
 667                        gru->gs_gid, err);
 668                BUG();
 669        }
 670        if (cch_start(cch)) {
 671                gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n",
 672                        gru->gs_gid, err);
 673                BUG();
 674        }
 675        unlock_cch_handle(cch);
 676
 677        if (gru_options & GRU_QUICKLOOK)
 678                quicktest(gru);
 679        return 0;
 680}
 681