linux/drivers/s390/scsi/zfcp_qdio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

#define ZFCP_QDIO_REQUEST_RESCAN_MSECS  (MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS    MSEC_PER_SEC

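/* React to a QDIO error: on an SLSB-state error, request hardware logging
 * and shut the adapter down; otherwise reopen the adapter for recovery.
 */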
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
                                    unsigned int qdio_err)
{
        struct zfcp_adapter *adapter = qdio->adapter;

        dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

        if (qdio_err & QDIO_ERROR_SLSB_STATE) {
                zfcp_qdio_siosl(adapter);
                zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
                return;
        }
        zfcp_erp_adapter_reopen(adapter,
                                ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
                                ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

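/* Zero @cnt SBALs starting at @first, wrapping around the end of the
 * circular queue of QDIO_MAX_BUFFERS_PER_Q buffers.
 */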
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
        int i, sbal_idx;

        for (i = first; i < first + cnt; i++) {
                sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
                memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
        }
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
        unsigned long long now, span;
        int used;

        now = get_tod_clock_monotonic();
        span = (now - qdio->req_q_time) >> 12;
        used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
        qdio->req_q_util += used * span;
        qdio->req_q_time = now;
}

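/* Output-queue completion handler; completed SBALs are reclaimed by the
 * request tasklet, so only errors need handling here.
 */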
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
                              int queue_no, int idx, int count,
                              unsigned long parm)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

        if (unlikely(qdio_err)) {
                zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
                return;
        }
}

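/* Scan the request queue for completed SBALs, zero them, update the
 * utilization statistics and the free count, and re-arm the rescan timer
 * while SBALs remain in use.
 */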
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
        struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
        struct ccw_device *cdev = qdio->adapter->ccw_device;
        unsigned int start, error;
        int completed;

        completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
        if (completed > 0) {
                if (error) {
                        zfcp_qdio_handler_error(qdio, "qdreqt1", error);
                } else {
                        /* cleanup all SBALs being program-owned now */
                        zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

                        spin_lock_irq(&qdio->stat_lock);
                        zfcp_qdio_account(qdio);
                        spin_unlock_irq(&qdio->stat_lock);
                        atomic_add(completed, &qdio->req_q_free);
                        wake_up(&qdio->req_q_wq);
                }
        }

        if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
                timer_reduce(&qdio->request_timer,
                             jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}

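/* Backstop timer for the request queue: schedule the request tasklet to
 * rescan for completions.
 */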
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
        struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);

        tasklet_schedule(&qdio->request_tasklet);
}

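/* Input-queue handler: on error, collect the affected SBALs for HBA debug
 * tracing and start recovery; otherwise look up the FSF request behind each
 * returned SBAL and hand the buffers back to QDIO.
 */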
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
                               int queue_no, int idx, int count,
                               unsigned long parm)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
        struct zfcp_adapter *adapter = qdio->adapter;
        int sbal_no, sbal_idx;

        if (unlikely(qdio_err)) {
                if (zfcp_adapter_multi_buffer_active(adapter)) {
                        void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
                        struct qdio_buffer_element *sbale;
                        u64 req_id;
                        u8 scount;

                        memset(pl, 0,
                               ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
                        sbale = qdio->res_q[idx]->element;
                        req_id = sbale->addr;
                        scount = min(sbale->scount + 1,
                                     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
                                     /* incl. signaling SBAL */

                        for (sbal_no = 0; sbal_no < scount; sbal_no++) {
                                sbal_idx = (idx + sbal_no) %
                                        QDIO_MAX_BUFFERS_PER_Q;
                                pl[sbal_no] = qdio->res_q[sbal_idx];
                        }
                        zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
                }
                zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
                return;
        }

        /*
         * go through all SBALs from input queue currently
         * returned by QDIO layer
         */
        for (sbal_no = 0; sbal_no < count; sbal_no++) {
                sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
                /* go through all SBALEs of SBAL */
                zfcp_fsf_reqid_check(qdio, sbal_idx);
        }

        /*
         * put SBALs back to response queue
         */
        if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

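/* Tasklet behind zfcp_qdio_poll(): kick the request tasklet if SBALs are
 * outstanding, drain the response queue, and re-enable the QDIO interrupt;
 * reschedule itself if more work is already pending.
 */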
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
        struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
        struct ccw_device *cdev = qdio->adapter->ccw_device;
        unsigned int start, error;
        int completed;

        if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
                tasklet_schedule(&qdio->request_tasklet);

        /* Check the Response Queue: */
        completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
        if (completed < 0)
                return;
        if (completed > 0)
                zfcp_qdio_int_resp(cdev, error, 0, start, completed,
                                   (unsigned long) qdio);

        if (qdio_start_irq(cdev))
                /* More work pending: */
                tasklet_schedule(&qdio->irq_tasklet);
}

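/* irq_poll callback of the qdio layer; defers all work to the IRQ tasklet */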
static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

        tasklet_schedule(&qdio->irq_tasklet);
}

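/* Terminate the current SBAL and chain to the next SBAL of the request;
 * returns the first usable SBALE of the new SBAL, or NULL if the request
 * already occupies its last allowed SBAL.
 */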
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        struct qdio_buffer_element *sbale;

        /* set last entry flag in current SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

        /* don't exceed last allowed SBAL */
        if (q_req->sbal_last == q_req->sbal_limit)
                return NULL;

        /* set chaining flag in first SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

        /* calculate index of next SBAL */
        q_req->sbal_last++;
        q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

        /* keep this request's number of SBALs up-to-date */
        q_req->sbal_number++;
        BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

        /* start at first SBALE of new SBAL */
        q_req->sbale_curr = 0;

        /* set storage-block type for new SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        sbale->sflags |= q_req->sbtype;

        return sbale;
}

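/* Return the next free SBALE for the request, chaining into a fresh SBAL
 * when the current one is full.
 */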
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
                return zfcp_qdio_sbal_chain(qdio, q_req);
        q_req->sbale_curr++;
        return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
                            struct scatterlist *sg)
{
        struct qdio_buffer_element *sbale;

        /* set storage-block type for this request */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->sflags |= q_req->sbtype;

        for (; sg; sg = sg_next(sg)) {
                sbale = zfcp_qdio_sbale_next(qdio, q_req);
                if (!sbale) {
                        atomic_inc(&qdio->req_q_full);
                        zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
                                             q_req->sbal_number);
                        return -EINVAL;
                }
                sbale->addr = sg_phys(sg);
                sbale->length = sg->length;
        }
        return 0;
}

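/* Wait condition for zfcp_qdio_sbal_get(): stop waiting once a free SBAL
 * is available or QDIO is no longer up.
 */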
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
        if (atomic_read(&qdio->req_q_free) ||
            !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return 1;
        return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
        long ret;

        ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
                       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

        if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return -EIO;

        if (ret > 0)
                return 0;

        if (!ret) {
                atomic_inc(&qdio->req_q_full);
                /* assume hanging outbound queue, try queue recovery */
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
        }

        return -EIO;
}

/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        int retval;
        u8 sbal_number = q_req->sbal_number;

        /*
         * This should actually be a spin_lock_bh(stat_lock), to protect against
         * Request Queue completion processing in tasklet context.
         * But we can't do so (and are safe), as we always get called with IRQs
         * disabled by spin_lock_irq[save](req_q_lock).
         */
        lockdep_assert_irqs_disabled();
        spin_lock(&qdio->stat_lock);
        zfcp_qdio_account(qdio);
        spin_unlock(&qdio->stat_lock);

        atomic_sub(sbal_number, &qdio->req_q_free);

        retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
                         q_req->sbal_first, sbal_number, NULL);

        if (unlikely(retval)) {
                /* Failed to submit the IO, roll back our modifications. */
                atomic_add(sbal_number, &qdio->req_q_free);
                zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
                                     sbal_number);
                return retval;
        }

        if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
                tasklet_schedule(&qdio->request_tasklet);
        else
                timer_reduce(&qdio->request_timer,
                             jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

        /* account for transferred buffers */
        qdio->req_q_idx += sbal_number;
        qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

        return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
        int ret;

        ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
        if (ret)
                return -ENOMEM;

        ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
        if (ret)
                goto free_req_q;

        init_waitqueue_head(&qdio->req_q_wq);

        ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
        if (ret)
                goto free_res_q;

        return 0;

free_res_q:
        qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
        qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
        return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
        struct zfcp_adapter *adapter = qdio->adapter;
        int idx, count;

        if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return;

        /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
        spin_lock_irq(&qdio->req_q_lock);
        atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
        spin_unlock_irq(&qdio->req_q_lock);

        wake_up(&qdio->req_q_wq);

        tasklet_disable(&qdio->irq_tasklet);
        tasklet_disable(&qdio->request_tasklet);
        del_timer_sync(&qdio->request_timer);
        qdio_stop_irq(adapter->ccw_device);
        qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

        /* cleanup used outbound sbals */
        count = atomic_read(&qdio->req_q_free);
        if (count < QDIO_MAX_BUFFERS_PER_Q) {
                idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
                count = QDIO_MAX_BUFFERS_PER_Q - count;
                zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
        }
        qdio->req_q_idx = 0;
        atomic_set(&qdio->req_q_free, 0);
}

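/* Propagate the queue limits to the SCSI host: one SBALE per scatter-gather
 * entry; max_sectors assumes at most 4 KB (8 * 512-byte sectors) of payload
 * per SBALE.
 */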
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
                            const struct zfcp_qdio *const qdio)
{
        struct Scsi_Host *const shost = adapter->scsi_host;

        if (shost == NULL)
                return;

        shost->sg_tablesize = qdio->max_sbale_per_req;
        shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and establish the request and response queues
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
        struct qdio_buffer **input_sbals[1] = {qdio->res_q};
        struct qdio_buffer **output_sbals[1] = {qdio->req_q};
        struct qdio_buffer_element *sbale;
        struct qdio_initialize init_data = {0};
        struct zfcp_adapter *adapter = qdio->adapter;
        struct ccw_device *cdev = adapter->ccw_device;
        struct qdio_ssqd_desc ssqd;
        int cc;

        if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
                return -EIO;

        atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
                      &qdio->adapter->status);

        init_data.q_format = QDIO_ZFCP_QFMT;
        init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
        if (enable_multibuffer)
                init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
        init_data.no_input_qs = 1;
        init_data.no_output_qs = 1;
        init_data.input_handler = zfcp_qdio_int_resp;
        init_data.output_handler = zfcp_qdio_int_req;
        init_data.irq_poll = zfcp_qdio_poll;
        init_data.int_parm = (unsigned long) qdio;
        init_data.input_sbal_addr_array = input_sbals;
        init_data.output_sbal_addr_array = output_sbals;

        if (qdio_establish(cdev, &init_data))
                goto failed_establish;

        if (qdio_get_ssqd_desc(cdev, &ssqd))
                goto failed_qdio;

        if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
                atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
                          &qdio->adapter->status);

        if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
                atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
                qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
        } else {
                atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
                qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
        }

        qdio->max_sbale_per_req =
                ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
                - 2;
        if (qdio_activate(cdev))
                goto failed_qdio;

        for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
                sbale = &(qdio->res_q[cc]->element[0]);
                sbale->length = 0;
                sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
                sbale->sflags = 0;
                sbale->addr = 0;
        }

        if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
                    NULL))
                goto failed_qdio;
        /* set index of first available SBAL / number of available SBALs */
        qdio->req_q_idx = 0;
        atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
        atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

        /* Enable processing for Request Queue completions: */
        tasklet_enable(&qdio->request_tasklet);
        /* Enable processing for QDIO interrupts: */
        tasklet_enable(&qdio->irq_tasklet);
        /* This results in a qdio_start_irq(): */
        tasklet_schedule(&qdio->irq_tasklet);

        zfcp_qdio_shost_update(adapter, qdio);

        return 0;

failed_qdio:
        qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
        dev_err(&cdev->dev,
                "Setting up the QDIO connection to the FCP adapter failed\n");
        return -EIO;
}

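/* Release all QDIO resources of an adapter; counterpart of zfcp_qdio_setup() */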
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
        if (!qdio)
                return;

        tasklet_kill(&qdio->irq_tasklet);
        tasklet_kill(&qdio->request_tasklet);

        if (qdio->adapter->ccw_device)
                qdio_free(qdio->adapter->ccw_device);

        qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
        qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
        kfree(qdio);
}

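/* Allocate and initialize the zfcp_qdio part of an adapter; both tasklets
 * stay disabled until zfcp_qdio_open() enables them.
 */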
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
        struct zfcp_qdio *qdio;

        qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
        if (!qdio)
                return -ENOMEM;

        qdio->adapter = adapter;

        if (zfcp_qdio_allocate(qdio)) {
                kfree(qdio);
                return -ENOMEM;
        }

        spin_lock_init(&qdio->req_q_lock);
        spin_lock_init(&qdio->stat_lock);
        timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
        tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
        tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
        tasklet_disable(&qdio->irq_tasklet);
        tasklet_disable(&qdio->request_tasklet);

        adapter->qdio = qdio;
        return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
        int rc;

        if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
                return;

        rc = ccw_device_siosl(adapter->ccw_device);
        if (!rc)
                atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
                          &adapter->status);
}