linux/drivers/s390/scsi/zfcp_dbf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
                 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
                 "log level for each debug feature area "
                 "(default 3, range 0..6)");
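/*
 * Both parameters use permission 0400, so they are read-only once the driver
 * is loaded. As an illustrative example (not part of this file), they could
 * be set at load time, e.g. "zfcp.dbfsize=8 zfcp.dbflevel=5" on the kernel
 * command line, or "modprobe zfcp dbfsize=8 dbflevel=5" when zfcp is built
 * as a module.
 */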

static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
        return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

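/*
 * zfcp_dbf_pl_write() - write a payload of arbitrary length to the "pay"
 * trace area, split into chunks of at most ZFCP_DBF_PAY_MAX_REC bytes;
 * zfcp_dbf_plen() yields the trace record length actually used by each
 * chunk. All chunks carry the same @req_id and @area tag plus an
 * incrementing counter so they can be correlated and reassembled later.
 */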
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
                       u64 req_id)
{
        struct zfcp_dbf_pay *pl = &dbf->pay_buf;
        u16 offset = 0, rec_length;

        spin_lock(&dbf->pay_lock);
        memset(pl, 0, sizeof(*pl));
        pl->fsf_req_id = req_id;
        memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

        while (offset < length) {
                rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
                                 (u16) (length - offset));
                memcpy(pl->data, data + offset, rec_length);
                debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

                offset += rec_length;
                pl->counter++;
        }

        spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
        struct fsf_qtcb_header *q_head = &req->qtcb->header;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        unsigned long flags;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_RES;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = q_head->fsf_command;
        rec->fsf_seq_no = q_pref->req_seq_no;
        rec->u.res.req_issued = req->issued;
        rec->u.res.prot_status = q_pref->prot_status;
        rec->u.res.fsf_status = q_head->fsf_status;
        rec->u.res.port_handle = q_head->port_handle;
        rec->u.res.lun_handle = q_head->lun_handle;

        memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
               FSF_PROT_STATUS_QUAL_SIZE);
        memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
               FSF_STATUS_QUALIFIER_SIZE);

        rec->pl_len = q_head->log_length;
        zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
                          rec->pl_len, "fsf_res", req->req_id);

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
 *                         FC Endpoint Security (FCES)
 * @tag: tag indicating which kind of FC Endpoint Security event has occurred
 * @req: request for which a response was received
 * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
 * @fc_security_old: old FC Endpoint Security of FCP device or connection
 * @fc_security_new: new FC Endpoint Security of FCP device or connection
 */
void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
                           u32 fc_security_old, u32 fc_security_new)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
        struct fsf_qtcb_header *q_head = &req->qtcb->header;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        static int const level = 3;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_FCES;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = q_head->fsf_command;
        rec->fsf_seq_no = q_pref->req_seq_no;
        rec->u.fces.req_issued = req->issued;
        rec->u.fces.fsf_status = q_head->fsf_status;
        rec->u.fces.port_handle = q_head->port_handle;
        rec->u.fces.wwpn = wwpn;
        rec->u.fces.fc_security_old = fc_security_old;
        rec->u.fces.fc_security_new = fc_security_new;

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct fsf_status_read_buffer *srb = req->data;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        static int const level = 2;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_USS;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;

        if (!srb)
                goto log;

        rec->u.uss.status_type = srb->status_type;
        rec->u.uss.status_subtype = srb->status_subtype;
        rec->u.uss.d_id = ntoh24(srb->d_id);
        rec->u.uss.lun = srb->fcp_lun;
        memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
               sizeof(rec->u.uss.queue_designator));

        /* status read buffer payload length */
        rec->pl_len = (!srb->length) ? 0 : srb->length -
                        offsetof(struct fsf_status_read_buffer, payload);

        if (rec->pl_len)
                zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
                                  "fsf_uss", req->req_id);
log:
        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of bit error unsolicited status was received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
        struct zfcp_dbf *dbf = req->adapter->dbf;
        struct zfcp_dbf_hba *rec = &dbf->hba_buf;
        struct fsf_status_read_buffer *sr_buf = req->data;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->hba, level)))
                return;

        spin_lock_irqsave(&dbf->hba_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_HBA_BIT;
        rec->fsf_req_id = req->req_id;
        rec->fsf_req_status = req->status;
        rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
        memcpy(&rec->u.be, &sr_buf->payload.bit_error,
               sizeof(struct fsf_bit_error_payload));

        debug_event(dbf->hba, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
                          void **pl)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        unsigned long flags;
        static int const level = 1;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->pay, level)))
                return;

        if (!pl)
                return;

        spin_lock_irqsave(&dbf->pay_lock, flags);
        memset(payload, 0, sizeof(*payload));

        memcpy(payload->area, "def_err", 7);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        length = min((u16)sizeof(struct qdio_buffer),
                     (u16)ZFCP_DBF_PAY_MAX_REC);

        while (payload->counter < scount && (char *)pl[payload->counter]) {
                memcpy(payload->data, (char *)pl[payload->counter], length);
                debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
                payload->counter++;
        }

        spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

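/*
 * zfcp_dbf_set_common() - fill the adapter, port and LUN status fields shared
 * by all recovery trace records; @port and @sdev may be NULL, in which case
 * the LUN is marked as invalid.
 */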
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
                                struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
                                struct scsi_device *sdev)
{
        rec->adapter_status = atomic_read(&adapter->status);
        if (port) {
                rec->port_status = atomic_read(&port->status);
                rec->wwpn = port->wwpn;
                rec->d_id = port->d_id;
        }
        if (sdev) {
                rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
                rec->lun = zfcp_scsi_dev_lun(sdev);
        } else
                rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
                       struct zfcp_port *port, struct scsi_device *sdev,
                       u8 want, u8 need)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        static int const level = 1;
        struct list_head *entry;
        unsigned long flags;

        lockdep_assert_held(&adapter->erp_lock);

        if (unlikely(!debug_level_enabled(dbf->rec, level)))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_TRIG;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, adapter, port, sdev);

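        /* snapshot the current lengths of the ready and running erp queues */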
        list_for_each(entry, &adapter->erp_ready_head)
                rec->u.trig.ready++;

        list_for_each(entry, &adapter->erp_running_head)
                rec->u.trig.running++;

        rec->u.trig.want = want;
        rec->u.trig.need = need;

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock must not be held.
 */
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
                            struct zfcp_port *port, struct scsi_device *sdev,
                            u8 want, u8 need)
{
        unsigned long flags;

        read_lock_irqsave(&adapter->erp_lock, flags);
        zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
        read_unlock_irqrestore(&adapter->erp_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
        struct zfcp_dbf *dbf = erp->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        unsigned long flags;

        if (!debug_level_enabled(dbf->rec, level))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

        rec->u.run.fsf_req_id = erp->fsf_req_id;
        rec->u.run.rec_status = erp->status;
        rec->u.run.rec_step = erp->step;
        rec->u.run.rec_action = erp->type;

        if (erp->sdev)
                rec->u.run.rec_count =
                        atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
        else if (erp->port)
                rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
        else
                rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
        zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
                          u64 req_id)
{
        struct zfcp_dbf *dbf = wka_port->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
        static int const level = 1;
        unsigned long flags;

        if (unlikely(!debug_level_enabled(dbf->rec, level)))
                return;

        spin_lock_irqsave(&dbf->rec_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = ZFCP_DBF_REC_RUN;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->port_status = wka_port->status;
        rec->d_id = wka_port->d_id;
        rec->lun = ZFCP_DBF_INVALID_LUN;

        rec->u.run.fsf_req_id = req_id;
        rec->u.run.rec_status = ~0;
        rec->u.run.rec_step = ~0;
        rec->u.run.rec_action = ~0;
        rec->u.run.rec_count = ~0;

        debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1

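/*
 * zfcp_dbf_san() - common helper for SAN trace records: the beginning of the
 * payload (capped at ZFCP_DBF_SAN_MAX_PAYLOAD) is stored in the SAN record
 * itself; anything beyond that, up to @cap_len, is dumped into additional
 * "pay" records that are correlated via @req_id and @paytag.
 */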
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
                  char *paytag, struct scatterlist *sg, u8 id, u16 len,
                  u64 req_id, u32 d_id, u16 cap_len)
{
        struct zfcp_dbf_san *rec = &dbf->san_buf;
        u16 rec_len;
        unsigned long flags;
        struct zfcp_dbf_pay *payload = &dbf->pay_buf;
        u16 pay_sum = 0;

        spin_lock_irqsave(&dbf->san_lock, flags);
        memset(rec, 0, sizeof(*rec));

        rec->id = id;
        rec->fsf_req_id = req_id;
        rec->d_id = d_id;
        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->pl_len = len; /* full length even if we cap pay below */
        if (!sg)
                goto out;
        rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
        memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
        if (len <= rec_len)
                goto out; /* skip pay record if full content in rec->payload */

        /* if (len > rec_len):
         * dump data up to cap_len ignoring small duplicate in rec->payload
         */
        spin_lock(&dbf->pay_lock);
        memset(payload, 0, sizeof(*payload));
        memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
        payload->fsf_req_id = req_id;
        payload->counter = 0;
        for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
                u16 pay_len, offset = 0;

                while (offset < sg->length && pay_sum < cap_len) {
                        pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
                                      (u16)(sg->length - offset));
                        /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
                        memcpy(payload->data, sg_virt(sg) + offset, pay_len);
                        debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
                                    zfcp_dbf_plen(pay_len));
                        payload->counter++;
                        offset += pay_len;
                        pay_sum += pay_len;
                }
        }
        spin_unlock(&dbf->pay_lock);

out:
        debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: N_Port_ID where SAN request is sent to
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)zfcp_qdio_real_bytes(ct_els->req);
        zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
                     length, fsf->req_id, d_id, length);
}

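/*
 * zfcp_dbf_san_res_cap_len_if_gpn_ft() - heuristically detect whether the
 * traced response belongs to a GPN_FT name server request and, if so, cap the
 * traced payload length after the entry flagged FC_NS_FID_LAST. Non-GPN_FT
 * traffic is returned uncapped; rejected or failed GPN_FT responses are
 * capped close to the CT header.
 */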
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
                                              struct zfcp_fsf_req *fsf,
                                              u16 len)
{
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
        struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
        struct scatterlist *resp_entry = ct_els->resp;
        struct fc_ct_hdr *resph;
        struct fc_gpn_ft_resp *acc;
        int max_entries, x, last = 0;

        if (!(memcmp(tag, "fsscth2", 7) == 0
              && ct_els->d_id == FC_FID_DIR_SERV
              && reqh->ct_rev == FC_CT_REV
              && reqh->ct_in_id[0] == 0
              && reqh->ct_in_id[1] == 0
              && reqh->ct_in_id[2] == 0
              && reqh->ct_fs_type == FC_FST_DIR
              && reqh->ct_fs_subtype == FC_NS_SUBTYPE
              && reqh->ct_options == 0
              && reqh->_ct_resvd1 == 0
              && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
              /* reqh->ct_mr_size can vary so do not match but read below */
              && reqh->_ct_resvd2 == 0
              && reqh->ct_reason == 0
              && reqh->ct_explan == 0
              && reqh->ct_vendor == 0
              && reqn->fn_resvd == 0
              && reqn->fn_domain_id_scope == 0
              && reqn->fn_area_id_scope == 0
              && reqn->fn_fc4_type == FC_TYPE_FCP))
                return len; /* not GPN_FT response so do not cap */

        acc = sg_virt(resp_entry);

        /* cap all but accept CT responses to at least the CT header */
        resph = (struct fc_ct_hdr *)acc;
        if ((ct_els->status) ||
            (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
                return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

        max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
                       sizeof(struct fc_gpn_ft_resp))
                + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
                     * to account for header as 1st pseudo "entry" */;

        /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
         * response, allowing us to skip special handling for it - just skip it
         */
        for (x = 1; x < max_entries && !last; x++) {
                if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
                        acc++;
                else
                        acc = sg_virt(++resp_entry);

                last = acc->fp_flags & FC_NS_FID_LAST;
        }
        len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
        return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT or ELS data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct zfcp_fsf_ct_els *ct_els = fsf->data;
        u16 length;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
        zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
                     length, fsf->req_id, ct_els->d_id,
                     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing received ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
        struct zfcp_dbf *dbf = fsf->adapter->dbf;
        struct fsf_status_read_buffer *srb =
                (struct fsf_status_read_buffer *) fsf->data;
        u16 length;
        struct scatterlist sg;

        if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
                return;

        length = (u16)(srb->length -
                        offsetof(struct fsf_status_read_buffer, payload));
        sg_init_one(&sg, srb->payload.data, length);
        zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
                     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
 * @tag: Identifier for event.
 * @level: trace level of event.
 * @sdev: Pointer to SCSI device as context for this event.
 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
 * @fsf: Pointer to FSF request, or NULL.
 */
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
                          struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
        struct zfcp_adapter *adapter =
                (struct zfcp_adapter *) sdev->host->hostdata[0];
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
        struct fcp_resp_with_ext *fcp_rsp;
        struct fcp_resp_rsp_info *fcp_rsp_info;
        unsigned long flags;

        spin_lock_irqsave(&dbf->scsi_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_SCSI_CMND;
        if (sc) {
                rec->scsi_result = sc->result;
                rec->scsi_retries = sc->retries;
                rec->scsi_allowed = sc->allowed;
                rec->scsi_id = sc->device->id;
                rec->scsi_lun = (u32)sc->device->lun;
                rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
                rec->host_scribble = (unsigned long)sc->host_scribble;

                memcpy(rec->scsi_opcode, sc->cmnd,
                       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
        } else {
                rec->scsi_result = ~0;
                rec->scsi_retries = ~0;
                rec->scsi_allowed = ~0;
                rec->scsi_id = sdev->id;
                rec->scsi_lun = (u32)sdev->lun;
                rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
                rec->host_scribble = ~0;

                memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
        }

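        /* with an FSF request, also capture the FCP_RSP IU for this command */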
        if (fsf) {
                rec->fsf_req_id = fsf->req_id;
                rec->pl_len = FCP_RESP_WITH_EXT;
                fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
                /* mandatory parts of FCP_RSP IU in this SCSI record */
                memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
                if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
                        fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
                        rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
                        rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
                }
                if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL)
                        rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
                /* complete FCP_RSP IU in associated PAYload record
                 * but only if there are optional parts
                 */
                if (fcp_rsp->resp.fr_flags != 0)
                        zfcp_dbf_pl_write(
                                dbf, fcp_rsp,
                                /* at least one full PAY record
                                 * but not beyond hardware response field
                                 */
                                min_t(u16, max_t(u16, rec->pl_len,
                                                 ZFCP_DBF_PAY_MAX_REC),
                                      FSF_FCP_RSP_SIZE),
                                "fcp_riu", fsf->req_id);
        }

        debug_event(dbf->scsi, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

/**
 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
 * @tag: Identifier for event.
 * @adapter: Pointer to zfcp adapter as context for this event.
 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
 * @ret: Return value of calling function.
 *
 * This SCSI trace variant does not depend on any of:
 * scsi_cmnd, zfcp_fsf_req, scsi_device.
 */
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
                      unsigned int scsi_id, int ret)
{
        struct zfcp_dbf *dbf = adapter->dbf;
        struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
        unsigned long flags;
        static int const level = 1;

        if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
                return;

        spin_lock_irqsave(&dbf->scsi_lock, flags);
        memset(rec, 0, sizeof(*rec));

        memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
        rec->id = ZFCP_DBF_SCSI_CMND;
        rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
        rec->scsi_retries = ~0;
        rec->scsi_allowed = ~0;
        rec->fcp_rsp_info = ~0;
        rec->scsi_id = scsi_id;
        rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
        rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
        rec->host_scribble = ~0;
        memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

        debug_event(dbf->scsi, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

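/*
 * zfcp_dbf_reg() - register one s390 debug feature area with a hex/ascii view
 * and the configured dbflevel. With debugfs mounted in the usual place, the
 * resulting traces are typically readable under
 * /sys/kernel/debug/s390dbf/<name>/hex_ascii (path given for illustration;
 * it depends on where debugfs is mounted).
 */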
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
        struct debug_info *d;

        d = debug_register(name, size, 1, rec_size);
        if (!d)
                return NULL;

        debug_register_view(d, &debug_hex_ascii_view);
        debug_set_level(d, dbflevel);

        return d;
}

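/* tear down all debug feature areas of an adapter; tolerates a NULL dbf */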
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
        if (!dbf)
                return;

        debug_unregister(dbf->scsi);
        debug_unregister(dbf->san);
        debug_unregister(dbf->hba);
        debug_unregister(dbf->pay);
        debug_unregister(dbf->rec);
        kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug features for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
        char name[DEBUG_MAX_NAME_LEN];
        struct zfcp_dbf *dbf;

        dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
        if (!dbf)
                return -ENOMEM;

        spin_lock_init(&dbf->pay_lock);
        spin_lock_init(&dbf->hba_lock);
        spin_lock_init(&dbf->san_lock);
        spin_lock_init(&dbf->scsi_lock);
        spin_lock_init(&dbf->rec_lock);

        /* debug feature area which records recovery activity */
        sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
        dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
        if (!dbf->rec)
                goto err_out;

        /* debug feature area which records HBA (FSF and QDIO) conditions */
        sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
        dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
        if (!dbf->hba)
                goto err_out;

        /* debug feature area which records payload info */
        sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
        dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
        if (!dbf->pay)
                goto err_out;

        /* debug feature area which records SAN command failures and recovery */
        sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
        dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
        if (!dbf->san)
                goto err_out;

        /* debug feature area which records SCSI command failures and recovery */
        sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
        dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
        if (!dbf->scsi)
                goto err_out;

        adapter->dbf = dbf;

        return 0;
err_out:
        zfcp_dbf_unregister(dbf);
        return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug features for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
        struct zfcp_dbf *dbf = adapter->dbf;

        adapter->dbf = NULL;
        zfcp_dbf_unregister(dbf);
}