linux/drivers/target/target_core_alua.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
                                                 int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
                struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
                struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
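/*
 * A sketch of the parameter data this handler builds, inferred from the
 * offsets used below (see sbc3r35 for the authoritative format):
 *
 *   bytes 0-1: reserved, bytes 2-3: RETURN DATA LENGTH, then per user
 *   data segment referral descriptor: byte 3 = number of target port
 *   group entries, bytes 4-11 = FIRST LBA, bytes 12-19 = LAST LBA,
 *   followed by 4-byte entries holding the asymmetric access state
 *   (byte 0, bits 3:0) and the TARGET PORT GROUP (bytes 2-3).
 */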
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_lba_map *map;
        struct t10_alua_lba_map_member *map_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;

        if (cmd->data_length < 4) {
                pr_warn("REPORT REFERRALS allocation length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_CDB_FIELD;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        off = 4;
        spin_lock(&dev->t10_alua.lba_map_lock);
        if (list_empty(&dev->t10_alua.lba_map_list)) {
                spin_unlock(&dev->t10_alua.lba_map_lock);
                transport_kunmap_data_sg(cmd);

                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                            lba_map_list) {
                int desc_num = off + 3;
                int pg_num;

                off += 4;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
                off += 8;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
                off += 8;
                rd_len += 20;
                pg_num = 0;
                list_for_each_entry(map_mem, &map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        int alua_state = map_mem->lba_map_mem_alua_state;
                        int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

                        if (cmd->data_length > off)
                                buf[off] = alua_state & 0x0f;
                        off += 2;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id >> 8) & 0xff;
                        off++;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id & 0xff);
                        off++;
                        rd_len += 4;
                        pg_num++;
                }
                if (cmd->data_length > desc_num)
                        buf[desc_num] = pg_num;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);

        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN Payload
         */
        put_unaligned_be16(rd_len, &buf[2]);

        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
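/*
 * A sketch of the returned parameter data as assembled below (spc4r17
 * section 6.27 is authoritative): a 4-byte length header (8 bytes for
 * the extended format, where byte 4 carries the format/flags and byte 5
 * the implicit transition time), then one 8-byte descriptor per target
 * port group: byte 0 = PREF (bit 7) | asymmetric access state (bits 3:0),
 * byte 1 = supported states, bytes 2-3 = TARGET PORT GROUP, byte 5 =
 * STATUS CODE, byte 7 = TARGET PORT COUNT, followed by one 4-byte entry
 * per port holding its RELATIVE TARGET PORT IDENTIFIER in the last two
 * bytes.
 */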
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct se_lun *lun;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

        /*
         * Skip over RESERVED area to first Target port group descriptor
         * depending on the PARAMETER DATA FORMAT type.
         */
        if (ext_hdr != 0)
                off = 8;
        else
                off = 4;

        if (cmd->data_length < off) {
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
                return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor list
                 * based on tg_pt_gp_members count will fit into the response payload.
                 * Otherwise, bump rd_len to let the initiator know we have exceeded
                 * the allocation length and the response is truncated.
                 */
                if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
                     cmd->data_length) {
                        rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
                        continue;
                }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
                 */
                if (tg_pt_gp->tg_pt_gp_pref)
                        buf[off] = 0x80;
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
                /*
                 * TARGET PORT GROUP
                 */
                put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
                off += 2;

                off++; /* Skip over Reserved */
                /*
                 * STATUS CODE
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
                /*
                 * Vendor Specific field
                 */
                buf[off++] = 0x00;
                /*
                 * TARGET PORT COUNT
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
                rd_len += 8;

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                        /*
                         * Start Target Port descriptor format
                         *
                         * See spc4r17 section 6.2.7 Table 247
                         */
                        off += 2; /* Skip over Obsolete */
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
                        put_unaligned_be16(lun->lun_rtpi, &buf[off]);
                        off += 2;
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN Payload
         */
        put_unaligned_be32(rd_len, &buf[0]);

        /*
         * Fill in the Extended header parameter data format if requested
         */
        if (ext_hdr != 0) {
                buf[4] = 0x10;
                /*
                 * Set the implicit transition time (in seconds) for the application
                 * client to use as a base for its transition timeout value.
                 *
                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
                 * this CDB was received upon to determine this value individually
                 * for the ALUA target port group.
                 */
                spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
                tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
                if (tg_pt_gp)
                        buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
                spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
        }
        transport_kunmap_data_sg(cmd);

        target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
        return 0;
}

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
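/*
 * The parameter list parsed below consists of a 4-byte reserved header
 * followed by 4-byte set target port groups descriptors: byte 0 carries
 * the requested asymmetric access state in bits 3:0, and bytes 2-3
 * carry either the TARGET PORT GROUP (for primary states) or the
 * RELATIVE TARGET PORT IDENTIFIER (for secondary states).
 */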
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_lun *l_lun = cmd->se_lun;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;

        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_PARAMETER_LIST;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
        spin_lock(&l_lun->lun_tg_pt_gp_lock);
        l_tg_pt_gp = l_lun->lun_tg_pt_gp;
        if (!l_tg_pt_gp) {
                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_err("Unable to access l_lun->tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }

        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        spin_unlock(&l_lun->lun_tg_pt_gp_lock);

        ptr = &buf[4]; /* Skip over RESERVED area in header */

        while (len < cmd->data_length) {
                bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state, valid_states,
                                                &primary, 1);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
                         * access states or attempts to establish an
                         * unsupported target port asymmetric access state,
                         * then the command shall be terminated with CHECK
                         * CONDITION status, with the sense key set to ILLEGAL
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
                        goto out;
                }

                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
                 * then the TARGET PORT GROUP OR TARGET PORT field specifies
                 * a primary target port group for which the primary target
                 * port asymmetric access state shall be changed. If the
                 * ASYMMETRIC ACCESS STATE field specifies a secondary target
                 * port asymmetric access state, then the TARGET PORT GROUP OR
                 * TARGET PORT field specifies the relative target port
                 * identifier (see 3.1.120) of the target port for which the
                 * secondary target port asymmetric access state shall be
                 * changed.
                 */
                if (primary) {
                        tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
                                        &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;

                                if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
                                        continue;

                                atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                                if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_lun, nacl,
                                                alua_access_state, 1))
                                        found = true;

                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        struct se_lun *lun;

                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
                         * the Target Port in question for the incoming
                         * SET_TARGET_PORT_GROUPS op.
                         */
                        rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
                        list_for_each_entry(lun, &dev->dev_sep_list,
                                                        lun_dev_link) {
                                if (lun->lun_rtpi != rtpi)
                                        continue;

                                // XXX: racy unlock
                                spin_unlock(&dev->se_port_lock);

                                if (!core_alua_set_tg_pt_secondary_state(
                                                lun, 1, 1))
                                        found = true;

                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
                }

                if (!found) {
                        rc = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }

                ptr += 4;
                len += 4;
        }

out:
        transport_kunmap_data_sg(cmd);
        if (!rc)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
        return rc;
}

static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
{
        /*
         * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
         * The ALUA additional sense code qualifier (ASCQ) is determined
         * by the ALUA primary or secondary access state.
         */
        pr_debug("[%s]: ALUA TG Port not available, "
                "SenseKey: NOT_READY, ASC/ASCQ: "
                "0x04/0x%02x\n",
                cmd->se_tfo->fabric_name, alua_ascq);

        cmd->scsi_asc = 0x04;
        cmd->scsi_ascq = alua_ascq;
}

static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
        int nonop_delay_msecs)
{
        /*
         * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
         * later to determine if processing of this cmd needs to be
         * temporarily delayed for the Active/NonOptimized primary access state.
         */
        cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
        cmd->alua_nonop_delay = nonop_delay_msecs;
}
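/*
 * Note that no delay is taken here; core_alua_check_nonop_delay(),
 * below, performs the actual msleep when invoked by the fabric module.
 */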

static inline int core_alua_state_lba_dependent(
        struct se_cmd *cmd,
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;

        /* Only need to check for cdb actually containing LBAs */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
                return 0;

        spin_lock(&dev->t10_alua.lba_map_lock);
        segment_size = dev->t10_alua.lba_map_segment_size;
        segment_mult = dev->t10_alua.lba_map_segment_multiplier;
        sectors = cmd->data_length / dev->dev_attrib.block_size;

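        /*
         * When segment_mult is set, the map describes a single region of
         * (segment_size * segment_mult) blocks that repeats across the
         * LUN, so each LBA is first reduced to its offset within that
         * region before being matched against the map entries below.
         * A worked example with assumed values: segment_size = 64 and
         * segment_mult = 4 give a 256 block region, so LBA 300 reduces
         * to offset 44 and matches an entry covering LBAs 0-63.
         */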
        lba = cmd->t_task_lba;
        while (lba < cmd->t_task_lba + sectors) {
                struct t10_alua_lba_map *cur_map = NULL, *map;
                struct t10_alua_lba_map_member *map_mem;

                list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                                    lba_map_list) {
                        u64 start_lba, last_lba;
                        u64 first_lba = map->lba_map_first_lba;

                        if (segment_mult) {
                                u64 tmp = lba;
                                start_lba = do_div(tmp, segment_size * segment_mult);

                                last_lba = first_lba + segment_size - 1;
                                if (start_lba >= first_lba &&
                                    start_lba <= last_lba) {
                                        lba += segment_size;
                                        cur_map = map;
                                        break;
                                }
                        } else {
                                last_lba = map->lba_map_last_lba;
                                if (lba >= first_lba && lba <= last_lba) {
                                        lba = last_lba + 1;
                                        cur_map = map;
                                        break;
                                }
                        }
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        if (map_mem->lba_map_mem_alua_pg_id !=
                            tg_pt_gp->tg_pt_gp_id)
                                continue;
                        switch (map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                                return 1;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                                return 1;
                        default:
                                break;
                        }
                }
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        return 0;
}

static inline int core_alua_state_standby(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
         * spc4r17 section 5.9.2.4.4
         */
        switch (cdb[0]) {
        case INQUIRY:
        case LOG_SELECT:
        case LOG_SENSE:
        case MODE_SELECT:
        case MODE_SENSE:
        case REPORT_LUNS:
        case RECEIVE_DIAGNOSTIC:
        case SEND_DIAGNOSTIC:
        case READ_CAPACITY:
                return 0;
        case SERVICE_ACTION_IN_16:
                switch (cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case REQUEST_SENSE:
        case PERSISTENT_RESERVE_IN:
        case PERSISTENT_RESERVE_OUT:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_unavailable(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
         * spc4r17 section 5.9.2.4.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_transition(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
         * spc4r17 section 5.9.2.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                return 1;
        }

        return 0;
}

/*
 * Returns 0 when the command may be processed in the current ALUA access
 * state, TCM_CHECK_CONDITION_NOT_READY when the LUN is not accessible,
 * or TCM_INVALID_CDB_FIELD on an unknown access state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int out_alua_state, nonop_delay_msecs;

        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
                return 0;

        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
        if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
                return TCM_CHECK_CONDITION_NOT_READY;
        }

        if (!lun->lun_tg_pt_gp)
                return 0;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;

        // XXX: keeps using tg_pt_gp without reference after unlock
        spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
         * For the Optimized ALUA access state case, we want to process the
         * incoming fabric cmd ASAP.
         */
        if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
                return 0;

        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (core_alua_state_standby(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (core_alua_state_unavailable(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                if (core_alua_state_transition(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
         * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
                return TCM_INVALID_CDB_FIELD;
        }

        return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
         * defined as primary target port asymmetric access states.
         */
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                if (!(valid & ALUA_AO_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                if (!(valid & ALUA_AN_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (!(valid & ALUA_S_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (!(valid & ALUA_U_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (!(valid & ALUA_LBD_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_OFFLINE:
                /*
                 * OFFLINE state is defined as a secondary target port
                 * asymmetric access state.
                 */
                if (!(valid & ALUA_O_SUP))
                        goto not_supported;
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                if (!(valid & ALUA_T_SUP) || explicit)
                        /*
                         * Transitioning is set internally and by tcmu daemon,
                         * and cannot be selected through a STPG.
                         */
                        goto not_supported;
                *primary = 0;
                break;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
        }

        return 0;

not_supported:
        pr_err("ALUA access state %s not supported\n",
               core_alua_dump_state(state));
        return TCM_INVALID_PARAMETER_LIST;
}
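/*
 * The "valid" argument above is the group's tg_pt_gp_alua_supported_states
 * mask; the ALUA_*_SUP bits mirror the supported states byte that
 * target_emulate_report_target_port_groups() copies into each descriptor.
 */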

static char *core_alua_dump_state(int state)
{
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                return "Active/Optimized";
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                return "Active/NonOptimized";
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                return "LBA Dependent";
        case ALUA_ACCESS_STATE_STANDBY:
                return "Standby";
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                return "Unavailable";
        case ALUA_ACCESS_STATE_OFFLINE:
                return "Offline";
        case ALUA_ACCESS_STATE_TRANSITION:
                return "Transitioning";
        default:
                return "Unknown";
        }

        return NULL;
}

char *core_alua_dump_status(int status)
{
        switch (status) {
        case ALUA_STATUS_NONE:
                return "None";
        case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
                return "Altered by Explicit STPG";
        case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
                return "Altered by Implicit ALUA";
        default:
                return "Unknown";
        }

        return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
        struct se_cmd *cmd)
{
        if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
                return 0;
        /*
         * The ALUA Active/NonOptimized access state delay can be disabled
         * via configfs with a value of zero
         */
        if (!cmd->alua_nonop_delay)
                return 0;
        /*
         * struct se_cmd->alua_nonop_delay gets set by a target port group
         * defined interval in core_alua_state_nonoptimized()
         */
        msleep_interruptible(cmd->alua_nonop_delay);
        return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

static int core_alua_write_tpg_metadata(
        const char *path,
        unsigned char *md_buf,
        u32 md_buf_len)
{
        struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
        loff_t pos = 0;
        int ret;

        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for ALUA metadata failed\n", path);
                return -ENODEV;
        }
        ret = kernel_write(file, md_buf, md_buf_len, &pos);
        if (ret < 0)
                pr_err("Error writing ALUA metadata file: %s\n", path);
        fput(file);
        return (ret < 0) ? -EIO : 0;
}

static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
        char *path;
        int len, rc;

        lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                return -ENOMEM;
        }

        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
                        tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);

        rc = -ENOMEM;
        path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
                        &wwn->unit_serial[0],
                        config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
        if (path) {
                rc = core_alua_write_tpg_metadata(path, md_buf, len);
                kfree(path);
        }
        kfree(md_buf);
        return rc;
}
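/*
 * The resulting file lives under $db_root/alua/tpgs_$unit_serial/$gp_name
 * and, with illustrative values only, would contain:
 *
 *   tg_pt_gp_id=1
 *   alua_access_state=0x00
 *   alua_access_status=0x02
 */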

static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_dev_entry *se_deve;
        struct se_lun *lun;
        struct se_lun_acl *lacl;

        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                /*
                 * After an implicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition for the initiator port associated with every I_T
                 * nexus with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED.
                 *
                 * After an explicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED for the initiator port associated with
                 * every I_T nexus other than the I_T nexus on which the SET
                 * TARGET PORT GROUPS command was received.
                 */
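                /*
                 * Take a lun_ref so *lun cannot be released while
                 * tg_pt_gp_lock is dropped to walk the per-LUN deve list
                 * below; the lock is retaken before resuming the outer
                 * list walk.
                 */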
                if (!percpu_ref_tryget_live(&lun->lun_ref))
                        continue;
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                spin_lock(&lun->lun_deve_lock);
                list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
                        lacl = rcu_dereference_check(se_deve->se_lun_acl,
                                        lockdep_is_held(&lun->lun_deve_lock));

                        /*
                         * spc4r37 p.242:
                         * After an explicit target port asymmetric access
                         * state change, a device server shall establish a
                         * unit attention condition with the additional sense
                         * code set to ASYMMETRIC ACCESS STATE CHANGED for
                         * the initiator port associated with every I_T nexus
                         * other than the I_T nexus on which the SET TARGET
                         * PORT GROUPS command was received.
                         */
                        if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
                           (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_lun == lun))
                                continue;

                        /*
                         * se_deve->se_lun_acl pointer may be NULL for an
                         * entry created without explicit Node+MappedLUN ACLs
                         */
                        if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
                                continue;

                        core_scsi3_ua_allocate(se_deve, 0x2A,
                                ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
                }
                spin_unlock(&lun->lun_deve_lock);

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                percpu_ref_put(&lun->lun_ref);
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
{
        int prev_state;

        mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        /* Nothing to be done here */
        if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
                mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
        }

        if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
                mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return -EAGAIN;
        }

        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
        prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        core_alua_queue_state_change_ua(tg_pt_gp);

        if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
                mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
        }

        /*
         * Check for the optional ALUA primary state transition delay
         */
        if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

        /*
         * Set the current primary ALUA access state to the requested new state
         */
        tg_pt_gp->tg_pt_gp_alua_access_state = new_state;

        /*
         * Update the ALUA metadata buf that has been allocated in
         * core_alua_do_port_transition(), this metadata will be written
         * to struct file.
         *
         * Note that there is the case where we do not want to update the
         * metadata when the saved metadata is being parsed in userspace
         * when setting the existing port access state and access status.
         *
         * Also note that the failure to write out the ALUA metadata to
         * struct file does NOT affect the actual ALUA transition.
         */
        if (tg_pt_gp->tg_pt_gp_write_metadata) {
                core_alua_update_tpg_primary_metadata(tg_pt_gp);
        }

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " from primary access state %s to %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id,
                core_alua_dump_state(prev_state),
                core_alua_dump_state(new_state));

        core_alua_queue_state_change_ua(tg_pt_gp);

        mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        return 0;
}

int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *l_tg_pt_gp,
        struct se_device *l_dev,
        struct se_lun *l_lun,
        struct se_node_acl *l_nacl,
        int new_state,
        int explicit)
{
        struct se_device *dev;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;

        if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
                return -ENODEV;

        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        if (core_alua_check_transition(new_state, valid_states, &primary,
                                       explicit) != 0)
                return -EINVAL;

        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
        spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
        lu_gp = local_lu_gp_mem->lu_gp;
        atomic_inc(&lu_gp->lu_gp_ref_cnt);
        spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
        /*
         * For storage objects that are members of the 'default_lu_gp',
         * we only do transition on the passed *l_tg_pt_gp, and not
         * on all of the matching target port group IDs in default_lu_gp.
         */
        if (!lu_gp->lu_gp_id) {
                /*
                 * core_alua_do_transition_tg_pt() will always return
                 * success.
                 */
                l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
                atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
                return rc;
        }
        /*
         * For all other LU groups aside from 'default_lu_gp', walk all of
         * the associated storage objects looking for a matching target port
         * group ID from the local target port group.
         */
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
                                lu_gp_mem_list) {

                dev = lu_gp_mem->lu_gp_mem_dev;
                atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
                spin_unlock(&lu_gp->lu_gp_lock);

                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                list_for_each_entry(tg_pt_gp,
                                &dev->t10_alua.tg_pt_gps_list,
                                tg_pt_gp_list) {

                        if (!tg_pt_gp->tg_pt_gp_valid_id)
                                continue;
                        /*
                         * If the target port asymmetric access state is
                         * changed for any target port group accessible via
                         * a logical unit within a LU group, the asymmetric
                         * access state for the same target port group
                         * accessible via other logical units in that LU
                         * group will also change.
                         */
                        if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
                                continue;

                        if (l_tg_pt_gp == tg_pt_gp) {
                                tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                                tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                        } else {
                                tg_pt_gp->tg_pt_gp_alua_lun = NULL;
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
                         * success.
                         */
                        rc = core_alua_do_transition_tg_pt(tg_pt_gp,
                                        new_state, explicit);

                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        if (rc)
                                break;
                }
                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
        }
        spin_unlock(&lu_gp->lu_gp_lock);

        if (!rc) {
                pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
                         " Group IDs: %hu %s transition to primary state: %s\n",
                         config_item_name(&lu_gp->lu_gp_group.cg_item),
                         l_tg_pt_gp->tg_pt_gp_id,
                         (explicit) ? "explicit" : "implicit",
                         core_alua_dump_state(new_state));
        }

        atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
        return rc;
}

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
        struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
        char *path;
        int len, rc;

        mutex_lock(&lun->lun_tg_pt_md_mutex);

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                rc = -ENOMEM;
                goto out_unlock;
        }

        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&lun->lun_tg_pt_secondary_offline),
                        lun->lun_tg_pt_secondary_stat);

        if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
                path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
                                db_root, se_tpg->se_tpg_tfo->fabric_name,
                                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
                                lun->unpacked_lun);
        } else {
                path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
                                db_root, se_tpg->se_tpg_tfo->fabric_name,
                                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                                lun->unpacked_lun);
        }
        if (!path) {
                rc = -ENOMEM;
                goto out_free;
        }

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(path);
out_free:
        kfree(md_buf);
out_unlock:
        mutex_unlock(&lun->lun_tg_pt_md_mutex);
        return rc;
}
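/*
 * As with the primary metadata above, the file lands under
 * $db_root/alua/$fabric_name/$wwn+$tag/lun_$n (the +$tag part only when
 * the fabric implements tpg_get_tag) and, with illustrative values,
 * would contain:
 *
 *   alua_tg_pt_offline=1
 *   alua_tg_pt_status=0x01
 */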

static int core_alua_set_tg_pt_secondary_state(
        struct se_lun *lun,
        int explicit,
        int offline)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int trans_delay_msecs;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        if (!tg_pt_gp) {
                spin_unlock(&lun->lun_tg_pt_gp_lock);
                pr_err("Unable to complete secondary state"
                                " transition\n");
                return -EINVAL;
        }
        trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
        /*
         * Set the secondary ALUA target port access state to OFFLINE
         * or clear the previously set secondary state for struct se_lun
         */
        if (offline)
                atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
        else
                atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

        lun->lun_tg_pt_secondary_stat = (explicit) ?
                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                        ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " to secondary access state: %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

        spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Do the optional transition delay after we set the secondary
         * ALUA access state.
         */
        if (trans_delay_msecs != 0)
                msleep_interruptible(trans_delay_msecs);
        /*
         * See if we need to update the ALUA fabric port metadata for
         * secondary state and status
         */
        if (lun->lun_tg_pt_secondary_write_md)
                core_alua_update_tpg_secondary_metadata(lun);

        return 0;
}

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
                           u64 first_lba, u64 last_lba)
{
        struct t10_alua_lba_map *lba_map;

        lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
        if (!lba_map) {
                pr_err("Unable to allocate struct t10_alua_lba_map\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
        lba_map->lba_map_first_lba = first_lba;
        lba_map->lba_map_last_lba = last_lba;

        list_add_tail(&lba_map->lba_map_list, list);
        return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
                               int pg_id, int state)
{
        struct t10_alua_lba_map_member *lba_map_mem;

        list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
                            lba_map_mem_list) {
                if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
                        pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
                        return -EINVAL;
                }
        }

        lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
        if (!lba_map_mem) {
                pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
                return -ENOMEM;
        }
        lba_map_mem->lba_map_mem_alua_state = state;
        lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

        list_add_tail(&lba_map_mem->lba_map_mem_list,
                      &lba_map->lba_map_mem_list);
        return 0;
}
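/*
 * A minimal usage sketch for the two allocators above, with error
 * handling elided and dev/pg_id/segment parameters assumed from the
 * caller's context:
 *
 *      LIST_HEAD(lba_list);
 *      struct t10_alua_lba_map *map;
 *
 *      map = core_alua_allocate_lba_map(&lba_list, 0, 1023);
 *      if (!IS_ERR(map))
 *              core_alua_allocate_lba_map_mem(map, pg_id,
 *                              ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *      core_alua_set_lba_map(dev, &lba_list, segment_size, segment_mult);
 */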

void
core_alua_free_lba_map(struct list_head *lba_list)
{
        struct t10_alua_lba_map *lba_map, *lba_map_tmp;
        struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

        list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
                                 lba_map_list) {
                list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
                                         &lba_map->lba_map_mem_list,
                                         lba_map_mem_list) {
                        list_del(&lba_map_mem->lba_map_mem_list);
                        kmem_cache_free(t10_alua_lba_map_mem_cache,
                                        lba_map_mem);
                }
                list_del(&lba_map->lba_map_list);
                kmem_cache_free(t10_alua_lba_map_cache, lba_map);
        }
}
1357
1358void
1359core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1360                      int segment_size, int segment_mult)
1361{
1362        struct list_head old_lba_map_list;
1363        struct t10_alua_tg_pt_gp *tg_pt_gp;
1364        int activate = 0, supported;
1365
1366        INIT_LIST_HEAD(&old_lba_map_list);
1367        spin_lock(&dev->t10_alua.lba_map_lock);
1368        dev->t10_alua.lba_map_segment_size = segment_size;
1369        dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1370        list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1371        if (lba_map_list) {
1372                list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1373                activate = 1;
1374        }
1375        spin_unlock(&dev->t10_alua.lba_map_lock);
1376        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1377        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1378                            tg_pt_gp_list) {
1379
1380                if (!tg_pt_gp->tg_pt_gp_valid_id)
1381                        continue;
1382                supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1383                if (activate)
1384                        supported |= ALUA_LBD_SUP;
1385                else
1386                        supported &= ~ALUA_LBD_SUP;
1387                tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1388        }
1389        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1390        core_alua_free_lba_map(&old_lba_map_list);
1391}
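
/*
 * Sketch of both call patterns for core_alua_set_lba_map(): the old
 * map list is always spliced out under lba_map_lock and freed only
 * after the lock is dropped, and ALUA_LBD_SUP is advertised on every
 * tg_pt_gp with a valid ID only when a new list was installed:
 *
 *	core_alua_set_lba_map(dev, &new_list, 4096, 2);	/* activate */
 *	core_alua_set_lba_map(dev, NULL, 0, 0);		/* deactivate */
 */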
1392
1393struct t10_alua_lu_gp *
1394core_alua_allocate_lu_gp(const char *name, int def_group)
1395{
1396        struct t10_alua_lu_gp *lu_gp;
1397
1398        lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1399        if (!lu_gp) {
1400                pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1401                return ERR_PTR(-ENOMEM);
1402        }
1403        INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1404        INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1405        spin_lock_init(&lu_gp->lu_gp_lock);
1406        atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1407
1408        if (def_group) {
1409                lu_gp->lu_gp_id = alua_lu_gps_counter++;
1410                lu_gp->lu_gp_valid_id = 1;
1411                alua_lu_gps_count++;
1412        }
1413
1414        return lu_gp;
1415}
1416
1417int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1418{
1419        struct t10_alua_lu_gp *lu_gp_tmp;
1420        u16 lu_gp_id_tmp;
1421        /*
1422         * The lu_gp->lu_gp_id may only be set once.
1423         */
1424        if (lu_gp->lu_gp_valid_id) {
1425                pr_warn("ALUA LU Group already has a valid ID,"
1426                        " ignoring request\n");
1427                return -EINVAL;
1428        }
1429
1430        spin_lock(&lu_gps_lock);
1431        if (alua_lu_gps_count == 0x0000ffff) {
1432                pr_err("Maximum ALUA alua_lu_gps_count:"
1433                                " 0x0000ffff reached\n");
1434                spin_unlock(&lu_gps_lock);
1435                kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1436                return -ENOSPC;
1437        }
1438again:
1439        lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1440                                alua_lu_gps_counter++;
1441
1442        list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1443                if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1444                        if (!lu_gp_id)
1445                                goto again;
1446
1447                        pr_warn("ALUA Logical Unit Group ID: %hu"
1448                                " already exists, ignoring request\n",
1449                                lu_gp_id);
1450                        spin_unlock(&lu_gps_lock);
1451                        return -EINVAL;
1452                }
1453        }
1454
1455        lu_gp->lu_gp_id = lu_gp_id_tmp;
1456        lu_gp->lu_gp_valid_id = 1;
1457        list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1458        alua_lu_gps_count++;
1459        spin_unlock(&lu_gps_lock);
1460
1461        return 0;
1462}
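
/*
 * Typical creation flow as a sketch, error handling abbreviated (the
 * in-tree caller is the ALUA configfs code):
 *
 *	struct t10_alua_lu_gp *lu_gp;
 *	int ret;
 *
 *	lu_gp = core_alua_allocate_lu_gp("some_lu_gp", 0);
 *	if (IS_ERR(lu_gp))
 *		return PTR_ERR(lu_gp);
 *	ret = core_alua_set_lu_gp_id(lu_gp, 0);	/* 0 == auto-assign */
 */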
1463
1464static struct t10_alua_lu_gp_member *
1465core_alua_allocate_lu_gp_mem(struct se_device *dev)
1466{
1467        struct t10_alua_lu_gp_member *lu_gp_mem;
1468
1469        lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1470        if (!lu_gp_mem) {
1471                pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1472                return ERR_PTR(-ENOMEM);
1473        }
1474        INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1475        spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1476        atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1477
1478        lu_gp_mem->lu_gp_mem_dev = dev;
1479        dev->dev_alua_lu_gp_mem = lu_gp_mem;
1480
1481        return lu_gp_mem;
1482}
1483
1484void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1485{
1486        struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1487        /*
1488         * Once we have reached this point, config_item_put() has
1489         * already been called from target_core_alua_drop_lu_gp().
1490         *
1491         * Here, we remove the *lu_gp from the global list so that
1492         * no associations can be made while we are releasing
1493         * struct t10_alua_lu_gp.
1494         */
1495        spin_lock(&lu_gps_lock);
1496        list_del(&lu_gp->lu_gp_node);
1497        alua_lu_gps_count--;
1498        spin_unlock(&lu_gps_lock);
1499        /*
1500         * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1501         * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1502         * released with core_alua_put_lu_gp_from_name()
1503         */
1504        while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1505                cpu_relax();
1506        /*
1507         * Release reference to struct t10_alua_lu_gp * from all associated
1508         * struct se_device.
1509         */
1510        spin_lock(&lu_gp->lu_gp_lock);
1511        list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1512                                &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1513                if (lu_gp_mem->lu_gp_assoc) {
1514                        list_del(&lu_gp_mem->lu_gp_mem_list);
1515                        lu_gp->lu_gp_members--;
1516                        lu_gp_mem->lu_gp_assoc = 0;
1517                }
1518                spin_unlock(&lu_gp->lu_gp_lock);
1519                /*
1521                 * lu_gp_mem is associated with a single
1522                 * struct se_device->dev_alua_lu_gp_mem, and is released when
1523                 * struct se_device is released via core_alua_free_lu_gp_mem().
1524                 *
1525                 * If the passed lu_gp does NOT match the default_lu_gp, assume
1526                 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1527                 */
1528                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1529                if (lu_gp != default_lu_gp)
1530                        __core_alua_attach_lu_gp_mem(lu_gp_mem,
1531                                        default_lu_gp);
1532                else
1533                        lu_gp_mem->lu_gp = NULL;
1534                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1535
1536                spin_lock(&lu_gp->lu_gp_lock);
1537        }
1538        spin_unlock(&lu_gp->lu_gp_lock);
1539
1540        kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1541}
1542
1543void core_alua_free_lu_gp_mem(struct se_device *dev)
1544{
1545        struct t10_alua_lu_gp *lu_gp;
1546        struct t10_alua_lu_gp_member *lu_gp_mem;
1547
1548        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1549        if (!lu_gp_mem)
1550                return;
1551
1552        while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1553                cpu_relax();
1554
1555        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1556        lu_gp = lu_gp_mem->lu_gp;
1557        if (lu_gp) {
1558                spin_lock(&lu_gp->lu_gp_lock);
1559                if (lu_gp_mem->lu_gp_assoc) {
1560                        list_del(&lu_gp_mem->lu_gp_mem_list);
1561                        lu_gp->lu_gp_members--;
1562                        lu_gp_mem->lu_gp_assoc = 0;
1563                }
1564                spin_unlock(&lu_gp->lu_gp_lock);
1565                lu_gp_mem->lu_gp = NULL;
1566        }
1567        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1568
1569        kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1570}
1571
1572struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1573{
1574        struct t10_alua_lu_gp *lu_gp;
1575        struct config_item *ci;
1576
1577        spin_lock(&lu_gps_lock);
1578        list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1579                if (!lu_gp->lu_gp_valid_id)
1580                        continue;
1581                ci = &lu_gp->lu_gp_group.cg_item;
1582                if (!strcmp(config_item_name(ci), name)) {
1583                        atomic_inc(&lu_gp->lu_gp_ref_cnt);
1584                        spin_unlock(&lu_gps_lock);
1585                        return lu_gp;
1586                }
1587        }
1588        spin_unlock(&lu_gps_lock);
1589
1590        return NULL;
1591}
1592
1593void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1594{
1595        spin_lock(&lu_gps_lock);
1596        atomic_dec(&lu_gp->lu_gp_ref_cnt);
1597        spin_unlock(&lu_gps_lock);
1598}
1599
1600/*
1601 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1602 */
1603void __core_alua_attach_lu_gp_mem(
1604        struct t10_alua_lu_gp_member *lu_gp_mem,
1605        struct t10_alua_lu_gp *lu_gp)
1606{
1607        spin_lock(&lu_gp->lu_gp_lock);
1608        lu_gp_mem->lu_gp = lu_gp;
1609        lu_gp_mem->lu_gp_assoc = 1;
1610        list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1611        lu_gp->lu_gp_members++;
1612        spin_unlock(&lu_gp->lu_gp_lock);
1613}
1614
1615/*
1616 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1617 */
1618void __core_alua_drop_lu_gp_mem(
1619        struct t10_alua_lu_gp_member *lu_gp_mem,
1620        struct t10_alua_lu_gp *lu_gp)
1621{
1622        spin_lock(&lu_gp->lu_gp_lock);
1623        list_del(&lu_gp_mem->lu_gp_mem_list);
1624        lu_gp_mem->lu_gp = NULL;
1625        lu_gp_mem->lu_gp_assoc = 0;
1626        lu_gp->lu_gp_members--;
1627        spin_unlock(&lu_gp->lu_gp_lock);
1628}
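
/*
 * Both __core_alua_*_lu_gp_mem() helpers assume the caller already
 * holds lu_gp_mem->lu_gp_mem_lock, giving the lock order
 * lu_gp_mem_lock -> lu_gp_lock.  A minimal re-association sketch:
 *
 *	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 *	if (lu_gp_mem->lu_gp)
 *		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp_mem->lu_gp);
 *	__core_alua_attach_lu_gp_mem(lu_gp_mem, new_lu_gp);
 *	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 */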
1629
1630struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1631                const char *name, int def_group)
1632{
1633        struct t10_alua_tg_pt_gp *tg_pt_gp;
1634
1635        tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1636        if (!tg_pt_gp) {
1637                pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1638                return NULL;
1639        }
1640        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1641        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1642        mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1643        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1644        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1645        tg_pt_gp->tg_pt_gp_dev = dev;
1646        tg_pt_gp->tg_pt_gp_alua_access_state =
1647                        ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1648        /*
1649         * Enable both explicit and implicit ALUA support by default
1650         */
1651        tg_pt_gp->tg_pt_gp_alua_access_type =
1652                        TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1653        /*
1654         * Set the default Active/NonOptimized Delay in milliseconds
1655         */
1656        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1657        tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1658        tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1659
1660        /*
1661         * Enable all supported states
1662         */
1663        tg_pt_gp->tg_pt_gp_alua_supported_states =
1664            ALUA_T_SUP | ALUA_O_SUP |
1665            ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1666
1667        if (def_group) {
1668                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1669                tg_pt_gp->tg_pt_gp_id =
1670                                dev->t10_alua.alua_tg_pt_gps_counter++;
1671                tg_pt_gp->tg_pt_gp_valid_id = 1;
1672                dev->t10_alua.alua_tg_pt_gps_count++;
1673                list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1674                              &dev->t10_alua.tg_pt_gps_list);
1675                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1676        }
1677
1678        return tg_pt_gp;
1679}
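
/*
 * Note that the default supported-states mask set above deliberately
 * omits ALUA_LBD_SUP; that bit is only advertised once an LBA map has
 * been activated via core_alua_set_lba_map().
 */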
1680
1681int core_alua_set_tg_pt_gp_id(
1682        struct t10_alua_tg_pt_gp *tg_pt_gp,
1683        u16 tg_pt_gp_id)
1684{
1685        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1686        struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1687        u16 tg_pt_gp_id_tmp;
1688
1689        /*
1690         * The tg_pt_gp->tg_pt_gp_id may only be set once.
1691         */
1692        if (tg_pt_gp->tg_pt_gp_valid_id) {
1693                pr_warn("ALUA TG PT Group already has a valid ID,"
1694                        " ignoring request\n");
1695                return -EINVAL;
1696        }
1697
1698        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1699        if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1700                pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1701                        " 0x0000ffff reached\n");
1702                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1703                kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1704                return -ENOSPC;
1705        }
1706again:
1707        tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1708                        dev->t10_alua.alua_tg_pt_gps_counter++;
1709
1710        list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1711                        tg_pt_gp_list) {
1712                if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1713                        if (!tg_pt_gp_id)
1714                                goto again;
1715
1716                        pr_err("ALUA Target Port Group ID: %hu already"
1717                                " exists, ignoring request\n", tg_pt_gp_id);
1718                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1719                        return -EINVAL;
1720                }
1721        }
1722
1723        tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1724        tg_pt_gp->tg_pt_gp_valid_id = 1;
1725        list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1726                        &dev->t10_alua.tg_pt_gps_list);
1727        dev->t10_alua.alua_tg_pt_gps_count++;
1728        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1729
1730        return 0;
1731}
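
/*
 * Passing tg_pt_gp_id == 0 requests automatic assignment: the again:
 * loop advances alua_tg_pt_gps_counter until an unused ID is found.
 * A non-zero ID is taken verbatim and must be unique, e.g.
 * (hypothetical caller):
 *
 *	if (core_alua_set_tg_pt_gp_id(tg_pt_gp, 16) < 0)
 *		pr_err("tg_pt_gp ID 16 already in use\n");
 */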
1732
1733void core_alua_free_tg_pt_gp(
1734        struct t10_alua_tg_pt_gp *tg_pt_gp)
1735{
1736        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1737        struct se_lun *lun, *next;
1738
1739        /*
1740         * Once we have reached this point, config_item_put() has already
1741         * been called from target_core_alua_drop_tg_pt_gp().
1742         *
1743         * Here we remove *tg_pt_gp from the global list so that
1744         * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1745         * can be made while we are releasing struct t10_alua_tg_pt_gp.
1746         */
1747        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1748        if (tg_pt_gp->tg_pt_gp_valid_id) {
1749                list_del(&tg_pt_gp->tg_pt_gp_list);
1750                dev->t10_alua.alua_tg_pt_gps_count--;
1751        }
1752        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1753
1754        /*
1755         * Allow a struct t10_alua_tg_pt_gp * referenced by
1756         * core_alua_get_tg_pt_gp_by_name() in
1757         * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1758         * to be released with core_alua_put_tg_pt_gp_from_name().
1759         */
1760        while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1761                cpu_relax();
1762
1763        /*
1764         * Release reference to struct t10_alua_tg_pt_gp from all associated
1765         * struct se_lun.
1766         */
1767        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1768        list_for_each_entry_safe(lun, next,
1769                        &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1770                list_del_init(&lun->lun_tg_pt_gp_link);
1771                tg_pt_gp->tg_pt_gp_members--;
1772
1773                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1774                /*
1775                 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1776                 * assume we want to re-associate a given tg_pt_gp_mem with
1777                 * default_tg_pt_gp.
1778                 */
1779                spin_lock(&lun->lun_tg_pt_gp_lock);
1780                if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1781                        __target_attach_tg_pt_gp(lun,
1782                                        dev->t10_alua.default_tg_pt_gp);
1783                } else
1784                        lun->lun_tg_pt_gp = NULL;
1785                spin_unlock(&lun->lun_tg_pt_gp_lock);
1786
1787                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1788        }
1789        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1790
1791        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1792}
1793
1794static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1795                struct se_device *dev, const char *name)
1796{
1797        struct t10_alua_tg_pt_gp *tg_pt_gp;
1798        struct config_item *ci;
1799
1800        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1801        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1802                        tg_pt_gp_list) {
1803                if (!tg_pt_gp->tg_pt_gp_valid_id)
1804                        continue;
1805                ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1806                if (!strcmp(config_item_name(ci), name)) {
1807                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1808                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1809                        return tg_pt_gp;
1810                }
1811        }
1812        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1813
1814        return NULL;
1815}
1816
1817static void core_alua_put_tg_pt_gp_from_name(
1818        struct t10_alua_tg_pt_gp *tg_pt_gp)
1819{
1820        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1821
1822        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1823        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1824        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1825}
1826
1827static void __target_attach_tg_pt_gp(struct se_lun *lun,
1828                struct t10_alua_tg_pt_gp *tg_pt_gp)
1829{
1830        struct se_dev_entry *se_deve;
1831
1832        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1833
1834        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1835        lun->lun_tg_pt_gp = tg_pt_gp;
1836        list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1837        tg_pt_gp->tg_pt_gp_members++;
1838        spin_lock(&lun->lun_deve_lock);
1839        list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1840                core_scsi3_ua_allocate(se_deve, 0x3f,
1841                                       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1842        spin_unlock(&lun->lun_deve_lock);
1843        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1844}
1845
1846void target_attach_tg_pt_gp(struct se_lun *lun,
1847                struct t10_alua_tg_pt_gp *tg_pt_gp)
1848{
1849        spin_lock(&lun->lun_tg_pt_gp_lock);
1850        __target_attach_tg_pt_gp(lun, tg_pt_gp);
1851        spin_unlock(&lun->lun_tg_pt_gp_lock);
1852}
1853
1854static void __target_detach_tg_pt_gp(struct se_lun *lun,
1855                struct t10_alua_tg_pt_gp *tg_pt_gp)
1856{
1857        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1858
1859        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1860        list_del_init(&lun->lun_tg_pt_gp_link);
1861        tg_pt_gp->tg_pt_gp_members--;
1862        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1863
1864        lun->lun_tg_pt_gp = NULL;
1865}
1866
1867void target_detach_tg_pt_gp(struct se_lun *lun)
1868{
1869        struct t10_alua_tg_pt_gp *tg_pt_gp;
1870
1871        spin_lock(&lun->lun_tg_pt_gp_lock);
1872        tg_pt_gp = lun->lun_tg_pt_gp;
1873        if (tg_pt_gp)
1874                __target_detach_tg_pt_gp(lun, tg_pt_gp);
1875        spin_unlock(&lun->lun_tg_pt_gp_lock);
1876}
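
/*
 * target_attach_tg_pt_gp()/target_detach_tg_pt_gp() are the locked
 * wrappers fabrics use to (un)bind a single LUN; moving a LUN between
 * groups under one hold of lun_tg_pt_gp_lock uses the __target_*
 * helpers directly, as core_alua_store_tg_pt_gp_info() does below.
 */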
1877
1878ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1879{
1880        struct config_item *tg_pt_ci;
1881        struct t10_alua_tg_pt_gp *tg_pt_gp;
1882        ssize_t len = 0;
1883
1884        spin_lock(&lun->lun_tg_pt_gp_lock);
1885        tg_pt_gp = lun->lun_tg_pt_gp;
1886        if (tg_pt_gp) {
1887                tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1888                len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1889                        " %hu\nTG Port Primary Access State: %s\nTG Port "
1890                        "Primary Access Status: %s\nTG Port Secondary Access"
1891                        " State: %s\nTG Port Secondary Access Status: %s\n",
1892                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1893                        core_alua_dump_state(
1894                                tg_pt_gp->tg_pt_gp_alua_access_state),
1895                        core_alua_dump_status(
1896                                tg_pt_gp->tg_pt_gp_alua_access_status),
1897                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1898                        "Offline" : "None",
1899                        core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1900        }
1901        spin_unlock(&lun->lun_tg_pt_gp_lock);
1902
1903        return len;
1904}
1905
1906ssize_t core_alua_store_tg_pt_gp_info(
1907        struct se_lun *lun,
1908        const char *page,
1909        size_t count)
1910{
1911        struct se_portal_group *tpg = lun->lun_tpg;
1912        /*
1913         * rcu_dereference_raw protected by se_lun->lun_group symlink
1914         * reference to se_device->dev_group.
1915         */
1916        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1917        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1918        unsigned char buf[TG_PT_GROUP_NAME_BUF];
1919        int move = 0;
1920
1921        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1922            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1923                return -ENODEV;
1924
1925        if (count >= TG_PT_GROUP_NAME_BUF) { /* need room for NUL */
1926                pr_err("ALUA Target Port Group alias too large!\n");
1927                return -EINVAL;
1928        }
1929        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1930        memcpy(buf, page, count);
1931        /*
1932         * Any ALUA target port group alias besides "NULL" means we will be
1933         * making a new group association.
1934         */
1935        if (strcmp(strstrip(buf), "NULL")) {
1936                /*
1937                 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1938                 * struct t10_alua_tg_pt_gp.  This reference is released with
1939                 * core_alua_put_tg_pt_gp_from_name() below.
1940                 */
1941                tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1942                                        strstrip(buf));
1943                if (!tg_pt_gp_new)
1944                        return -ENODEV;
1945        }
1946
1947        spin_lock(&lun->lun_tg_pt_gp_lock);
1948        tg_pt_gp = lun->lun_tg_pt_gp;
1949        if (tg_pt_gp) {
1950                /*
1951                 * Clearing an existing tg_pt_gp association, and replacing
1952                 * with the default_tg_pt_gp.
1953                 */
1954                if (!tg_pt_gp_new) {
1955                        pr_debug("Target_Core_ConfigFS: Moving"
1956                                " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1957                                " alua/%s, ID: %hu back to"
1958                                " default_tg_pt_gp\n",
1959                                tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1960                                tpg->se_tpg_tfo->tpg_get_tag(tpg),
1961                                config_item_name(&lun->lun_group.cg_item),
1962                                config_item_name(
1963                                        &tg_pt_gp->tg_pt_gp_group.cg_item),
1964                                tg_pt_gp->tg_pt_gp_id);
1965
1966                        __target_detach_tg_pt_gp(lun, tg_pt_gp);
1967                        __target_attach_tg_pt_gp(lun,
1968                                        dev->t10_alua.default_tg_pt_gp);
1969                        spin_unlock(&lun->lun_tg_pt_gp_lock);
1970
1971                        return count;
1972                }
1973                __target_detach_tg_pt_gp(lun, tg_pt_gp);
1974                move = 1;
1975        }
1976
1977        __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
1978        spin_unlock(&lun->lun_tg_pt_gp_lock);
1979        pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1980                " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1981                "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1982                tpg->se_tpg_tfo->tpg_get_tag(tpg),
1983                config_item_name(&lun->lun_group.cg_item),
1984                config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1985                tg_pt_gp_new->tg_pt_gp_id);
1986
1987        core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1988        return count;
1989}
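
/*
 * Hypothetical configfs usage of the store handler above ($FABRIC and
 * $WWN are placeholders):
 *
 *	# bind lun_0 to an existing target port group
 *	echo some_group > .../$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 *	# drop the association and fall back to default_tg_pt_gp
 *	echo NULL > .../$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
 */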
1990
1991ssize_t core_alua_show_access_type(
1992        struct t10_alua_tg_pt_gp *tg_pt_gp,
1993        char *page)
1994{
1995        if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1996            (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1997                return sprintf(page, "Implicit and Explicit\n");
1998        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
1999                return sprintf(page, "Implicit\n");
2000        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2001                return sprintf(page, "Explicit\n");
2002        else
2003                return sprintf(page, "None\n");
2004}
2005
2006ssize_t core_alua_store_access_type(
2007        struct t10_alua_tg_pt_gp *tg_pt_gp,
2008        const char *page,
2009        size_t count)
2010{
2011        unsigned long tmp;
2012        int ret;
2013
2014        ret = kstrtoul(page, 0, &tmp);
2015        if (ret < 0) {
2016                pr_err("Unable to extract alua_access_type\n");
2017                return ret;
2018        }
2019        if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2020                pr_err("Illegal value for alua_access_type:"
2021                                " %lu\n", tmp);
2022                return -EINVAL;
2023        }
2024        if (tmp == 3)
2025                tg_pt_gp->tg_pt_gp_alua_access_type =
2026                        TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2027        else if (tmp == 2)
2028                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2029        else if (tmp == 1)
2030                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2031        else
2032                tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2033
2034        return count;
2035}
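
/*
 * Accepted alua_access_type values, as decoded above:
 *
 *	0 = None
 *	1 = Implicit
 *	2 = Explicit
 *	3 = Implicit and Explicit
 */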
2036
2037ssize_t core_alua_show_nonop_delay_msecs(
2038        struct t10_alua_tg_pt_gp *tg_pt_gp,
2039        char *page)
2040{
2041        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2042}
2043
2044ssize_t core_alua_store_nonop_delay_msecs(
2045        struct t10_alua_tg_pt_gp *tg_pt_gp,
2046        const char *page,
2047        size_t count)
2048{
2049        unsigned long tmp;
2050        int ret;
2051
2052        ret = kstrtoul(page, 0, &tmp);
2053        if (ret < 0) {
2054                pr_err("Unable to extract nonop_delay_msecs\n");
2055                return ret;
2056        }
2057        if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2058                pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2059                        " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2060                        ALUA_MAX_NONOP_DELAY_MSECS);
2061                return -EINVAL;
2062        }
2063        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2064
2065        return count;
2066}
2067
2068ssize_t core_alua_show_trans_delay_msecs(
2069        struct t10_alua_tg_pt_gp *tg_pt_gp,
2070        char *page)
2071{
2072        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2073}
2074
2075ssize_t core_alua_store_trans_delay_msecs(
2076        struct t10_alua_tg_pt_gp *tg_pt_gp,
2077        const char *page,
2078        size_t count)
2079{
2080        unsigned long tmp;
2081        int ret;
2082
2083        ret = kstrtoul(page, 0, &tmp);
2084        if (ret < 0) {
2085                pr_err("Unable to extract trans_delay_msecs\n");
2086                return ret;
2087        }
2088        if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2089                pr_err("Passed trans_delay_msecs: %lu, exceeds"
2090                        " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2091                        ALUA_MAX_TRANS_DELAY_MSECS);
2092                return -EINVAL;
2093        }
2094        tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2095
2096        return count;
2097}
2098
2099ssize_t core_alua_show_implicit_trans_secs(
2100        struct t10_alua_tg_pt_gp *tg_pt_gp,
2101        char *page)
2102{
2103        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2104}
2105
2106ssize_t core_alua_store_implicit_trans_secs(
2107        struct t10_alua_tg_pt_gp *tg_pt_gp,
2108        const char *page,
2109        size_t count)
2110{
2111        unsigned long tmp;
2112        int ret;
2113
2114        ret = kstrtoul(page, 0, &tmp);
2115        if (ret < 0) {
2116                pr_err("Unable to extract implicit_trans_secs\n");
2117                return ret;
2118        }
2119        if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2120                pr_err("Passed implicit_trans_secs: %lu, exceeds"
2121                        " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2122                        ALUA_MAX_IMPLICIT_TRANS_SECS);
2123                return -EINVAL;
2124        }
2125        tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2126
2127        return count;
2128}
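
/*
 * The three delay/transition attributes above share one pattern:
 * parse with kstrtoul(), reject values beyond the ALUA_MAX_* cap,
 * then store, e.g. (hypothetical group path):
 *
 *	echo 100 > .../alua/some_group/trans_delay_msecs
 */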
2129
2130ssize_t core_alua_show_preferred_bit(
2131        struct t10_alua_tg_pt_gp *tg_pt_gp,
2132        char *page)
2133{
2134        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2135}
2136
2137ssize_t core_alua_store_preferred_bit(
2138        struct t10_alua_tg_pt_gp *tg_pt_gp,
2139        const char *page,
2140        size_t count)
2141{
2142        unsigned long tmp;
2143        int ret;
2144
2145        ret = kstrtoul(page, 0, &tmp);
2146        if (ret < 0) {
2147                pr_err("Unable to extract preferred ALUA value\n");
2148                return ret;
2149        }
2150        if ((tmp != 0) && (tmp != 1)) {
2151                pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2152                return -EINVAL;
2153        }
2154        tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2155
2156        return count;
2157}
2158
2159ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2160{
2161        return sprintf(page, "%d\n",
2162                atomic_read(&lun->lun_tg_pt_secondary_offline));
2163}
2164
2165ssize_t core_alua_store_offline_bit(
2166        struct se_lun *lun,
2167        const char *page,
2168        size_t count)
2169{
2170        /*
2171         * rcu_dereference_raw protected by se_lun->lun_group symlink
2172         * reference to se_device->dev_group.
2173         */
2174        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2175        unsigned long tmp;
2176        int ret;
2177
2178        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2179            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2180                return -ENODEV;
2181
2182        ret = kstrtoul(page, 0, &tmp);
2183        if (ret < 0) {
2184                pr_err("Unable to extract alua_tg_pt_offline value\n");
2185                return ret;
2186        }
2187        if ((tmp != 0) && (tmp != 1)) {
2188                pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2189                                tmp);
2190                return -EINVAL;
2191        }
2192
2193        ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2194        if (ret < 0)
2195                return -EINVAL;
2196
2197        return count;
2198}
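
/*
 * core_alua_set_tg_pt_secondary_state() is called above with
 * explicit == 0, so the resulting secondary status is recorded as
 * ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA rather than
 * ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG.
 */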
2199
2200ssize_t core_alua_show_secondary_status(
2201        struct se_lun *lun,
2202        char *page)
2203{
2204        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2205}
2206
2207ssize_t core_alua_store_secondary_status(
2208        struct se_lun *lun,
2209        const char *page,
2210        size_t count)
2211{
2212        unsigned long tmp;
2213        int ret;
2214
2215        ret = kstrtoul(page, 0, &tmp);
2216        if (ret < 0) {
2217                pr_err("Unable to extract alua_tg_pt_status\n");
2218                return ret;
2219        }
2220        if ((tmp != ALUA_STATUS_NONE) &&
2221            (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2222            (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2223                pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2224                                tmp);
2225                return -EINVAL;
2226        }
2227        lun->lun_tg_pt_secondary_stat = (int)tmp;
2228
2229        return count;
2230}
2231
2232ssize_t core_alua_show_secondary_write_metadata(
2233        struct se_lun *lun,
2234        char *page)
2235{
2236        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2237}
2238
2239ssize_t core_alua_store_secondary_write_metadata(
2240        struct se_lun *lun,
2241        const char *page,
2242        size_t count)
2243{
2244        unsigned long tmp;
2245        int ret;
2246
2247        ret = kstrtoul(page, 0, &tmp);
2248        if (ret < 0) {
2249                pr_err("Unable to extract alua_tg_pt_write_md\n");
2250                return ret;
2251        }
2252        if ((tmp != 0) && (tmp != 1)) {
2253                pr_err("Illegal value for alua_tg_pt_write_md:"
2254                                " %lu\n", tmp);
2255                return -EINVAL;
2256        }
2257        lun->lun_tg_pt_secondary_write_md = (int)tmp;
2258
2259        return count;
2260}
2261
2262int core_setup_alua(struct se_device *dev)
2263{
2264        if (!(dev->transport_flags &
2265             TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2266            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2267                struct t10_alua_lu_gp_member *lu_gp_mem;
2268
2269                /*
2270                 * Associate this struct se_device with the default ALUA
2271                 * LUN Group.
2272                 */
2273                lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2274                if (IS_ERR(lu_gp_mem))
2275                        return PTR_ERR(lu_gp_mem);
2276
2277                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2278                __core_alua_attach_lu_gp_mem(lu_gp_mem,
2279                                default_lu_gp);
2280                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2281
2282                pr_debug("%s: Adding to default ALUA LU Group:"
2283                        " core/alua/lu_gps/default_lu_gp\n",
2284                        dev->transport->name);
2285        }
2286
2287        return 0;
2288}
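
/*
 * core_setup_alua() runs during device configuration; a minimal sketch
 * of the expected call site (the error label is illustrative):
 *
 *	ret = core_setup_alua(dev);
 *	if (ret)
 *		goto out_free;
 */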
2289