linux/drivers/target/target_core_device.c
   1/*******************************************************************************
   2 * Filename:  target_core_device.c (based on iscsi_target_device.c)
   3 *
   4 * This file contains the TCM Virtual Device and Disk Transport
   5 * agnostic related functions.
   6 *
   7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
   8 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
   9 * Copyright (c) 2007-2010 Rising Tide Systems
  10 * Copyright (c) 2008-2010 Linux-iSCSI.org
  11 *
  12 * Nicholas A. Bellinger <nab@kernel.org>
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License as published by
  16 * the Free Software Foundation; either version 2 of the License, or
  17 * (at your option) any later version.
  18 *
  19 * This program is distributed in the hope that it will be useful,
  20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  22 * GNU General Public License for more details.
  23 *
  24 * You should have received a copy of the GNU General Public License
  25 * along with this program; if not, write to the Free Software
  26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  27 *
  28 ******************************************************************************/
  29
  30#include <linux/net.h>
  31#include <linux/string.h>
  32#include <linux/delay.h>
  33#include <linux/timer.h>
  34#include <linux/slab.h>
  35#include <linux/spinlock.h>
  36#include <linux/kthread.h>
  37#include <linux/in.h>
  38#include <linux/export.h>
  39#include <net/sock.h>
  40#include <net/tcp.h>
  41#include <scsi/scsi.h>
  42#include <scsi/scsi_device.h>
  43
  44#include <target/target_core_base.h>
  45#include <target/target_core_backend.h>
  46#include <target/target_core_fabric.h>
  47
  48#include "target_core_internal.h"
  49#include "target_core_alua.h"
  50#include "target_core_pr.h"
  51#include "target_core_ua.h"
  52
  53static void se_dev_start(struct se_device *dev);
  54static void se_dev_stop(struct se_device *dev);
  55
  56static struct se_hba *lun0_hba;
  57static struct se_subsystem_dev *lun0_su_dev;
  58/* not static, needed by tpg.c */
  59struct se_device *g_lun0_dev;
  60
  61int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
  62{
  63        struct se_lun *se_lun = NULL;
  64        struct se_session *se_sess = se_cmd->se_sess;
  65        struct se_device *dev;
  66        unsigned long flags;
  67
  68        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
  69                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
  70                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
  71                return -ENODEV;
  72        }
  73
  74        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
  75        se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
  76        if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
  77                struct se_dev_entry *deve = se_cmd->se_deve;
  78
  79                deve->total_cmds++;
  80                deve->total_bytes += se_cmd->data_length;
  81
  82                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
  83                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
  84                        se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
  85                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
  86                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
  87                                " Access for 0x%08x\n",
  88                                se_cmd->se_tfo->get_fabric_name(),
  89                                unpacked_lun);
  90                        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
  91                        return -EACCES;
  92                }
  93
  94                if (se_cmd->data_direction == DMA_TO_DEVICE)
  95                        deve->write_bytes += se_cmd->data_length;
  96                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
  97                        deve->read_bytes += se_cmd->data_length;
  98
  99                deve->deve_cmds++;
 100
 101                se_lun = deve->se_lun;
 102                se_cmd->se_lun = deve->se_lun;
 103                se_cmd->pr_res_key = deve->pr_res_key;
 104                se_cmd->orig_fe_lun = unpacked_lun;
 105                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 106        }
 107        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 108
 109        if (!se_lun) {
 110                /*
 111                 * Use the se_portal_group->tpg_virt_lun0 to allow for
 112                 * REPORT_LUNS, et al to be returned when no active
 113                 * MappedLUN=0 exists for this Initiator Port.
 114                 */
 115                if (unpacked_lun != 0) {
 116                        se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
 117                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 118                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 119                                " Access for 0x%08x\n",
 120                                se_cmd->se_tfo->get_fabric_name(),
 121                                unpacked_lun);
 122                        return -ENODEV;
 123                }
 124                /*
 125                 * Force WRITE PROTECT for virtual LUN 0
 126                 */
 127                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
 128                    (se_cmd->data_direction != DMA_NONE)) {
 129                        se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
 130                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 131                        return -EACCES;
 132                }
 133
 134                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 135                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 136                se_cmd->orig_fe_lun = 0;
 137                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 138        }
 139        /*
 140         * Determine if the struct se_lun is online.
 141         * FIXME: Check for LUN_RESET + UNIT Attention
 142         */
 143        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
 144                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
 145                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 146                return -ENODEV;
 147        }
 148
 149        /* Directly associate cmd with se_dev */
 150        se_cmd->se_dev = se_lun->lun_se_dev;
 151
 152        /* TODO: get rid of this and use atomics for stats */
 153        dev = se_lun->lun_se_dev;
 154        spin_lock_irqsave(&dev->stats_lock, flags);
 155        dev->num_cmds++;
 156        if (se_cmd->data_direction == DMA_TO_DEVICE)
 157                dev->write_bytes += se_cmd->data_length;
 158        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 159                dev->read_bytes += se_cmd->data_length;
 160        spin_unlock_irqrestore(&dev->stats_lock, flags);
 161
 162        /*
 163         * Add the struct se_cmd to the struct se_lun's cmd list.  This list is used
 164         * for tracking state of struct se_cmds during LUN shutdown events.
 165         */
 166        spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
 167        list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
 168        atomic_set(&se_cmd->transport_lun_active, 1);
 169        spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 170
 171        return 0;
 172}
 173EXPORT_SYMBOL(transport_lookup_cmd_lun);
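
/*
 * Illustrative only (not taken from this file): a fabric module that has
 * already initialized a struct se_cmd is expected to resolve the LUN before
 * dispatching the CDB, roughly:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
 *		<complete the command through the fabric's status path, using
 *		 the scsi_sense_reason already set in se_cmd>
 *
 * The exact setup and submission helpers around this call differ per fabric
 * driver and kernel version, so treat the above as a sketch.
 */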
 174
 175int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 176{
 177        struct se_dev_entry *deve;
 178        struct se_lun *se_lun = NULL;
 179        struct se_session *se_sess = se_cmd->se_sess;
 180        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
 181        unsigned long flags;
 182
 183        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
 184                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
 185                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 186                return -ENODEV;
 187        }
 188
 189        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
 190        se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
 191        deve = se_cmd->se_deve;
 192
 193        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 194                se_tmr->tmr_lun = deve->se_lun;
 195                se_cmd->se_lun = deve->se_lun;
 196                se_lun = deve->se_lun;
 197                se_cmd->pr_res_key = deve->pr_res_key;
 198                se_cmd->orig_fe_lun = unpacked_lun;
 199        }
 200        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 201
 202        if (!se_lun) {
 203                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 204                        " Access for 0x%08x\n",
 205                        se_cmd->se_tfo->get_fabric_name(),
 206                        unpacked_lun);
 207                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 208                return -ENODEV;
 209        }
 210        /*
 211         * Determine if the struct se_lun is online.
 212         * FIXME: Check for LUN_RESET + UNIT Attention
 213         */
 214        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
 215                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 216                return -ENODEV;
 217        }
 218
 219        /* Directly associate cmd with se_dev */
 220        se_cmd->se_dev = se_lun->lun_se_dev;
 221        se_tmr->tmr_dev = se_lun->lun_se_dev;
 222
 223        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
 224        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 225        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
 226
 227        return 0;
 228}
 229EXPORT_SYMBOL(transport_lookup_tmr_lun);
 230
 231/*
 232 * This function is called from core_scsi3_emulate_pro_register_and_move()
 233 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 234 * when a matching rtpi is found.
 235 */
 236struct se_dev_entry *core_get_se_deve_from_rtpi(
 237        struct se_node_acl *nacl,
 238        u16 rtpi)
 239{
 240        struct se_dev_entry *deve;
 241        struct se_lun *lun;
 242        struct se_port *port;
 243        struct se_portal_group *tpg = nacl->se_tpg;
 244        u32 i;
 245
 246        spin_lock_irq(&nacl->device_list_lock);
 247        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 248                deve = &nacl->device_list[i];
 249
 250                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 251                        continue;
 252
 253                lun = deve->se_lun;
 254                if (!lun) {
 255                        pr_err("%s device entries device pointer is"
 256                                " NULL, but Initiator has access.\n",
 257                                tpg->se_tpg_tfo->get_fabric_name());
 258                        continue;
 259                }
 260                port = lun->lun_sep;
 261                if (!port) {
 262                        pr_err("%s device entries port pointer is"
 263                                " NULL, but Initiator has access.\n",
 264                                tpg->se_tpg_tfo->get_fabric_name());
 265                        continue;
 266                }
 267                if (port->sep_rtpi != rtpi)
 268                        continue;
 269
 270                atomic_inc(&deve->pr_ref_count);
 271                smp_mb__after_atomic_inc();
 272                spin_unlock_irq(&nacl->device_list_lock);
 273
 274                return deve;
 275        }
 276        spin_unlock_irq(&nacl->device_list_lock);
 277
 278        return NULL;
 279}
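
/*
 * Caller-side note (an assumption drawn from the pr_ref_count comment above
 * and the wait loop in core_update_device_list_for_node()): a non-NULL return
 * from core_get_se_deve_from_rtpi() carries a deve->pr_ref_count reference,
 * and the PR code is expected to drop it with atomic_dec() once it is done
 * with the returned entry, otherwise LUN ACL teardown will spin waiting for
 * the count to reach zero.
 */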
 280
 281int core_free_device_list_for_node(
 282        struct se_node_acl *nacl,
 283        struct se_portal_group *tpg)
 284{
 285        struct se_dev_entry *deve;
 286        struct se_lun *lun;
 287        u32 i;
 288
 289        if (!nacl->device_list)
 290                return 0;
 291
 292        spin_lock_irq(&nacl->device_list_lock);
 293        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 294                deve = &nacl->device_list[i];
 295
 296                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 297                        continue;
 298
 299                if (!deve->se_lun) {
 300                        pr_err("%s device entries device pointer is"
 301                                " NULL, but Initiator has access.\n",
 302                                tpg->se_tpg_tfo->get_fabric_name());
 303                        continue;
 304                }
 305                lun = deve->se_lun;
 306
 307                spin_unlock_irq(&nacl->device_list_lock);
 308                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
 309                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
 310                spin_lock_irq(&nacl->device_list_lock);
 311        }
 312        spin_unlock_irq(&nacl->device_list_lock);
 313
 314        kfree(nacl->device_list);
 315        nacl->device_list = NULL;
 316
 317        return 0;
 318}
 319
 320void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
 321{
 322        struct se_dev_entry *deve;
 323        unsigned long flags;
 324
 325        spin_lock_irqsave(&se_nacl->device_list_lock, flags);
 326        deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
 327        deve->deve_cmds--;
 328        spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
 329}
 330
 331void core_update_device_list_access(
 332        u32 mapped_lun,
 333        u32 lun_access,
 334        struct se_node_acl *nacl)
 335{
 336        struct se_dev_entry *deve;
 337
 338        spin_lock_irq(&nacl->device_list_lock);
 339        deve = &nacl->device_list[mapped_lun];
 340        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 341                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 342                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
 343        } else {
 344                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
 345                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
 346        }
 347        spin_unlock_irq(&nacl->device_list_lock);
 348}
 349
 350/*      core_update_device_list_for_node():
 351 *
 352 *
 353 */
 354int core_update_device_list_for_node(
 355        struct se_lun *lun,
 356        struct se_lun_acl *lun_acl,
 357        u32 mapped_lun,
 358        u32 lun_access,
 359        struct se_node_acl *nacl,
 360        struct se_portal_group *tpg,
 361        int enable)
 362{
 363        struct se_port *port = lun->lun_sep;
 364        struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
 365        int trans = 0;
 366        /*
 367         * If the MappedLUN entry is being disabled, the entry in
 368         * port->sep_alua_list must be removed now before clearing the
 369         * struct se_dev_entry pointers below as logic in
 370         * core_alua_do_transition_tg_pt() depends on these being present.
 371         */
 372        if (!enable) {
 373                /*
 374                 * deve->se_lun_acl will be NULL for demo-mode created LUNs
  375                 * that have not been explicitly converted to MappedLUNs ->
 376                 * struct se_lun_acl, but we remove deve->alua_port_list from
 377                 * port->sep_alua_list. This also means that active UAs and
 378                 * NodeACL context specific PR metadata for demo-mode
 379                 * MappedLUN *deve will be released below..
 380                 */
 381                spin_lock_bh(&port->sep_alua_lock);
 382                list_del(&deve->alua_port_list);
 383                spin_unlock_bh(&port->sep_alua_lock);
 384        }
 385
 386        spin_lock_irq(&nacl->device_list_lock);
 387        if (enable) {
 388                /*
 389                 * Check if the call is handling demo mode -> explicit LUN ACL
 390                 * transition.  This transition must be for the same struct se_lun
 391                 * + mapped_lun that was setup in demo mode..
 392                 */
 393                if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 394                        if (deve->se_lun_acl != NULL) {
 395                                pr_err("struct se_dev_entry->se_lun_acl"
 396                                        " already set for demo mode -> explicit"
 397                                        " LUN ACL transition\n");
 398                                spin_unlock_irq(&nacl->device_list_lock);
 399                                return -EINVAL;
 400                        }
 401                        if (deve->se_lun != lun) {
 402                                pr_err("struct se_dev_entry->se_lun does not"
 403                                        " match passed struct se_lun for demo mode"
 404                                        " -> explicit LUN ACL transition\n");
 405                                spin_unlock_irq(&nacl->device_list_lock);
 406                                return -EINVAL;
 407                        }
 408                        deve->se_lun_acl = lun_acl;
 409                        trans = 1;
 410                } else {
 411                        deve->se_lun = lun;
 412                        deve->se_lun_acl = lun_acl;
 413                        deve->mapped_lun = mapped_lun;
 414                        deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
 415                }
 416
 417                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 418                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 419                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
 420                } else {
 421                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
 422                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
 423                }
 424
 425                if (trans) {
 426                        spin_unlock_irq(&nacl->device_list_lock);
 427                        return 0;
 428                }
 429                deve->creation_time = get_jiffies_64();
 430                deve->attach_count++;
 431                spin_unlock_irq(&nacl->device_list_lock);
 432
 433                spin_lock_bh(&port->sep_alua_lock);
 434                list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
 435                spin_unlock_bh(&port->sep_alua_lock);
 436
 437                return 0;
 438        }
 439        /*
 440         * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
 441         * PR operation to complete.
 442         */
 443        spin_unlock_irq(&nacl->device_list_lock);
 444        while (atomic_read(&deve->pr_ref_count) != 0)
 445                cpu_relax();
 446        spin_lock_irq(&nacl->device_list_lock);
 447        /*
 448         * Disable struct se_dev_entry LUN ACL mapping
 449         */
 450        core_scsi3_ua_release_all(deve);
 451        deve->se_lun = NULL;
 452        deve->se_lun_acl = NULL;
 453        deve->lun_flags = 0;
 454        deve->creation_time = 0;
 455        deve->attach_count--;
 456        spin_unlock_irq(&nacl->device_list_lock);
 457
 458        core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
 459        return 0;
 460}
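
/*
 * Rough usage sketch (assumed from the TPG/LUN ACL code paths, not verbatim
 * from this file): enable=1 is used when mapping a LUN for an initiator,
 * e.g. from core_tpg_add_node_to_devs() for demo-mode NodeACLs, while
 * enable=0 is used on the teardown paths above, e.g.:
 *
 *	core_update_device_list_for_node(lun, lun_acl, mapped_lun,
 *			TRANSPORT_LUNFLAGS_READ_WRITE, nacl, tpg, 1);
 */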
 461
 462/*      core_clear_lun_from_tpg():
 463 *
 464 *
 465 */
 466void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 467{
 468        struct se_node_acl *nacl;
 469        struct se_dev_entry *deve;
 470        u32 i;
 471
 472        spin_lock_irq(&tpg->acl_node_lock);
 473        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
 474                spin_unlock_irq(&tpg->acl_node_lock);
 475
 476                spin_lock_irq(&nacl->device_list_lock);
 477                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 478                        deve = &nacl->device_list[i];
 479                        if (lun != deve->se_lun)
 480                                continue;
 481                        spin_unlock_irq(&nacl->device_list_lock);
 482
 483                        core_update_device_list_for_node(lun, NULL,
 484                                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
 485                                nacl, tpg, 0);
 486
 487                        spin_lock_irq(&nacl->device_list_lock);
 488                }
 489                spin_unlock_irq(&nacl->device_list_lock);
 490
 491                spin_lock_irq(&tpg->acl_node_lock);
 492        }
 493        spin_unlock_irq(&tpg->acl_node_lock);
 494}
 495
 496static struct se_port *core_alloc_port(struct se_device *dev)
 497{
 498        struct se_port *port, *port_tmp;
 499
 500        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
 501        if (!port) {
 502                pr_err("Unable to allocate struct se_port\n");
 503                return ERR_PTR(-ENOMEM);
 504        }
 505        INIT_LIST_HEAD(&port->sep_alua_list);
 506        INIT_LIST_HEAD(&port->sep_list);
 507        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
 508        spin_lock_init(&port->sep_alua_lock);
 509        mutex_init(&port->sep_tg_pt_md_mutex);
 510
 511        spin_lock(&dev->se_port_lock);
 512        if (dev->dev_port_count == 0x0000ffff) {
 513                pr_warn("Reached dev->dev_port_count =="
 514                                " 0x0000ffff\n");
 515                spin_unlock(&dev->se_port_lock);
 516                return ERR_PTR(-ENOSPC);
 517        }
 518again:
 519        /*
  520         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
 521         * Here is the table from spc4r17 section 7.7.3.8.
 522         *
 523         *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
 524         *
 525         * Code      Description
 526         * 0h        Reserved
 527         * 1h        Relative port 1, historically known as port A
 528         * 2h        Relative port 2, historically known as port B
 529         * 3h to FFFFh    Relative port 3 through 65 535
 530         */
 531        port->sep_rtpi = dev->dev_rpti_counter++;
 532        if (!port->sep_rtpi)
 533                goto again;
 534
 535        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
 536                /*
  537                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
 538                 * for 16-bit wrap..
 539                 */
 540                if (port->sep_rtpi == port_tmp->sep_rtpi)
 541                        goto again;
 542        }
 543        spin_unlock(&dev->se_port_lock);
 544
 545        return port;
 546}
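
/*
 * Worked example of the RTPI allocation above (illustrative): sep_rtpi holds
 * a 16-bit RELATIVE TARGET PORT IDENTIFIER, so the assignment from
 * dev_rpti_counter effectively wraps after 0xffff; the !port->sep_rtpi test
 * then skips the reserved value 0h, and the dev_sep_list walk restarts the
 * allocation whenever the wrapped value is still in use by an existing port.
 */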
 547
 548static void core_export_port(
 549        struct se_device *dev,
 550        struct se_portal_group *tpg,
 551        struct se_port *port,
 552        struct se_lun *lun)
 553{
 554        struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 555        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
 556
 557        spin_lock(&dev->se_port_lock);
 558        spin_lock(&lun->lun_sep_lock);
 559        port->sep_tpg = tpg;
 560        port->sep_lun = lun;
 561        lun->lun_sep = port;
 562        spin_unlock(&lun->lun_sep_lock);
 563
 564        list_add_tail(&port->sep_list, &dev->dev_sep_list);
 565        spin_unlock(&dev->se_port_lock);
 566
 567        if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
 568                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
 569                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
 570                        pr_err("Unable to allocate t10_alua_tg_pt"
 571                                        "_gp_member_t\n");
 572                        return;
 573                }
 574                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 575                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
 576                        su_dev->t10_alua.default_tg_pt_gp);
 577                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 578                pr_debug("%s/%s: Adding to default ALUA Target Port"
 579                        " Group: alua/default_tg_pt_gp\n",
 580                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
 581        }
 582
 583        dev->dev_port_count++;
 584        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
 585}
 586
 587/*
 588 *      Called with struct se_device->se_port_lock spinlock held.
 589 */
 590static void core_release_port(struct se_device *dev, struct se_port *port)
 591        __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
 592{
 593        /*
 594         * Wait for any port reference for PR ALL_TG_PT=1 operation
 595         * to complete in __core_scsi3_alloc_registration()
 596         */
 597        spin_unlock(&dev->se_port_lock);
 598        while (atomic_read(&port->sep_tg_pt_ref_cnt))
 599                cpu_relax();
 600        spin_lock(&dev->se_port_lock);
 601
 602        core_alua_free_tg_pt_gp_mem(port);
 603
 604        list_del(&port->sep_list);
 605        dev->dev_port_count--;
 606        kfree(port);
 607}
 608
 609int core_dev_export(
 610        struct se_device *dev,
 611        struct se_portal_group *tpg,
 612        struct se_lun *lun)
 613{
 614        struct se_port *port;
 615
 616        port = core_alloc_port(dev);
 617        if (IS_ERR(port))
 618                return PTR_ERR(port);
 619
 620        lun->lun_se_dev = dev;
 621        se_dev_start(dev);
 622
 623        atomic_inc(&dev->dev_export_obj.obj_access_count);
 624        core_export_port(dev, tpg, port, lun);
 625        return 0;
 626}
 627
 628void core_dev_unexport(
 629        struct se_device *dev,
 630        struct se_portal_group *tpg,
 631        struct se_lun *lun)
 632{
 633        struct se_port *port = lun->lun_sep;
 634
 635        spin_lock(&lun->lun_sep_lock);
 636        if (lun->lun_se_dev == NULL) {
 637                spin_unlock(&lun->lun_sep_lock);
 638                return;
 639        }
 640        spin_unlock(&lun->lun_sep_lock);
 641
 642        spin_lock(&dev->se_port_lock);
 643        atomic_dec(&dev->dev_export_obj.obj_access_count);
 644        core_release_port(dev, port);
 645        spin_unlock(&dev->se_port_lock);
 646
 647        se_dev_stop(dev);
 648        lun->lun_se_dev = NULL;
 649}
 650
 651int target_report_luns(struct se_task *se_task)
 652{
 653        struct se_cmd *se_cmd = se_task->task_se_cmd;
 654        struct se_dev_entry *deve;
 655        struct se_lun *se_lun;
 656        struct se_session *se_sess = se_cmd->se_sess;
 657        unsigned char *buf;
 658        u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 659
 660        buf = transport_kmap_data_sg(se_cmd);
 661        if (!buf)
 662                return -ENOMEM;
 663
 664        /*
 665         * If no struct se_session pointer is present, this struct se_cmd is
 666         * coming via a target_core_mod PASSTHROUGH op, and not through
 667         * a $FABRIC_MOD.  In that case, report LUN=0 only.
 668         */
 669        if (!se_sess) {
 670                int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
 671                lun_count = 1;
 672                goto done;
 673        }
 674
 675        spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
 676        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 677                deve = &se_sess->se_node_acl->device_list[i];
 678                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 679                        continue;
 680                se_lun = deve->se_lun;
 681                /*
 682                 * We determine the correct LUN LIST LENGTH even once we
 683                 * have reached the initial allocation length.
 684                 * See SPC2-R20 7.19.
 685                 */
 686                lun_count++;
 687                if ((cdb_offset + 8) >= se_cmd->data_length)
 688                        continue;
 689
 690                int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
 691                offset += 8;
 692                cdb_offset += 8;
 693        }
 694        spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
 695
 696        /*
 697         * See SPC3 r07, page 159.
 698         */
 699done:
 700        lun_count *= 8;
 701        buf[0] = ((lun_count >> 24) & 0xff);
 702        buf[1] = ((lun_count >> 16) & 0xff);
 703        buf[2] = ((lun_count >> 8) & 0xff);
 704        buf[3] = (lun_count & 0xff);
 705        transport_kunmap_data_sg(se_cmd);
 706
 707        se_task->task_scsi_status = GOOD;
 708        transport_complete_task(se_task, 1);
 709        return 0;
 710}
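
/*
 * Example of the header built above (made-up numbers, follows directly from
 * the code): with three mapped LUNs lun_count ends up as 3, so the LUN LIST
 * LENGTH written into buf[0..3] is 3 * 8 = 24 (00 00 00 18), and one 8-byte
 * LUN entry per mapped LUN is placed starting at offset 8, as far as the
 * allocation length permits.
 */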
 711
 712/*      se_release_device_for_hba():
 713 *
 714 *
 715 */
 716void se_release_device_for_hba(struct se_device *dev)
 717{
 718        struct se_hba *hba = dev->se_hba;
 719
 720        if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
 721            (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
 722            (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
 723            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
 724            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
 725                se_dev_stop(dev);
 726
 727        if (dev->dev_ptr) {
 728                kthread_stop(dev->process_thread);
 729                if (dev->transport->free_device)
 730                        dev->transport->free_device(dev->dev_ptr);
 731        }
 732
 733        spin_lock(&hba->device_lock);
 734        list_del(&dev->dev_list);
 735        hba->dev_count--;
 736        spin_unlock(&hba->device_lock);
 737
 738        core_scsi3_free_all_registrations(dev);
 739        se_release_vpd_for_dev(dev);
 740
 741        kfree(dev);
 742}
 743
 744void se_release_vpd_for_dev(struct se_device *dev)
 745{
 746        struct t10_vpd *vpd, *vpd_tmp;
 747
 748        spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
 749        list_for_each_entry_safe(vpd, vpd_tmp,
 750                        &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
 751                list_del(&vpd->vpd_list);
 752                kfree(vpd);
 753        }
 754        spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
 755}
 756
 757/*      se_free_virtual_device():
 758 *
 759 *      Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 760 */
 761int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
 762{
 763        if (!list_empty(&dev->dev_sep_list))
 764                dump_stack();
 765
 766        core_alua_free_lu_gp_mem(dev);
 767        se_release_device_for_hba(dev);
 768
 769        return 0;
 770}
 771
 772static void se_dev_start(struct se_device *dev)
 773{
 774        struct se_hba *hba = dev->se_hba;
 775
 776        spin_lock(&hba->device_lock);
 777        atomic_inc(&dev->dev_obj.obj_access_count);
 778        if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
 779                if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
 780                        dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
 781                        dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
 782                } else if (dev->dev_status &
 783                           TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
 784                        dev->dev_status &=
 785                                ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
 786                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
 787                }
 788        }
 789        spin_unlock(&hba->device_lock);
 790}
 791
 792static void se_dev_stop(struct se_device *dev)
 793{
 794        struct se_hba *hba = dev->se_hba;
 795
 796        spin_lock(&hba->device_lock);
 797        atomic_dec(&dev->dev_obj.obj_access_count);
 798        if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
 799                if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
 800                        dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
 801                        dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
 802                } else if (dev->dev_status &
 803                           TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
 804                        dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
 805                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
 806                }
 807        }
 808        spin_unlock(&hba->device_lock);
 809}
 810
 811int se_dev_check_online(struct se_device *dev)
 812{
 813        unsigned long flags;
 814        int ret;
 815
 816        spin_lock_irqsave(&dev->dev_status_lock, flags);
 817        ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
 818               (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
 819        spin_unlock_irqrestore(&dev->dev_status_lock, flags);
 820
 821        return ret;
 822}
 823
 824int se_dev_check_shutdown(struct se_device *dev)
 825{
 826        int ret;
 827
 828        spin_lock_irq(&dev->dev_status_lock);
 829        ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
 830        spin_unlock_irq(&dev->dev_status_lock);
 831
 832        return ret;
 833}
 834
 835u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 836{
 837        u32 tmp, aligned_max_sectors;
 838        /*
 839         * Limit max_sectors to a PAGE_SIZE aligned value for modern
 840         * transport_allocate_data_tasks() operation.
 841         */
 842        tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
 843        aligned_max_sectors = (tmp / block_size);
 844        if (max_sectors != aligned_max_sectors) {
 845                pr_info("Rounding down aligned max_sectors from %u"
 846                                " to %u\n", max_sectors, aligned_max_sectors);
 847                return aligned_max_sectors;
 848        }
 849
 850        return max_sectors;
 851}
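
/*
 * Worked example (assuming PAGE_SIZE == 4096): max_sectors = 2047 with a
 * 512-byte block gives 2047 * 512 = 1048064 bytes, rounddown() to a page
 * multiple yields 1044480, and dividing by the block size returns 2040,
 * so the limit is reported as rounded down from 2047 to 2040.
 */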
 852
 853void se_dev_set_default_attribs(
 854        struct se_device *dev,
 855        struct se_dev_limits *dev_limits)
 856{
 857        struct queue_limits *limits = &dev_limits->limits;
 858
 859        dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
 860        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
 861        dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
 862        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
 863        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
 864        dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
 865        dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
 866        dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
 867        dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
 868        dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
 869        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 870        dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
 871        dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
 872        /*
 873         * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
 874         * iblock_create_virtdevice() from struct queue_limits values
 875         * if blk_queue_discard()==1
 876         */
 877        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
 878        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
 879                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
 880        dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
 881        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
 882                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
 883        /*
 884         * block_size is based on subsystem plugin dependent requirements.
 885         */
 886        dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
 887        dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
 888        /*
 889         * max_sectors is based on subsystem plugin dependent requirements.
 890         */
 891        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
 892        /*
 893         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
 894         */
 895        limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
 896                                                limits->logical_block_size);
 897        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 898        /*
 899         * Set optimal_sectors from max_sectors, which can be lowered via
 900         * configfs.
 901         */
 902        dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
 903        /*
 904         * queue_depth is based on subsystem plugin dependent requirements.
 905         */
 906        dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
 907        dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
 908}
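
/*
 * The se_dev_set_*() helpers below back the per-device attribute files in
 * target configfs (layout per target_core_configfs.c; the HBA and device
 * names here are made up), so an administrator can override the defaults
 * set above, e.g.:
 *
 *	echo 4096 > /sys/kernel/config/target/core/iblock_0/my_dev/attrib/block_size
 */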
 909
 910int se_dev_set_max_unmap_lba_count(
 911        struct se_device *dev,
 912        u32 max_unmap_lba_count)
 913{
 914        dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
 915        pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
 916                        dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
 917        return 0;
 918}
 919
 920int se_dev_set_max_unmap_block_desc_count(
 921        struct se_device *dev,
 922        u32 max_unmap_block_desc_count)
 923{
 924        dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
 925                max_unmap_block_desc_count;
 926        pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
 927                        dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
 928        return 0;
 929}
 930
 931int se_dev_set_unmap_granularity(
 932        struct se_device *dev,
 933        u32 unmap_granularity)
 934{
 935        dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
 936        pr_debug("dev[%p]: Set unmap_granularity: %u\n",
 937                        dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
 938        return 0;
 939}
 940
 941int se_dev_set_unmap_granularity_alignment(
 942        struct se_device *dev,
 943        u32 unmap_granularity_alignment)
 944{
 945        dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
 946        pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
 947                        dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
 948        return 0;
 949}
 950
 951int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 952{
 953        if (flag != 0 && flag != 1) {
 954                pr_err("Illegal value %d\n", flag);
 955                return -EINVAL;
 956        }
 957
 958        if (flag) {
 959                pr_err("dpo_emulated not supported\n");
 960                return -EINVAL;
 961        }
 962
 963        return 0;
 964}
 965
 966int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 967{
 968        if (flag != 0 && flag != 1) {
 969                pr_err("Illegal value %d\n", flag);
 970                return -EINVAL;
 971        }
 972
 973        if (flag && dev->transport->fua_write_emulated == 0) {
 974                pr_err("fua_write_emulated not supported\n");
 975                return -EINVAL;
 976        }
 977        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
 978        pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
 979                        dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
 980        return 0;
 981}
 982
 983int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 984{
 985        if (flag != 0 && flag != 1) {
 986                pr_err("Illegal value %d\n", flag);
 987                return -EINVAL;
 988        }
 989
 990        if (flag) {
 991                pr_err("fua_read emulated not supported\n");
 992                return -EINVAL;
 993        }
 994
 995        return 0;
 996}
 997
 998int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 999{
1000        if (flag != 0 && flag != 1) {
1001                pr_err("Illegal value %d\n", flag);
1002                return -EINVAL;
1003        }
1004        if (flag && dev->transport->write_cache_emulated == 0) {
1005                pr_err("write_cache_emulated not supported\n");
1006                return -EINVAL;
1007        }
1008        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
1009        pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1010                        dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
1011        return 0;
1012}
1013
1014int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1015{
1016        if ((flag != 0) && (flag != 1) && (flag != 2)) {
1017                pr_err("Illegal value %d\n", flag);
1018                return -EINVAL;
1019        }
1020
1021        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1022                pr_err("dev[%p]: Unable to change SE Device"
1023                        " UA_INTRLCK_CTRL while dev_export_obj: %d count"
1024                        " exists\n", dev,
1025                        atomic_read(&dev->dev_export_obj.obj_access_count));
1026                return -EINVAL;
1027        }
1028        dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
1029        pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1030                dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
1031
1032        return 0;
1033}
1034
1035int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1036{
1037        if ((flag != 0) && (flag != 1)) {
1038                pr_err("Illegal value %d\n", flag);
1039                return -EINVAL;
1040        }
1041
1042        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1043                pr_err("dev[%p]: Unable to change SE Device TAS while"
1044                        " dev_export_obj: %d count exists\n", dev,
1045                        atomic_read(&dev->dev_export_obj.obj_access_count));
1046                return -EINVAL;
1047        }
1048        dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
1049        pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1050                dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1051
1052        return 0;
1053}
1054
1055int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1056{
1057        if ((flag != 0) && (flag != 1)) {
1058                pr_err("Illegal value %d\n", flag);
1059                return -EINVAL;
1060        }
1061        /*
1062         * We expect this value to be non-zero when generic Block Layer
1063         * Discard support is detected in iblock_create_virtdevice().
1064         */
1065        if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1066                pr_err("Generic Block Discard not supported\n");
1067                return -ENOSYS;
1068        }
1069
1070        dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
1071        pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1072                                dev, flag);
1073        return 0;
1074}
1075
1076int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1077{
1078        if ((flag != 0) && (flag != 1)) {
1079                pr_err("Illegal value %d\n", flag);
1080                return -EINVAL;
1081        }
1082        /*
1083         * We expect this value to be non-zero when generic Block Layer
1084         * Discard support is detected in iblock_create_virtdevice().
1085         */
1086        if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1087                pr_err("Generic Block Discard not supported\n");
1088                return -ENOSYS;
1089        }
1090
1091        dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
1092        pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1093                                dev, flag);
1094        return 0;
1095}
1096
1097int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1098{
1099        if ((flag != 0) && (flag != 1)) {
1100                pr_err("Illegal value %d\n", flag);
1101                return -EINVAL;
1102        }
1103        dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
1104        pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1105                (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1106        return 0;
1107}
1108
1109int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1110{
1111        if ((flag != 0) && (flag != 1)) {
1112                pr_err("Illegal value %d\n", flag);
1113                return -EINVAL;
1114        }
1115        dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
1116        pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1117               dev, flag);
1118        return 0;
1119}
1120
1121int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1122{
1123        if (flag != 0) {
1124                pr_err("dev[%p]: SE Device emulation of restricted"
1125                        " reordering not implemented\n", dev);
1126                return -ENOSYS;
1127        }
1128        dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
1129        pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1130        return 0;
1131}
1132
1133/*
1134 * Note, this can only be called on unexported SE Device Object.
1135 */
1136int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1137{
1138        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1139                pr_err("dev[%p]: Unable to change SE Device TCQ while"
1140                        " dev_export_obj: %d count exists\n", dev,
1141                        atomic_read(&dev->dev_export_obj.obj_access_count));
1142                return -EINVAL;
1143        }
1144        if (!queue_depth) {
1145                pr_err("dev[%p]: Illegal ZERO value for queue"
1146                        "_depth\n", dev);
1147                return -EINVAL;
1148        }
1149
1150        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1151                if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1152                        pr_err("dev[%p]: Passed queue_depth: %u"
1153                                " exceeds TCM/SE_Device TCQ: %u\n",
1154                                dev, queue_depth,
1155                                dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1156                        return -EINVAL;
1157                }
1158        } else {
1159                if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
1160                        if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1161                                pr_err("dev[%p]: Passed queue_depth:"
1162                                        " %u exceeds TCM/SE_Device MAX"
1163                                        " TCQ: %u\n", dev, queue_depth,
1164                                        dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1165                                return -EINVAL;
1166                        }
1167                }
1168        }
1169
1170        dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1171        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1172                        dev, queue_depth);
1173        return 0;
1174}
1175
1176int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1177{
1178        int force = 0; /* Force setting for VDEVS */
1179
1180        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1181                pr_err("dev[%p]: Unable to change SE Device"
1182                        " max_sectors while dev_export_obj: %d count exists\n",
1183                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1184                return -EINVAL;
1185        }
1186        if (!max_sectors) {
1187                pr_err("dev[%p]: Illegal ZERO value for"
1188                        " max_sectors\n", dev);
1189                return -EINVAL;
1190        }
1191        if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1192                pr_err("dev[%p]: Passed max_sectors: %u less than"
1193                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1194                                DA_STATUS_MAX_SECTORS_MIN);
1195                return -EINVAL;
1196        }
1197        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1198                if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1199                        pr_err("dev[%p]: Passed max_sectors: %u"
1200                                " greater than TCM/SE_Device max_sectors:"
1201                                " %u\n", dev, max_sectors,
1202                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1203                        return -EINVAL;
1204                }
1205        } else {
1206                if (!force && (max_sectors >
1207                                 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
1208                        pr_err("dev[%p]: Passed max_sectors: %u"
1209                                " greater than TCM/SE_Device max_sectors"
1210                                ": %u, use force=1 to override.\n", dev,
1211                                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1212                        return -EINVAL;
1213                }
1214                if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1215                        pr_err("dev[%p]: Passed max_sectors: %u"
1216                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
1217                                " %u\n", dev, max_sectors,
1218                                DA_STATUS_MAX_SECTORS_MAX);
1219                        return -EINVAL;
1220                }
1221        }
1222        /*
1223         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1224         */
1225        max_sectors = se_dev_align_max_sectors(max_sectors,
1226                                dev->se_sub_dev->se_dev_attrib.block_size);
1227
1228        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1229        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1230                        dev, max_sectors);
1231        return 0;
1232}
1233
1234int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1235{
1236        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1237                pr_err("dev[%p]: Unable to change SE Device"
1238                        " optimal_sectors while dev_export_obj: %d count exists\n",
1239                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1240                return -EINVAL;
1241        }
1242        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1243                pr_err("dev[%p]: Passed optimal_sectors cannot be"
1244                                " changed for TCM/pSCSI\n", dev);
1245                return -EINVAL;
1246        }
1247        if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
1248                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1249                        " greater than max_sectors: %u\n", dev,
1250                        optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
1251                return -EINVAL;
1252        }
1253
1254        dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
1255        pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1256                        dev, optimal_sectors);
1257        return 0;
1258}
1259
1260int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1261{
1262        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1263                pr_err("dev[%p]: Unable to change SE Device block_size"
1264                        " while dev_export_obj: %d count exists\n", dev,
1265                        atomic_read(&dev->dev_export_obj.obj_access_count));
1266                return -EINVAL;
1267        }
1268
1269        if ((block_size != 512) &&
1270            (block_size != 1024) &&
1271            (block_size != 2048) &&
1272            (block_size != 4096)) {
1273                pr_err("dev[%p]: Illegal value for block_size: %u"
1274                        " for SE device, must be 512, 1024, 2048 or 4096\n",
1275                        dev, block_size);
1276                return -EINVAL;
1277        }
1278
1279        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1280                pr_err("dev[%p]: Not allowed to change block_size for"
1281                        " Physical Device; use the Linux/SCSI layer to change"
1282                        " the block_size of the underlying hardware\n", dev);
1283                return -EINVAL;
1284        }
1285
1286        dev->se_sub_dev->se_dev_attrib.block_size = block_size;
1287        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1288                        dev, block_size);
1289        return 0;
1290}
1291
1292struct se_lun *core_dev_add_lun(
1293        struct se_portal_group *tpg,
1294        struct se_hba *hba,
1295        struct se_device *dev,
1296        u32 lun)
1297{
1298        struct se_lun *lun_p;
1299        u32 lun_access = 0;
1300        int rc;
1301
1302        if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1303                pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1304                        atomic_read(&dev->dev_access_obj.obj_access_count));
1305                return ERR_PTR(-EACCES);
1306        }
1307
1308        lun_p = core_tpg_pre_addlun(tpg, lun);
1309        if (IS_ERR(lun_p))
1310                return lun_p;
1311
1312        if (dev->dev_flags & DF_READ_ONLY)
1313                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1314        else
1315                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
1316
1317        rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
1318        if (rc < 0)
1319                return ERR_PTR(rc);
1320
1321        pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1322                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1323                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
1324                tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
1325        /*
1326         * Update LUN maps for dynamically added initiators when
1327         * generate_node_acl is enabled.
1328         */
1329        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1330                struct se_node_acl *acl;
1331                spin_lock_irq(&tpg->acl_node_lock);
1332                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1333                        if (acl->dynamic_node_acl &&
1334                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1335                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1336                                spin_unlock_irq(&tpg->acl_node_lock);
1337                                core_tpg_add_node_to_devs(acl, tpg);
1338                                spin_lock_irq(&tpg->acl_node_lock);
1339                        }
1340                }
1341                spin_unlock_irq(&tpg->acl_node_lock);
1342        }
1343
1344        return lun_p;
1345}
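
/*
 * Sketch of how this is reached from userspace (assumed from the generic
 * fabric configfs code; paths and names are illustrative): creating a LUN
 * directory under a fabric TPG and symlinking a backstore device into it,
 * e.g.
 *
 *	mkdir .../tpgt_1/lun/lun_0
 *	ln -s /sys/kernel/config/target/core/iblock_0/my_dev .../tpgt_1/lun/lun_0/my_dev
 *
 * ends up calling core_dev_add_lun() for that struct se_device.
 */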
1346
1347/*      core_dev_del_lun():
1348 *
1349 *
1350 */
1351int core_dev_del_lun(
1352        struct se_portal_group *tpg,
1353        u32 unpacked_lun)
1354{
1355        struct se_lun *lun;
1356
1357        lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1358        if (IS_ERR(lun))
1359                return PTR_ERR(lun);
1360
1361        core_tpg_post_dellun(tpg, lun);
1362
1363        pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1364                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1365                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1366                tpg->se_tpg_tfo->get_fabric_name());
1367
1368        return 0;
1369}
1370
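/*
 * core_get_lun_from_tpg():
 *
 * Return the struct se_lun for @unpacked_lun from the TPG's LUN table,
 * but only while that slot is still in TRANSPORT_LUN_STATUS_FREE;
 * otherwise log an error and return NULL.
 */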
1371struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1372{
1373        struct se_lun *lun;
1374
1375        spin_lock(&tpg->tpg_lun_lock);
1376        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1377                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
1378                        " %u for Target Portal Group: %hu\n",
1379                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1380                        TRANSPORT_MAX_LUNS_PER_TPG-1,
1381                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
1382                spin_unlock(&tpg->tpg_lun_lock);
1383                return NULL;
1384        }
1385        lun = &tpg->tpg_lun_list[unpacked_lun];
1386
1387        if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1388                pr_err("%s Logical Unit Number: %u is not free on"
1389                        " Target Portal Group: %hu, ignoring request.\n",
1390                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1391                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
1392                spin_unlock(&tpg->tpg_lun_lock);
1393                return NULL;
1394        }
1395        spin_unlock(&tpg->tpg_lun_lock);
1396
1397        return lun;
1398}
1399
1400/*      core_dev_get_lun():
1401 *
1402 *      Look up an active struct se_lun by unpacked LUN within a TPG.
1403 */
1404static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1405{
1406        struct se_lun *lun;
1407
1408        spin_lock(&tpg->tpg_lun_lock);
1409        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1410                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
1411                        " %u for Target Portal Group: %hu\n",
1412                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1413                        TRANSPORT_MAX_LUNS_PER_TPG-1,
1414                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
1415                spin_unlock(&tpg->tpg_lun_lock);
1416                return NULL;
1417        }
1418        lun = &tpg->tpg_lun_list[unpacked_lun];
1419
1420        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1421                pr_err("%s Logical Unit Number: %u is not active on"
1422                        " Target Portal Group: %hu, ignoring request.\n",
1423                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1424                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
1425                spin_unlock(&tpg->tpg_lun_lock);
1426                return NULL;
1427        }
1428        spin_unlock(&tpg->tpg_lun_lock);
1429
1430        return lun;
1431}
1432
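/*
 * core_dev_init_initiator_node_lun_acl():
 *
 * Allocate a struct se_lun_acl describing a MappedLUN for an existing
 * initiator node ACL.  On failure NULL is returned and *ret carries
 * the errno (-EOVERFLOW, -EINVAL or -ENOMEM).
 */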
1433struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1434        struct se_portal_group *tpg,
1435        u32 mapped_lun,
1436        char *initiatorname,
1437        int *ret)
1438{
1439        struct se_lun_acl *lacl;
1440        struct se_node_acl *nacl;
1441
1442        if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1443                pr_err("%s InitiatorName exceeds maximum size.\n",
1444                        tpg->se_tpg_tfo->get_fabric_name());
1445                *ret = -EOVERFLOW;
1446                return NULL;
1447        }
1448        nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1449        if (!nacl) {
1450                *ret = -EINVAL;
1451                return NULL;
1452        }
1453        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1454        if (!lacl) {
1455                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1456                *ret = -ENOMEM;
1457                return NULL;
1458        }
1459
1460        INIT_LIST_HEAD(&lacl->lacl_list);
1461        lacl->mapped_lun = mapped_lun;
1462        lacl->se_lun_nacl = nacl;
1463        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1464
1465        return lacl;
1466}
1467
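/*
 * core_dev_add_initiator_node_lun_acl():
 *
 * Attach a previously allocated struct se_lun_acl to an active LUN,
 * demoting the requested access to read-only when the LUN itself is
 * read-only, then check for APTPL persistent reservation
 * pre-registrations that must be enabled for the new mapping.
 */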
1468int core_dev_add_initiator_node_lun_acl(
1469        struct se_portal_group *tpg,
1470        struct se_lun_acl *lacl,
1471        u32 unpacked_lun,
1472        u32 lun_access)
1473{
1474        struct se_lun *lun;
1475        struct se_node_acl *nacl;
1476
1477        lun = core_dev_get_lun(tpg, unpacked_lun);
1478        if (!lun) {
1479                pr_err("%s Logical Unit Number: %u is not active on"
1480                        " Target Portal Group: %hu, ignoring request.\n",
1481                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1482                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
1483                return -EINVAL;
1484        }
1485
1486        nacl = lacl->se_lun_nacl;
1487        if (!nacl)
1488                return -EINVAL;
1489
1490        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1491            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1492                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1493
1494        lacl->se_lun = lun;
1495
1496        if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
1497                        lun_access, nacl, tpg, 1) < 0)
1498                return -EINVAL;
1499
1500        spin_lock(&lun->lun_acl_lock);
1501        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1502        atomic_inc(&lun->lun_acl_count);
1503        smp_mb__after_atomic_inc();
1504        spin_unlock(&lun->lun_acl_lock);
1505
1506        pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
1507                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1508                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1509                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1510                lacl->initiatorname);
1511        /*
1512         * Check to see if there are any existing persistent reservation APTPL
1513         * pre-registrations that need to be enabled for this LUN ACL.
1514         */
1515        core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1516        return 0;
1517}
1518
1519/*      core_dev_del_initiator_node_lun_acl():
1520 *
1521 *      Remove a MappedLUN ACL for an initiator node from a struct se_lun.
1522 */
1523int core_dev_del_initiator_node_lun_acl(
1524        struct se_portal_group *tpg,
1525        struct se_lun *lun,
1526        struct se_lun_acl *lacl)
1527{
1528        struct se_node_acl *nacl;
1529
1530        nacl = lacl->se_lun_nacl;
1531        if (!nacl)
1532                return -EINVAL;
1533
1534        spin_lock(&lun->lun_acl_lock);
1535        list_del(&lacl->lacl_list);
1536        atomic_dec(&lun->lun_acl_count);
1537        smp_mb__after_atomic_dec();
1538        spin_unlock(&lun->lun_acl_lock);
1539
1540        core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
1541                TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
1542
1543        lacl->se_lun = NULL;
1544
1545        pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1546                " InitiatorNode: %s Mapped LUN: %u\n",
1547                tpg->se_tpg_tfo->get_fabric_name(),
1548                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1549                lacl->initiatorname, lacl->mapped_lun);
1550
1551        return 0;
1552}
1553
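/*
 * core_dev_free_initiator_node_lun_acl():
 *
 * Release a struct se_lun_acl allocated by
 * core_dev_init_initiator_node_lun_acl().
 */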
1554void core_dev_free_initiator_node_lun_acl(
1555        struct se_portal_group *tpg,
1556        struct se_lun_acl *lacl)
1557{
1558        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1559                " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1560                tpg->se_tpg_tfo->tpg_get_tag(tpg),
1561                tpg->se_tpg_tfo->get_fabric_name(),
1562                lacl->initiatorname, lacl->mapped_lun);
1563
1564        kfree(lacl);
1565}
1566
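/*
 * core_dev_setup_virtual_lun0():
 *
 * Create the internal rd_mcp backed device used as the global virtual
 * LUN 0 (g_lun0_dev).  Allocates an internal-use HBA and a
 * struct se_subsystem_dev, configures an 8 page ramdisk backstore and
 * builds the virtual device; on any failure the partially constructed
 * state is torn down again.
 */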
1567int core_dev_setup_virtual_lun0(void)
1568{
1569        struct se_hba *hba;
1570        struct se_device *dev;
1571        struct se_subsystem_dev *se_dev = NULL;
1572        struct se_subsystem_api *t;
1573        char buf[16];
1574        int ret;
1575
1576        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1577        if (IS_ERR(hba))
1578                return PTR_ERR(hba);
1579
1580        lun0_hba = hba;
1581        t = hba->transport;
1582
1583        se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1584        if (!se_dev) {
1585                pr_err("Unable to allocate memory for"
1586                                " struct se_subsystem_dev\n");
1587                ret = -ENOMEM;
1588                goto out;
1589        }
1590        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1591        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1592        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
1593        INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
1594        spin_lock_init(&se_dev->t10_pr.registration_lock);
1595        spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
1596        INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1597        spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1598        spin_lock_init(&se_dev->se_dev_lock);
1599        se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1600        se_dev->t10_wwn.t10_sub_dev = se_dev;
1601        se_dev->t10_alua.t10_sub_dev = se_dev;
1602        se_dev->se_dev_attrib.da_sub_dev = se_dev;
1603        se_dev->se_dev_hba = hba;
1604
1605        se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1606        if (!se_dev->se_dev_su_ptr) {
1607                pr_err("Unable to locate subsystem dependent pointer"
1608                        " from allocate_virtdevice()\n");
1609                ret = -ENOMEM;
1610                goto out;
1611        }
1612        lun0_su_dev = se_dev;
1613
1614        memset(buf, 0, sizeof(buf));
1615        snprintf(buf, sizeof(buf), "rd_pages=8");
1616        t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1617
1618        dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1619        if (IS_ERR(dev)) {
1620                ret = PTR_ERR(dev);
1621                goto out;
1622        }
1623        se_dev->se_dev_ptr = dev;
1624        g_lun0_dev = dev;
1625
1626        return 0;
1627out:
1628        lun0_su_dev = NULL;
1629        kfree(se_dev);
1630        if (lun0_hba) {
1631                core_delete_hba(lun0_hba);
1632                lun0_hba = NULL;
1633        }
1634        return ret;
1635}
1636
1637
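/*
 * core_dev_release_virtual_lun0():
 *
 * Undo core_dev_setup_virtual_lun0(): free the virtual LUN 0 device,
 * its struct se_subsystem_dev and the internal HBA, if they were set up.
 */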
1638void core_dev_release_virtual_lun0(void)
1639{
1640        struct se_hba *hba = lun0_hba;
1641        struct se_subsystem_dev *su_dev = lun0_su_dev;
1642
1643        if (!hba)
1644                return;
1645
1646        if (g_lun0_dev)
1647                se_free_virtual_device(g_lun0_dev, hba);
1648
1649        kfree(su_dev);
1650        core_delete_hba(hba);
1651}
1652