linux/drivers/target/target_core_tpg.c
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*      core_clear_initiator_node_from_tpg():
 *
 *      Drop all active MappedLUN access for an initiator node ACL that is
 *      being removed from a TPG.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entry's device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*      __core_tpg_get_initiator_node_acl():
 *
 *      Must be called with tpg->acl_node_lock held; note that callers in
 *      this file take it with spin_lock_irq(), not spin_lock_bh().
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *      Locked wrapper around __core_tpg_get_initiator_node_acl().
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        spin_unlock_irq(&tpg->acl_node_lock);

        return acl;
}

/*      core_tpg_add_node_to_devs():
 *
 *      Map all active TPG LUNs into a (demo-mode) node ACL's device list.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target fabric modules,
                 * demo_mode_write_protect is ON, i.e. demo-mode LUNs
                 * are exported READ-ONLY.
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * In the default read-only demo mode, only
                         * non-disk devices (e.g. optical drives) are
                         * given R/W access.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}

/*      core_set_queue_depth_for_node():
 *
 *      Ensure a node ACL never carries a queue depth of zero.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

void array_free(void *array, int n)
{
        void **a = array;
        int i;

        for (i = 0; i < n; i++)
                kfree(a[i]);
        kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
        void **a;
        int i;

        a = kzalloc(n * sizeof(void *), flags);
        if (!a)
                return NULL;
        for (i = 0; i < n; i++) {
                a[i] = kzalloc(size, flags);
                if (!a[i]) {
                        array_free(a, n);
                        return NULL;
                }
        }
        return a;
}
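
/*
 * Usage sketch (editorial note, not driver code): array_zalloc() and
 * array_free() are paired with the same element count; a mid-loop
 * allocation failure is handled inside array_zalloc() itself, which
 * frees the whole array (kfree(NULL) is a no-op for the unset slots):
 *
 *      struct se_dev_entry **entries;
 *
 *      entries = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
 *                      sizeof(struct se_dev_entry), GFP_KERNEL);
 *      if (!entries)
 *              return -ENOMEM;
 *      ...
 *      array_free(entries, TRANSPORT_MAX_LUNS_PER_TPG);
 *
 * Aside: kcalloc(n, sizeof(void *), flags) would be the overflow-safe
 * spelling of the kzalloc(n * sizeof(void *), ...) above, though n is
 * bounded by TRANSPORT_MAX_LUNS_PER_TPG here.
 */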

/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize the per-node array of struct se_dev_entry.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_dev_entry), GFP_KERNEL);
        if (!nacl->device_list) {
                pr_err("Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

/*      core_tpg_check_initiator_node_acl()
 *
 *      Look up an explicit ACL for initiatorname, or auto-generate a
 *      dynamic one when the fabric has demo mode enabled.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }
        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

        pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
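
/*
 * Example (hedged sketch, placeholder names): a fabric module typically
 * calls this from its login path, with "se_tpg" and "initiatorname"
 * taken from fabric-specific login state:
 *
 *      struct se_node_acl *nacl;
 *
 *      nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *      if (!nacl)
 *              return -EPERM;  (no explicit ACL, demo mode disabled)
 *
 * A NULL return means no matching explicit ACL exists and
 * tpg_check_demo_mode() declined to auto-generate a dynamic one.
 */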

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*      core_tpg_add_initiator_node_acl():
 *
 *      Register an explicit node ACL, converting an existing dynamic
 *      (demo-mode) ACL in place when one already exists.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_irq(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
                                                        se_nacl);
                        goto done;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        if (!se_nacl) {
                pr_err("struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the se_node_acl_s is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

done:
        pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
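
/*
 * Example (illustrative sketch): explicit ACLs normally arrive via a
 * fabric's configfs ->fabric_make_nodeacl() callback, which allocates
 * its own nacl container and then calls (placeholder names):
 *
 *      acl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl,
 *                      name, queue_depth);
 *      if (IS_ERR(acl))
 *              return PTR_ERR(acl);
 *
 * A duplicate explicit ACL fails with -EEXIST; an existing dynamic
 * (demo-mode) ACL is instead converted in place and returned via the
 * done: path above.
 */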

/*      core_tpg_del_initiator_node_acl():
 *
 *      Remove a node ACL from its TPG, shutting down any active sessions
 *      and waiting for the final reference to drop.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                target_put_session(sess);
                if (!rc)
                        continue;
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
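
/*
 * Editorial note on the teardown ordering implemented above: the ACL is
 * first unhooked from the TPG list, its active sessions are shut down,
 * and wait_for_completion(&acl->acl_free_comp) then blocks until the
 * final target_put_nacl() fires from transport_deregister_session().
 * Only after that are the MappedLUNs and device_list released, so
 * callers must be able to sleep here.
 */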

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change the queue depth for a node ACL, optionally forcing
 *      reinstatement of any active session.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator
         * Node.  Change the value in the Node's struct se_node_acl, and
         * call core_set_queue_depth_for_node() to apply the requested
         * queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                spin_lock_irq(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        spin_lock_irq(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_irq(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
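
/*
 * Usage sketch (hedged, placeholder names): typically driven from a
 * fabric's configfs attribute store handler, e.g.:
 *
 *      ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *                      acl->initiatorname, new_depth, 1);
 *      if (ret < 0)
 *              return ret;
 *
 * With force == 0 the call fails with -EEXIST while a session is live;
 * with force == 1 a live session is torn down via close_session() so
 * the initiator relogins with the new queue depth.
 */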

/*      core_tpg_set_initiator_node_tag():
 *
 *      Initiator nodeacl tags are not used internally, but may be used by
 *      userspace to emulate aliases or groups.
 *      Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
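
/*
 * Example: userspace clears a tag by writing the literal string "NULL"
 * through the configfs attribute backed by this helper (sketch):
 *
 *      core_tpg_set_initiator_node_tag(tpg, acl, "backup-hosts");
 *      core_tpg_set_initiator_node_tag(tpg, acl, "NULL");
 *
 * The first call sets the tag and returns its snprintf() length; the
 * second clears it and returns 0. Anything longer than
 * MAX_ACL_TAG_SIZE - 1 characters fails with -EINVAL.
 */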

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_lun), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                pr_err("Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_link_magic = SE_LUN_LINK_MAGIC;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        array_free(se_tpg->tpg_lun_list,
                                   TRANSPORT_MAX_LUNS_PER_TPG);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
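
/*
 * Registration sketch (illustrative; names are placeholders): a fabric's
 * ->fabric_make_tpg() would typically end with:
 *
 *      ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *                      tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 *
 * TRANSPORT_TPG_TYPE_NORMAL also sets up the virtual LUN 0 backed by
 * g_lun0_dev; discovery TPGs skip it.
 */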

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_irq(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_irq(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

                spin_lock_irq(&se_tpg->acl_node_lock);
        }
        spin_unlock_irq(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        int ret;

        ret = core_dev_export(lun_ptr, tpg, lun);
        if (ret < 0)
                return ret;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
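
/*
 * Pairing sketch: core_tpg_pre_addlun() validates the index and returns
 * the reserved slot, core_tpg_post_addlun() exports the backing device
 * and flips the LUN to ACTIVE. A caller (e.g. core_dev_add_lun()) does
 * roughly:
 *
 *      lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *      if (IS_ERR(lun))
 *              return lun;
 *      ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 *
 * Note tpg_lun_lock is dropped between the two calls; serialization of
 * concurrent add/del is assumed to come from the configfs layer above.
 */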

static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
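
/*
 * Deletion mirrors addition (sketch): core_tpg_pre_dellun() looks up and
 * validates the ACTIVE LUN, core_tpg_post_dellun() quiesces and frees it:
 *
 *      lun = core_tpg_pre_dellun(tpg, unpacked_lun);
 *      if (IS_ERR(lun))
 *              return PTR_ERR(lun);
 *      core_tpg_post_dellun(tpg, lun);
 *
 * core_tpg_shutdown_lun() first clears the LUN from all node ACL maps
 * and in-flight sessions before the device is unexported and the LUN
 * status returns to FREE.
 */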