linux/drivers/target/target_core_configfs.c
   1/*******************************************************************************
   2 * Filename:  target_core_configfs.c
   3 *
   4 * This file contains ConfigFS logic for the Generic Target Engine project.
   5 *
   6 * Copyright (c) 2008-2011 Rising Tide Systems
   7 * Copyright (c) 2008-2011 Linux-iSCSI.org
   8 *
   9 * Nicholas A. Bellinger <nab@kernel.org>
  10 *
  11 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 ****************************************************************************/
  23
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <generated/utsrelease.h>
  27#include <linux/utsname.h>
  28#include <linux/init.h>
  29#include <linux/fs.h>
  30#include <linux/namei.h>
  31#include <linux/slab.h>
  32#include <linux/types.h>
  33#include <linux/delay.h>
  34#include <linux/unistd.h>
  35#include <linux/string.h>
  36#include <linux/parser.h>
  37#include <linux/syscalls.h>
  38#include <linux/configfs.h>
  39#include <linux/spinlock.h>
  40
  41#include <target/target_core_base.h>
  42#include <target/target_core_device.h>
  43#include <target/target_core_transport.h>
  44#include <target/target_core_fabric_ops.h>
  45#include <target/target_core_fabric_configfs.h>
  46#include <target/target_core_configfs.h>
  47#include <target/configfs_macros.h>
  48
  49#include "target_core_alua.h"
  50#include "target_core_hba.h"
  51#include "target_core_pr.h"
  52#include "target_core_rd.h"
  53#include "target_core_stat.h"
  54
  55extern struct t10_alua_lu_gp *default_lu_gp;
  56
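    /* List of registered fabric modules, protected by g_tf_lock. */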
  57static struct list_head g_tf_list;
  58static struct mutex g_tf_lock;
  59
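    /*
     * Wrapper pairing a raw struct configfs_attribute with typed show()/store()
     * callbacks for the generic attribute handlers in this file.
     */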
  60struct target_core_configfs_attribute {
  61        struct configfs_attribute attr;
  62        ssize_t (*show)(void *, char *);
  63        ssize_t (*store)(void *, const char *, size_t);
  64};
  65
  66static struct config_group target_core_hbagroup;
  67static struct config_group alua_group;
  68static struct config_group alua_lu_gps_group;
  69
  70static inline struct se_hba *
  71item_to_hba(struct config_item *item)
  72{
  73        return container_of(to_config_group(item), struct se_hba, hba_group);
  74}
  75
  76/*
  77 * Attributes for /sys/kernel/config/target/
  78 */
  79static ssize_t target_core_attr_show(struct config_item *item,
  80                                      struct configfs_attribute *attr,
  81                                      char *page)
  82{
  83        return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
  84                " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
  85                utsname()->sysname, utsname()->machine);
  86}
  87
  88static struct configfs_item_operations target_core_fabric_item_ops = {
  89        .show_attribute = target_core_attr_show,
  90};
  91
  92static struct configfs_attribute target_core_item_attr_version = {
  93        .ca_owner       = THIS_MODULE,
  94        .ca_name        = "version",
  95        .ca_mode        = S_IRUGO,
  96};
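
    /*
     * Usage example, assuming configfs is mounted at /sys/kernel/config:
     * reading the read-only "version" attribute defined above invokes
     * target_core_attr_show() and prints a single line of the form
     *
     *   Target Engine Core ConfigFS Infrastructure <version> on <sysname>/<machine> on <UTS_RELEASE>
     *
     * e.g. via: cat /sys/kernel/config/target/version
     */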
  97
  98static struct target_fabric_configfs *target_core_get_fabric(
  99        const char *name)
 100{
 101        struct target_fabric_configfs *tf;
 102
 103        if (!name)
 104                return NULL;
 105
 106        mutex_lock(&g_tf_lock);
 107        list_for_each_entry(tf, &g_tf_list, tf_list) {
 108                if (!strcmp(tf->tf_name, name)) {
 109                        atomic_inc(&tf->tf_access_cnt);
 110                        mutex_unlock(&g_tf_lock);
 111                        return tf;
 112                }
 113        }
 114        mutex_unlock(&g_tf_lock);
 115
 116        return NULL;
 117}
 118
 119/*
 120 * Called from target_core_fabric_group_ops->make_group()
 121 */
 122static struct config_group *target_core_register_fabric(
 123        struct config_group *group,
 124        const char *name)
 125{
 126        struct target_fabric_configfs *tf;
 127        int ret;
 128
 129        pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
 130                        " %s\n", group, name);
 131        /*
 132         * Below are some hardcoded request_module() calls to automatically
 133         * load fabric modules when the following is called:
 134         *
 135         * mkdir -p /sys/kernel/config/target/$MODULE_NAME
 136         *
 137         * Note that this does not limit which TCM fabric module can be
 138         * registered, but simply provides auto loading logic for mkdir(2)
 139         * system calls with known TCM fabric module names.
 140         */
 141        if (!strncmp(name, "iscsi", 5)) {
 142                /*
 143                 * Automatically load the LIO Target fabric module when the
 144                 * following is called:
 145                 *
 146                 * mkdir -p $CONFIGFS/target/iscsi
 147                 */
 148                ret = request_module("iscsi_target_mod");
 149                if (ret < 0) {
 150                        pr_err("request_module() failed for"
 151                                " iscsi_target_mod.ko: %d\n", ret);
 152                        return ERR_PTR(-EINVAL);
 153                }
 154        } else if (!strncmp(name, "loopback", 8)) {
 155                /*
 156                 * Automatically load the tcm_loop fabric module when the
 157                 * following is called:
 158                 *
 159                 * mkdir -p $CONFIGFS/target/loopback
 160                 */
 161                ret = request_module("tcm_loop");
 162                if (ret < 0) {
 163                        pr_err("request_module() failed for"
 164                                " tcm_loop.ko: %d\n", ret);
 165                        return ERR_PTR(-EINVAL);
 166                }
 167        }
 168
 169        tf = target_core_get_fabric(name);
 170        if (!tf) {
 171                pr_err("target_core_get_fabric() failed for %s\n",
 172                        name);
 173                return ERR_PTR(-EINVAL);
 174        }
 175        pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
 176                        " %s\n", tf->tf_name);
 177        /*
 178         * On a successful target_core_get_fabric() lookup, the returned
 179         * struct target_fabric_configfs *tf will contain a usage reference.
 180         */
 181        pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
 182                        &TF_CIT_TMPL(tf)->tfc_wwn_cit);
 183
 184        tf->tf_group.default_groups = tf->tf_default_groups;
 185        tf->tf_group.default_groups[0] = &tf->tf_disc_group;
 186        tf->tf_group.default_groups[1] = NULL;
 187
 188        config_group_init_type_name(&tf->tf_group, name,
 189                        &TF_CIT_TMPL(tf)->tfc_wwn_cit);
 190        config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
 191                        &TF_CIT_TMPL(tf)->tfc_discovery_cit);
 192
 193        pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
 194                        " %s\n", tf->tf_group.cg_item.ci_name);
 195        /*
 196         * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
 197         */
 198        tf->tf_ops.tf_subsys = tf->tf_subsys;
 199        tf->tf_fabric = &tf->tf_group.cg_item;
 200        pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
 201                        " for %s\n", name);
 202
 203        return &tf->tf_group;
 204}
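
    /*
     * Example, assuming configfs is mounted at /sys/kernel/config and the
     * tcm_loop fabric module has registered itself: a single mkdir(2)
     *
     *   mkdir /sys/kernel/config/target/loopback
     *
     * ends up in target_core_register_fabric() above and produces the fabric
     * group plus its "discovery_auth" default group:
     *
     *   /sys/kernel/config/target/loopback/
     *   /sys/kernel/config/target/loopback/discovery_auth/
     */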
 205
 206/*
 207 * Called from target_core_fabric_group_ops->drop_item()
 208 */
 209static void target_core_deregister_fabric(
 210        struct config_group *group,
 211        struct config_item *item)
 212{
 213        struct target_fabric_configfs *tf = container_of(
 214                to_config_group(item), struct target_fabric_configfs, tf_group);
 215        struct config_group *tf_group;
 216        struct config_item *df_item;
 217        int i;
 218
 219        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
 220                " tf list\n", config_item_name(item));
 221
 222        pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
 223                        " %s\n", tf->tf_name);
 224        atomic_dec(&tf->tf_access_cnt);
 225
 226        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
 227                        " tf->tf_fabric for %s\n", tf->tf_name);
 228        tf->tf_fabric = NULL;
 229
 230        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
 231                        " %s\n", config_item_name(item));
 232
 233        tf_group = &tf->tf_group;
 234        for (i = 0; tf_group->default_groups[i]; i++) {
 235                df_item = &tf_group->default_groups[i]->cg_item;
 236                tf_group->default_groups[i] = NULL;
 237                config_item_put(df_item);
 238        }
 239        config_item_put(item);
 240}
 241
 242static struct configfs_group_operations target_core_fabric_group_ops = {
 243        .make_group     = &target_core_register_fabric,
 244        .drop_item      = &target_core_deregister_fabric,
 245};
 246
 247/*
 248 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 249 */
 250static struct configfs_attribute *target_core_fabric_item_attrs[] = {
 251        &target_core_item_attr_version,
 252        NULL,
 253};
 254
 255/*
 256 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 257 */
 258static struct config_item_type target_core_fabrics_item = {
 259        .ct_item_ops    = &target_core_fabric_item_ops,
 260        .ct_group_ops   = &target_core_fabric_group_ops,
 261        .ct_attrs       = target_core_fabric_item_attrs,
 262        .ct_owner       = THIS_MODULE,
 263};
 264
 265static struct configfs_subsystem target_core_fabrics = {
 266        .su_group = {
 267                .cg_item = {
 268                        .ci_namebuf = "target",
 269                        .ci_type = &target_core_fabrics_item,
 270                },
 271        },
 272};
 273
 274static struct configfs_subsystem *target_core_subsystem[] = {
 275        &target_core_fabrics,
 276        NULL,
 277};
 278
 279/*##############################################################################
 280// Start functions called by external Target Fabrics Modules
 281//############################################################################*/
 282
 283/*
 284 * First function called by fabric modules to:
 285 *
 286 * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
 287 * 2) Add struct target_fabric_configfs to g_tf_list
 288 * 3) Return struct target_fabric_configfs to fabric module to be passed
 289 *    into target_fabric_configfs_register().
 290 */
 291struct target_fabric_configfs *target_fabric_configfs_init(
 292        struct module *fabric_mod,
 293        const char *name)
 294{
 295        struct target_fabric_configfs *tf;
 296
 297        if (!(name)) {
 298                pr_err("Unable to locate passed fabric name\n");
 299                return ERR_PTR(-EINVAL);
 300        }
 301        if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
 302                pr_err("Passed name: %s exceeds TARGET_FABRIC"
 303                        "_NAME_SIZE\n", name);
 304                return ERR_PTR(-EINVAL);
 305        }
 306
 307        tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
 308        if (!tf)
 309                return ERR_PTR(-ENOMEM);
 310
 311        INIT_LIST_HEAD(&tf->tf_list);
 312        atomic_set(&tf->tf_access_cnt, 0);
 313        /*
 314         * Setup the default generic struct config_item_type's (cits) in
 315         * struct target_fabric_configfs->tf_cit_tmpl
 316         */
 317        tf->tf_module = fabric_mod;
 318        target_fabric_setup_cits(tf);
 319
 320        tf->tf_subsys = target_core_subsystem[0];
 321        snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
 322
 323        mutex_lock(&g_tf_lock);
 324        list_add_tail(&tf->tf_list, &g_tf_list);
 325        mutex_unlock(&g_tf_lock);
 326
 327        pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
 328                        ">>>>>>>>>>>>>>\n");
 329        pr_debug("Initialized struct target_fabric_configfs: %p for"
 330                        " %s\n", tf, tf->tf_name);
 331        return tf;
 332}
 333EXPORT_SYMBOL(target_fabric_configfs_init);
 334
 335/*
 336 * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
 337 */
 338void target_fabric_configfs_free(
 339        struct target_fabric_configfs *tf)
 340{
 341        mutex_lock(&g_tf_lock);
 342        list_del(&tf->tf_list);
 343        mutex_unlock(&g_tf_lock);
 344
 345        kfree(tf);
 346}
 347EXPORT_SYMBOL(target_fabric_configfs_free);
 348
 349/*
 350 * Perform a sanity check of the passed tf->tf_ops before completing
 351 * TCM fabric module registration.
 352 */
 353static int target_fabric_tf_ops_check(
 354        struct target_fabric_configfs *tf)
 355{
 356        struct target_core_fabric_ops *tfo = &tf->tf_ops;
 357
 358        if (!tfo->get_fabric_name) {
 359                pr_err("Missing tfo->get_fabric_name()\n");
 360                return -EINVAL;
 361        }
 362        if (!tfo->get_fabric_proto_ident) {
 363                pr_err("Missing tfo->get_fabric_proto_ident()\n");
 364                return -EINVAL;
 365        }
 366        if (!tfo->tpg_get_wwn) {
 367                pr_err("Missing tfo->tpg_get_wwn()\n");
 368                return -EINVAL;
 369        }
 370        if (!tfo->tpg_get_tag) {
 371                pr_err("Missing tfo->tpg_get_tag()\n");
 372                return -EINVAL;
 373        }
 374        if (!tfo->tpg_get_default_depth) {
 375                pr_err("Missing tfo->tpg_get_default_depth()\n");
 376                return -EINVAL;
 377        }
 378        if (!tfo->tpg_get_pr_transport_id) {
 379                pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
 380                return -EINVAL;
 381        }
 382        if (!tfo->tpg_get_pr_transport_id_len) {
 383                pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
 384                return -EINVAL;
 385        }
 386        if (!tfo->tpg_check_demo_mode) {
 387                pr_err("Missing tfo->tpg_check_demo_mode()\n");
 388                return -EINVAL;
 389        }
 390        if (!tfo->tpg_check_demo_mode_cache) {
 391                pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
 392                return -EINVAL;
 393        }
 394        if (!tfo->tpg_check_demo_mode_write_protect) {
 395                pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
 396                return -EINVAL;
 397        }
 398        if (!tfo->tpg_check_prod_mode_write_protect) {
 399                pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
 400                return -EINVAL;
 401        }
 402        if (!tfo->tpg_alloc_fabric_acl) {
 403                pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
 404                return -EINVAL;
 405        }
 406        if (!tfo->tpg_release_fabric_acl) {
 407                pr_err("Missing tfo->tpg_release_fabric_acl()\n");
 408                return -EINVAL;
 409        }
 410        if (!tfo->tpg_get_inst_index) {
 411                pr_err("Missing tfo->tpg_get_inst_index()\n");
 412                return -EINVAL;
 413        }
 414        if (!tfo->release_cmd) {
 415                pr_err("Missing tfo->release_cmd()\n");
 416                return -EINVAL;
 417        }
 418        if (!tfo->shutdown_session) {
 419                pr_err("Missing tfo->shutdown_session()\n");
 420                return -EINVAL;
 421        }
 422        if (!tfo->close_session) {
 423                pr_err("Missing tfo->close_session()\n");
 424                return -EINVAL;
 425        }
 426        if (!tfo->stop_session) {
 427                pr_err("Missing tfo->stop_session()\n");
 428                return -EINVAL;
 429        }
 430        if (!tfo->fall_back_to_erl0) {
 431                pr_err("Missing tfo->fall_back_to_erl0()\n");
 432                return -EINVAL;
 433        }
 434        if (!tfo->sess_logged_in) {
 435                pr_err("Missing tfo->sess_logged_in()\n");
 436                return -EINVAL;
 437        }
 438        if (!tfo->sess_get_index) {
 439                pr_err("Missing tfo->sess_get_index()\n");
 440                return -EINVAL;
 441        }
 442        if (!tfo->write_pending) {
 443                pr_err("Missing tfo->write_pending()\n");
 444                return -EINVAL;
 445        }
 446        if (!tfo->write_pending_status) {
 447                pr_err("Missing tfo->write_pending_status()\n");
 448                return -EINVAL;
 449        }
 450        if (!tfo->set_default_node_attributes) {
 451                pr_err("Missing tfo->set_default_node_attributes()\n");
 452                return -EINVAL;
 453        }
 454        if (!tfo->get_task_tag) {
 455                pr_err("Missing tfo->get_task_tag()\n");
 456                return -EINVAL;
 457        }
 458        if (!tfo->get_cmd_state) {
 459                pr_err("Missing tfo->get_cmd_state()\n");
 460                return -EINVAL;
 461        }
 462        if (!tfo->queue_data_in) {
 463                pr_err("Missing tfo->queue_data_in()\n");
 464                return -EINVAL;
 465        }
 466        if (!tfo->queue_status) {
 467                pr_err("Missing tfo->queue_status()\n");
 468                return -EINVAL;
 469        }
 470        if (!tfo->queue_tm_rsp) {
 471                pr_err("Missing tfo->queue_tm_rsp()\n");
 472                return -EINVAL;
 473        }
 474        if (!tfo->set_fabric_sense_len) {
 475                pr_err("Missing tfo->set_fabric_sense_len()\n");
 476                return -EINVAL;
 477        }
 478        if (!tfo->get_fabric_sense_len) {
 479                pr_err("Missing tfo->get_fabric_sense_len()\n");
 480                return -EINVAL;
 481        }
 482        if (!tfo->is_state_remove) {
 483                pr_err("Missing tfo->is_state_remove()\n");
 484                return -EINVAL;
 485        }
 486        /*
 487         * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
 488         * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
 489         * target_core_fabric_configfs.c WWN+TPG group context code.
 490         */
 491        if (!tfo->fabric_make_wwn) {
 492                pr_err("Missing tfo->fabric_make_wwn()\n");
 493                return -EINVAL;
 494        }
 495        if (!tfo->fabric_drop_wwn) {
 496                pr_err("Missing tfo->fabric_drop_wwn()\n");
 497                return -EINVAL;
 498        }
 499        if (!tfo->fabric_make_tpg) {
 500                pr_err("Missing tfo->fabric_make_tpg()\n");
 501                return -EINVAL;
 502        }
 503        if (!tfo->fabric_drop_tpg) {
 504                pr_err("Missing tfo->fabric_drop_tpg()\n");
 505                return -EINVAL;
 506        }
 507
 508        return 0;
 509}
 510
 511/*
 512 * Called 2nd from fabric module with returned parameter of
 513 * struct target_fabric_configfs * from target_fabric_configfs_init().
 514 *
 515 * Upon a successful registration, the new fabric's struct config_item is
 516 * returned.  Also, a pointer to this struct is set in the passed
 517 * struct target_fabric_configfs.
 518 */
 519int target_fabric_configfs_register(
 520        struct target_fabric_configfs *tf)
 521{
 522        int ret;
 523
 524        if (!tf) {
 525                pr_err("Unable to locate target_fabric_configfs"
 526                        " pointer\n");
 527                return -EINVAL;
 528        }
 529        if (!tf->tf_subsys) {
 530                pr_err("Unable to locate struct configfs_subsystem"
 531                        " pointer\n");
 532                return -EINVAL;
 533        }
 534        ret = target_fabric_tf_ops_check(tf);
 535        if (ret < 0)
 536                return ret;
 537
 538        pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
 539                ">>>>>>>>>>\n");
 540        return 0;
 541}
 542EXPORT_SYMBOL(target_fabric_configfs_register);
 543
 544void target_fabric_configfs_deregister(
 545        struct target_fabric_configfs *tf)
 546{
 547        struct configfs_subsystem *su;
 548
 549        if (!tf) {
 550                pr_err("Unable to locate passed target_fabric_"
 551                        "configfs\n");
 552                return;
 553        }
 554        su = tf->tf_subsys;
 555        if (!su) {
 556                pr_err("Unable to locate passed tf->tf_subsys"
 557                        " pointer\n");
 558                return;
 559        }
 560        pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
 561                        ">>>>>>>>>>>>\n");
 562        mutex_lock(&g_tf_lock);
 563        if (atomic_read(&tf->tf_access_cnt)) {
 564                mutex_unlock(&g_tf_lock);
 565                pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
 566                        tf->tf_name);
 567                BUG();
 568        }
 569        list_del(&tf->tf_list);
 570        mutex_unlock(&g_tf_lock);
 571
 572        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
 573                        " %s\n", tf->tf_name);
 574        tf->tf_module = NULL;
 575        tf->tf_subsys = NULL;
 576        kfree(tf);
 577
 578        pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
 579                        ">>>>>\n");
 580}
 581EXPORT_SYMBOL(target_fabric_configfs_deregister);
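
    /*
     * Minimal usage sketch (not taken from an in-tree fabric) of the three
     * exported calls above; "my_fabric" and my_fabric_ops are hypothetical.
     * The fabric is expected to populate tf->tf_ops before registering:
     *
     *        static struct target_fabric_configfs *my_fabric_tf;
     *
     *        static int __init my_fabric_init(void)
     *        {
     *                struct target_fabric_configfs *tf;
     *                int ret;
     *
     *                tf = target_fabric_configfs_init(THIS_MODULE, "my_fabric");
     *                if (IS_ERR(tf))
     *                        return PTR_ERR(tf);
     *                tf->tf_ops = my_fabric_ops;
     *                ret = target_fabric_configfs_register(tf);
     *                if (ret < 0) {
     *                        target_fabric_configfs_free(tf);
     *                        return ret;
     *                }
     *                my_fabric_tf = tf;
     *                return 0;
     *        }
     *
     *        static void __exit my_fabric_exit(void)
     *        {
     *                target_fabric_configfs_deregister(my_fabric_tf);
     *        }
     */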
 582
 583/*##############################################################################
 584// Stop functions called by external Target Fabrics Modules
 585//############################################################################*/
 586
 587/* Start functions for struct config_item_type target_core_dev_attrib_cit */
 588
 589#define DEF_DEV_ATTRIB_SHOW(_name)                                      \
 590static ssize_t target_core_dev_show_attr_##_name(                       \
 591        struct se_dev_attrib *da,                                       \
 592        char *page)                                                     \
 593{                                                                       \
 594        struct se_device *dev;                                          \
 595        struct se_subsystem_dev *se_dev = da->da_sub_dev;                       \
 596        ssize_t rb;                                                     \
 597                                                                        \
 598        spin_lock(&se_dev->se_dev_lock);                                \
 599        dev = se_dev->se_dev_ptr;                                       \
 600        if (!dev) {                                                     \
 601                spin_unlock(&se_dev->se_dev_lock);                      \
 602                return -ENODEV;                                         \
 603        }                                                               \
 604        rb = snprintf(page, PAGE_SIZE, "%u\n",                          \
 605                (u32)dev->se_sub_dev->se_dev_attrib._name);             \
 606        spin_unlock(&se_dev->se_dev_lock);                              \
 607                                                                        \
 608        return rb;                                                      \
 609}
 610
 611#define DEF_DEV_ATTRIB_STORE(_name)                                     \
 612static ssize_t target_core_dev_store_attr_##_name(                      \
 613        struct se_dev_attrib *da,                                       \
 614        const char *page,                                               \
 615        size_t count)                                                   \
 616{                                                                       \
 617        struct se_device *dev;                                          \
 618        struct se_subsystem_dev *se_dev = da->da_sub_dev;                       \
 619        unsigned long val;                                              \
 620        int ret;                                                        \
 621                                                                        \
 622        spin_lock(&se_dev->se_dev_lock);                                \
 623        dev = se_dev->se_dev_ptr;                                       \
 624        if (!dev) {                                                     \
 625                spin_unlock(&se_dev->se_dev_lock);                      \
 626                return -ENODEV;                                         \
 627        }                                                               \
 628        ret = strict_strtoul(page, 0, &val);                            \
 629        if (ret < 0) {                                                  \
 630                spin_unlock(&se_dev->se_dev_lock);                      \
 631                pr_err("strict_strtoul() failed with"           \
 632                        " ret: %d\n", ret);                             \
 633                return -EINVAL;                                         \
 634        }                                                               \
 635        ret = se_dev_set_##_name(dev, (u32)val);                        \
 636        spin_unlock(&se_dev->se_dev_lock);                              \
 637                                                                        \
 638        return (!ret) ? count : -EINVAL;                                \
 639}
 640
 641#define DEF_DEV_ATTRIB(_name)                                           \
 642DEF_DEV_ATTRIB_SHOW(_name);                                             \
 643DEF_DEV_ATTRIB_STORE(_name);
 644
 645#define DEF_DEV_ATTRIB_RO(_name)                                        \
 646DEF_DEV_ATTRIB_SHOW(_name);
 647
 648CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
 649#define SE_DEV_ATTR(_name, _mode)                                       \
 650static struct target_core_dev_attrib_attribute                          \
 651                        target_core_dev_attrib_##_name =                \
 652                __CONFIGFS_EATTR(_name, _mode,                          \
 653                target_core_dev_show_attr_##_name,                      \
 654                target_core_dev_store_attr_##_name);
 655
 656#define SE_DEV_ATTR_RO(_name);                                          \
 657static struct target_core_dev_attrib_attribute                          \
 658                        target_core_dev_attrib_##_name =                \
 659        __CONFIGFS_EATTR_RO(_name,                                      \
 660        target_core_dev_show_attr_##_name);
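
    /*
     * For reference: each DEF_DEV_ATTRIB(x) + SE_DEV_ATTR(x, mode) pair below
     * expands to target_core_dev_show_attr_x()/target_core_dev_store_attr_x()
     * handlers plus a struct target_core_dev_attrib_attribute named
     * target_core_dev_attrib_x, which is what target_core_dev_attrib_attrs[]
     * further down points at.  DEF_DEV_ATTRIB_RO()/SE_DEV_ATTR_RO() emit the
     * show side only.
     */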
 661
 662DEF_DEV_ATTRIB(emulate_dpo);
 663SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
 664
 665DEF_DEV_ATTRIB(emulate_fua_write);
 666SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
 667
 668DEF_DEV_ATTRIB(emulate_fua_read);
 669SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
 670
 671DEF_DEV_ATTRIB(emulate_write_cache);
 672SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
 673
 674DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
 675SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
 676
 677DEF_DEV_ATTRIB(emulate_tas);
 678SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
 679
 680DEF_DEV_ATTRIB(emulate_tpu);
 681SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
 682
 683DEF_DEV_ATTRIB(emulate_tpws);
 684SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
 685
 686DEF_DEV_ATTRIB(enforce_pr_isids);
 687SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
 688
 689DEF_DEV_ATTRIB(is_nonrot);
 690SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
 691
 692DEF_DEV_ATTRIB(emulate_rest_reord);
 693SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
 694
 695DEF_DEV_ATTRIB_RO(hw_block_size);
 696SE_DEV_ATTR_RO(hw_block_size);
 697
 698DEF_DEV_ATTRIB(block_size);
 699SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
 700
 701DEF_DEV_ATTRIB_RO(hw_max_sectors);
 702SE_DEV_ATTR_RO(hw_max_sectors);
 703
 704DEF_DEV_ATTRIB(max_sectors);
 705SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
 706
 707DEF_DEV_ATTRIB(optimal_sectors);
 708SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
 709
 710DEF_DEV_ATTRIB_RO(hw_queue_depth);
 711SE_DEV_ATTR_RO(hw_queue_depth);
 712
 713DEF_DEV_ATTRIB(queue_depth);
 714SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
 715
 716DEF_DEV_ATTRIB(max_unmap_lba_count);
 717SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
 718
 719DEF_DEV_ATTRIB(max_unmap_block_desc_count);
 720SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
 721
 722DEF_DEV_ATTRIB(unmap_granularity);
 723SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
 724
 725DEF_DEV_ATTRIB(unmap_granularity_alignment);
 726SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
 727
 728CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
 729
 730static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
 731        &target_core_dev_attrib_emulate_dpo.attr,
 732        &target_core_dev_attrib_emulate_fua_write.attr,
 733        &target_core_dev_attrib_emulate_fua_read.attr,
 734        &target_core_dev_attrib_emulate_write_cache.attr,
 735        &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
 736        &target_core_dev_attrib_emulate_tas.attr,
 737        &target_core_dev_attrib_emulate_tpu.attr,
 738        &target_core_dev_attrib_emulate_tpws.attr,
 739        &target_core_dev_attrib_enforce_pr_isids.attr,
 740        &target_core_dev_attrib_is_nonrot.attr,
 741        &target_core_dev_attrib_emulate_rest_reord.attr,
 742        &target_core_dev_attrib_hw_block_size.attr,
 743        &target_core_dev_attrib_block_size.attr,
 744        &target_core_dev_attrib_hw_max_sectors.attr,
 745        &target_core_dev_attrib_max_sectors.attr,
 746        &target_core_dev_attrib_optimal_sectors.attr,
 747        &target_core_dev_attrib_hw_queue_depth.attr,
 748        &target_core_dev_attrib_queue_depth.attr,
 749        &target_core_dev_attrib_max_unmap_lba_count.attr,
 750        &target_core_dev_attrib_max_unmap_block_desc_count.attr,
 751        &target_core_dev_attrib_unmap_granularity.attr,
 752        &target_core_dev_attrib_unmap_granularity_alignment.attr,
 753        NULL,
 754};
 755
 756static struct configfs_item_operations target_core_dev_attrib_ops = {
 757        .show_attribute         = target_core_dev_attrib_attr_show,
 758        .store_attribute        = target_core_dev_attrib_attr_store,
 759};
 760
 761static struct config_item_type target_core_dev_attrib_cit = {
 762        .ct_item_ops            = &target_core_dev_attrib_ops,
 763        .ct_attrs               = target_core_dev_attrib_attrs,
 764        .ct_owner               = THIS_MODULE,
 765};
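
    /*
     * Example, with paths assuming configfs mounted at /sys/kernel/config and
     * a backstore already created as $HBA/$DEV under target/core/:
     *
     *   cat /sys/kernel/config/target/core/$HBA/$DEV/attrib/emulate_write_cache
     *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/emulate_write_cache
     *
     * The store path runs se_dev_set_emulate_write_cache() via
     * DEF_DEV_ATTRIB_STORE(), and returns -ENODEV until se_dev_ptr is set.
     */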
 766
 767/* End functions for struct config_item_type target_core_dev_attrib_cit */
 768
 769/*  Start functions for struct config_item_type target_core_dev_wwn_cit */
 770
 771CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
 772#define SE_DEV_WWN_ATTR(_name, _mode)                                   \
 773static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
 774                __CONFIGFS_EATTR(_name, _mode,                          \
 775                target_core_dev_wwn_show_attr_##_name,                  \
 776                target_core_dev_wwn_store_attr_##_name);
 777
 778#define SE_DEV_WWN_ATTR_RO(_name);                                      \
 779static struct target_core_dev_wwn_attribute                             \
 780                        target_core_dev_wwn_##_name =                   \
 781        __CONFIGFS_EATTR_RO(_name,                                      \
 782        target_core_dev_wwn_show_attr_##_name);
 785
 786/*
 787 * VPD page 0x80 Unit serial
 788 */
 789static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
 790        struct t10_wwn *t10_wwn,
 791        char *page)
 792{
 793        struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
 794        struct se_device *dev;
 795
 796        dev = se_dev->se_dev_ptr;
 797        if (!dev)
 798                return -ENODEV;
 799
 800        return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
 801                &t10_wwn->unit_serial[0]);
 802}
 803
 804static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
 805        struct t10_wwn *t10_wwn,
 806        const char *page,
 807        size_t count)
 808{
 809        struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
 810        struct se_device *dev;
 811        unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
 812
 813        /*
 814         * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
 815         * from the struct scsi_device level firmware, do not allow
 816         * VPD Unit Serial to be emulated.
 817         *
 818         * Note this struct scsi_device could also be emulating VPD
 819         * information from its drivers/scsi LLD.  But for now we assume
 820         * it is doing 'the right thing' wrt a world wide unique
 821         * VPD Unit Serial Number that OS dependent multipath can depend on.
 822         */
 823        if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
 824                pr_err("Underlying SCSI device firmware provided VPD"
 825                        " Unit Serial, ignoring request\n");
 826                return -EOPNOTSUPP;
 827        }
 828
 829        if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
 830                pr_err("Emulated VPD Unit Serial exceeds"
 831                " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
 832                return -EOVERFLOW;
 833        }
 834        /*
 835         * Check to see if any active $FABRIC_MOD exports exist.  If they
 836         * do exist, fail here as changing this information on the fly
 837         * (underneath the initiator side OS dependent multipath code)
 838         * could cause negative effects.
 839         */
 840        dev = su_dev->se_dev_ptr;
 841        if (dev) {
 842                if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
 843                        pr_err("Unable to set VPD Unit Serial while"
 844                                " active %d $FABRIC_MOD exports exist\n",
 845                                atomic_read(&dev->dev_export_obj.obj_access_count));
 846                        return -EINVAL;
 847                }
 848        }
 849        /*
 850         * This currently assumes ASCII encoding for emulated VPD Unit Serial.
 851         *
 852         * Also, strip any newline added from the userspace
 853         * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
 854         */
 855        memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
 856        snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
 857        snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
 858                        "%s", strstrip(buf));
 859        su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
 860
 861        pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
 862                        " %s\n", su_dev->t10_wwn.unit_serial);
 863
 864        return count;
 865}
 866
 867SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
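
    /*
     * Example round trip (path elided; the serial value is illustrative, and
     * reading it back requires se_dev_ptr to be configured):
     *
     *   echo 600140511564d4bd > .../wwn/vpd_unit_serial
     *   cat .../wwn/vpd_unit_serial
     *   T10 VPD Unit Serial Number: 600140511564d4bd
     */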
 868
 869/*
 870 * VPD page 0x83 Protocol Identifier
 871 */
 872static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
 873        struct t10_wwn *t10_wwn,
 874        char *page)
 875{
 876        struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
 877        struct se_device *dev;
 878        struct t10_vpd *vpd;
 879        unsigned char buf[VPD_TMP_BUF_SIZE];
 880        ssize_t len = 0;
 881
 882        dev = se_dev->se_dev_ptr;
 883        if (!dev)
 884                return -ENODEV;
 885
 886        memset(buf, 0, VPD_TMP_BUF_SIZE);
 887
 888        spin_lock(&t10_wwn->t10_vpd_lock);
 889        list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
 890                if (!vpd->protocol_identifier_set)
 891                        continue;
 892
 893                transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
 894
 895                if (len + strlen(buf) >= PAGE_SIZE)
 896                        break;
 897
 898                len += sprintf(page+len, "%s", buf);
 899        }
 900        spin_unlock(&t10_wwn->t10_vpd_lock);
 901
 902        return len;
 903}
 904
 905static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
 906        struct t10_wwn *t10_wwn,
 907        const char *page,
 908        size_t count)
 909{
 910        return -ENOSYS;
 911}
 912
 913SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
 914
 915/*
 916 * Generic wrapper for dumping VPD identifiers by association.
 917 */
 918#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)                           \
 919static ssize_t target_core_dev_wwn_show_attr_##_name(                   \
 920        struct t10_wwn *t10_wwn,                                        \
 921        char *page)                                                     \
 922{                                                                       \
 923        struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;         \
 924        struct se_device *dev;                                          \
 925        struct t10_vpd *vpd;                                                    \
 926        unsigned char buf[VPD_TMP_BUF_SIZE];                            \
 927        ssize_t len = 0;                                                \
 928                                                                        \
 929        dev = se_dev->se_dev_ptr;                                       \
 930        if (!dev)                                                       \
 931                return -ENODEV;                                         \
 932                                                                        \
 933        spin_lock(&t10_wwn->t10_vpd_lock);                              \
 934        list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {    \
 935                if (vpd->association != _assoc)                         \
 936                        continue;                                       \
 937                                                                        \
 938                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
 939                transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);   \
 940                if (len + strlen(buf) >= PAGE_SIZE)                     \
 941                        break;                                          \
 942                len += sprintf(page+len, "%s", buf);                    \
 943                                                                        \
 944                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
 945                transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
 946                if (len + strlen(buf) >= PAGE_SIZE)                     \
 947                        break;                                          \
 948                len += sprintf(page+len, "%s", buf);                    \
 949                                                                        \
 950                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
 951                transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
 952                if (len + strlen(buf) >= PAGE_SIZE)                     \
 953                        break;                                          \
 954                len += sprintf(page+len, "%s", buf);                    \
 955        }                                                               \
 956        spin_unlock(&t10_wwn->t10_vpd_lock);                            \
 957                                                                        \
 958        return len;                                                     \
 959}
 960
 961/*
 962 * VPD page 0x83 Association: Logical Unit
 963 */
 964DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
 965
 966static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
 967        struct t10_wwn *t10_wwn,
 968        const char *page,
 969        size_t count)
 970{
 971        return -ENOSYS;
 972}
 973
 974SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
 975
 976/*
 977 * VPD page 0x83 Association: Target Port
 978 */
 979DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
 980
 981static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
 982        struct t10_wwn *t10_wwn,
 983        const char *page,
 984        size_t count)
 985{
 986        return -ENOSYS;
 987}
 988
 989SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
 990
 991/*
 992 * VPD page 0x83 Association: SCSI Target Device
 993 */
 994DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
 995
 996static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
 997        struct t10_wwn *t10_wwn,
 998        const char *page,
 999        size_t count)
1000{
1001        return -ENOSYS;
1002}
1003
1004SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
1005
1006CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
1007
1008static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
1009        &target_core_dev_wwn_vpd_unit_serial.attr,
1010        &target_core_dev_wwn_vpd_protocol_identifier.attr,
1011        &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
1012        &target_core_dev_wwn_vpd_assoc_target_port.attr,
1013        &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
1014        NULL,
1015};
1016
1017static struct configfs_item_operations target_core_dev_wwn_ops = {
1018        .show_attribute         = target_core_dev_wwn_attr_show,
1019        .store_attribute        = target_core_dev_wwn_attr_store,
1020};
1021
1022static struct config_item_type target_core_dev_wwn_cit = {
1023        .ct_item_ops            = &target_core_dev_wwn_ops,
1024        .ct_attrs               = target_core_dev_wwn_attrs,
1025        .ct_owner               = THIS_MODULE,
1026};
1027
1028/*  End functions for struct config_item_type target_core_dev_wwn_cit */
1029
1030/*  Start functions for struct config_item_type target_core_dev_pr_cit */
1031
1032CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
1033#define SE_DEV_PR_ATTR(_name, _mode)                                    \
1034static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1035        __CONFIGFS_EATTR(_name, _mode,                                  \
1036        target_core_dev_pr_show_attr_##_name,                           \
1037        target_core_dev_pr_store_attr_##_name);
1038
1039#define SE_DEV_PR_ATTR_RO(_name);                                       \
1040static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1041        __CONFIGFS_EATTR_RO(_name,                                      \
1042        target_core_dev_pr_show_attr_##_name);
1043
1044/*
1045 * res_holder
1046 */
1047static ssize_t target_core_dev_pr_show_spc3_res(
1048        struct se_device *dev,
1049        char *page,
1050        ssize_t *len)
1051{
1052        struct se_node_acl *se_nacl;
1053        struct t10_pr_registration *pr_reg;
1054        char i_buf[PR_REG_ISID_ID_LEN];
1055        int prf_isid;
1056
1057        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1058
1059        spin_lock(&dev->dev_reservation_lock);
1060        pr_reg = dev->dev_pr_res_holder;
1061        if (!pr_reg) {
1062                *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
1063                spin_unlock(&dev->dev_reservation_lock);
1064                return *len;
1065        }
1066        se_nacl = pr_reg->pr_reg_nacl;
1067        prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1068                                PR_REG_ISID_ID_LEN);
1069
1070        *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
1071                se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
1072                se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
1073        spin_unlock(&dev->dev_reservation_lock);
1074
1075        return *len;
1076}
1077
1078static ssize_t target_core_dev_pr_show_spc2_res(
1079        struct se_device *dev,
1080        char *page,
1081        ssize_t *len)
1082{
1083        struct se_node_acl *se_nacl;
1084
1085        spin_lock(&dev->dev_reservation_lock);
1086        se_nacl = dev->dev_reserved_node_acl;
1087        if (!se_nacl) {
1088                *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
1089                spin_unlock(&dev->dev_reservation_lock);
1090                return *len;
1091        }
1092        *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
1093                se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
1094                se_nacl->initiatorname);
1095        spin_unlock(&dev->dev_reservation_lock);
1096
1097        return *len;
1098}
1099
1100static ssize_t target_core_dev_pr_show_attr_res_holder(
1101        struct se_subsystem_dev *su_dev,
1102        char *page)
1103{
1104        ssize_t len = 0;
1105
1106        if (!su_dev->se_dev_ptr)
1107                return -ENODEV;
1108
1109        switch (su_dev->t10_pr.res_type) {
1110        case SPC3_PERSISTENT_RESERVATIONS:
1111                target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
1112                                page, &len);
1113                break;
1114        case SPC2_RESERVATIONS:
1115                target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
1116                                page, &len);
1117                break;
1118        case SPC_PASSTHROUGH:
1119                len += sprintf(page+len, "Passthrough\n");
1120                break;
1121        default:
1122                len += sprintf(page+len, "Unknown\n");
1123                break;
1124        }
1125
1126        return len;
1127}
1128
1129SE_DEV_PR_ATTR_RO(res_holder);
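
    /*
     * Reading res_holder yields one line depending on t10_pr.res_type and the
     * current holder, using the strings from the helpers above, e.g.:
     *
     *   No SPC-3 Reservation holder
     *   SPC-3 Reservation: <fabric> Initiator: <initiatorname><isid>
     *   SPC-2 Reservation: <fabric> Initiator: <initiatorname>
     *   Passthrough
     */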
1130
1131/*
1132 * res_pr_all_tgt_pts
1133 */
1134static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1135        struct se_subsystem_dev *su_dev,
1136        char *page)
1137{
1138        struct se_device *dev;
1139        struct t10_pr_registration *pr_reg;
1140        ssize_t len = 0;
1141
1142        dev = su_dev->se_dev_ptr;
1143        if (!dev)
1144                return -ENODEV;
1145
1146        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1147                return len;
1148
1149        spin_lock(&dev->dev_reservation_lock);
1150        pr_reg = dev->dev_pr_res_holder;
1151        if (!pr_reg) {
1152                len = sprintf(page, "No SPC-3 Reservation holder\n");
1153                spin_unlock(&dev->dev_reservation_lock);
1154                return len;
1155        }
1156        /*
1157         * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
1158         * Basic PERSISTENT RESERVE OUT parameter list, page 290
1159         */
1160        if (pr_reg->pr_reg_all_tg_pt)
1161                len = sprintf(page, "SPC-3 Reservation: All Target"
1162                        " Ports registration\n");
1163        else
1164                len = sprintf(page, "SPC-3 Reservation: Single"
1165                        " Target Port registration\n");
1166        spin_unlock(&dev->dev_reservation_lock);
1167
1168        return len;
1169}
1170
1171SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
1172
1173/*
1174 * res_pr_generation
1175 */
1176static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
1177        struct se_subsystem_dev *su_dev,
1178        char *page)
1179{
1180        if (!su_dev->se_dev_ptr)
1181                return -ENODEV;
1182
1183        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1184                return 0;
1185
1186        return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
1187}
1188
1189SE_DEV_PR_ATTR_RO(res_pr_generation);
1190
1191/*
1192 * res_pr_holder_tg_port
1193 */
1194static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1195        struct se_subsystem_dev *su_dev,
1196        char *page)
1197{
1198        struct se_device *dev;
1199        struct se_node_acl *se_nacl;
1200        struct se_lun *lun;
1201        struct se_portal_group *se_tpg;
1202        struct t10_pr_registration *pr_reg;
1203        struct target_core_fabric_ops *tfo;
1204        ssize_t len = 0;
1205
1206        dev = su_dev->se_dev_ptr;
1207        if (!dev)
1208                return -ENODEV;
1209
1210        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1211                return len;
1212
1213        spin_lock(&dev->dev_reservation_lock);
1214        pr_reg = dev->dev_pr_res_holder;
1215        if (!pr_reg) {
1216                len = sprintf(page, "No SPC-3 Reservation holder\n");
1217                spin_unlock(&dev->dev_reservation_lock);
1218                return len;
1219        }
1220        se_nacl = pr_reg->pr_reg_nacl;
1221        se_tpg = se_nacl->se_tpg;
1222        lun = pr_reg->pr_reg_tg_pt_lun;
1223        tfo = se_tpg->se_tpg_tfo;
1224
1225        len += sprintf(page+len, "SPC-3 Reservation: %s"
1226                " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
1227                tfo->tpg_get_wwn(se_tpg));
1228        len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1229                " Identifier Tag: %hu %s Portal Group Tag: %hu"
1230                " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
1231                tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
1232                tfo->get_fabric_name(), lun->unpacked_lun);
1233        spin_unlock(&dev->dev_reservation_lock);
1234
1235        return len;
1236}
1237
1238SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
1239
1240/*
1241 * res_pr_registered_i_pts
1242 */
1243static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1244        struct se_subsystem_dev *su_dev,
1245        char *page)
1246{
1247        struct target_core_fabric_ops *tfo;
1248        struct t10_pr_registration *pr_reg;
1249        unsigned char buf[384];
1250        char i_buf[PR_REG_ISID_ID_LEN];
1251        ssize_t len = 0;
1252        int reg_count = 0, prf_isid;
1253
1254        if (!su_dev->se_dev_ptr)
1255                return -ENODEV;
1256
1257        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1258                return len;
1259
1260        len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1261
1262        spin_lock(&su_dev->t10_pr.registration_lock);
1263        list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
1264                        pr_reg_list) {
1265
1266                memset(buf, 0, 384);
1267                memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1268                tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1269                prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1270                                        PR_REG_ISID_ID_LEN);
1271                sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1272                        tfo->get_fabric_name(),
1273                        pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
1274                        &i_buf[0] : "", pr_reg->pr_res_key,
1275                        pr_reg->pr_res_generation);
1276
1277                if (len + strlen(buf) >= PAGE_SIZE)
1278                        break;
1279
1280                len += sprintf(page+len, "%s", buf);
1281                reg_count++;
1282        }
1283        spin_unlock(&su_dev->t10_pr.registration_lock);
1284
1285        if (!reg_count)
1286                len += sprintf(page+len, "None\n");
1287
1288        return len;
1289}
1290
1291SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
1292
1293/*
1294 * res_pr_type
1295 */
1296static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1297        struct se_subsystem_dev *su_dev,
1298        char *page)
1299{
1300        struct se_device *dev;
1301        struct t10_pr_registration *pr_reg;
1302        ssize_t len = 0;
1303
1304        dev = su_dev->se_dev_ptr;
1305        if (!dev)
1306                return -ENODEV;
1307
1308        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1309                return len;
1310
1311        spin_lock(&dev->dev_reservation_lock);
1312        pr_reg = dev->dev_pr_res_holder;
1313        if (!pr_reg) {
1314                len = sprintf(page, "No SPC-3 Reservation holder\n");
1315                spin_unlock(&dev->dev_reservation_lock);
1316                return len;
1317        }
1318        len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1319                core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1320        spin_unlock(&dev->dev_reservation_lock);
1321
1322        return len;
1323}
1324
1325SE_DEV_PR_ATTR_RO(res_pr_type);
1326
1327/*
1328 * res_type
1329 */
1330static ssize_t target_core_dev_pr_show_attr_res_type(
1331        struct se_subsystem_dev *su_dev,
1332        char *page)
1333{
1334        ssize_t len = 0;
1335
1336        if (!su_dev->se_dev_ptr)
1337                return -ENODEV;
1338
1339        switch (su_dev->t10_pr.res_type) {
1340        case SPC3_PERSISTENT_RESERVATIONS:
1341                len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1342                break;
1343        case SPC2_RESERVATIONS:
1344                len = sprintf(page, "SPC2_RESERVATIONS\n");
1345                break;
1346        case SPC_PASSTHROUGH:
1347                len = sprintf(page, "SPC_PASSTHROUGH\n");
1348                break;
1349        default:
1350                len = sprintf(page, "UNKNOWN\n");
1351                break;
1352        }
1353
1354        return len;
1355}
1356
1357SE_DEV_PR_ATTR_RO(res_type);
1358
1359/*
1360 * res_aptpl_active
1361 */
1362
1363static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
1364        struct se_subsystem_dev *su_dev,
1365        char *page)
1366{
1367        if (!su_dev->se_dev_ptr)
1368                return -ENODEV;
1369
1370        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1371                return 0;
1372
1373        return sprintf(page, "APTPL Bit Status: %s\n",
1374                (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
1375}
1376
1377SE_DEV_PR_ATTR_RO(res_aptpl_active);
1378
1379/*
1380 * res_aptpl_metadata
1381 */
1382static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
1383        struct se_subsystem_dev *su_dev,
1384        char *page)
1385{
1386        if (!su_dev->se_dev_ptr)
1387                return -ENODEV;
1388
1389        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1390                return 0;
1391
1392        return sprintf(page, "Ready to process PR APTPL metadata..\n");
1393}
1394
1395enum {
1396        Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
1397        Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
1398        Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
1399        Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
1400};
1401
1402static match_table_t tokens = {
1403        {Opt_initiator_fabric, "initiator_fabric=%s"},
1404        {Opt_initiator_node, "initiator_node=%s"},
1405        {Opt_initiator_sid, "initiator_sid=%s"},
1406        {Opt_sa_res_key, "sa_res_key=%s"},
1407        {Opt_res_holder, "res_holder=%d"},
1408        {Opt_res_type, "res_type=%d"},
1409        {Opt_res_scope, "res_scope=%d"},
1410        {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
1411        {Opt_mapped_lun, "mapped_lun=%d"},
1412        {Opt_target_fabric, "target_fabric=%s"},
1413        {Opt_target_node, "target_node=%s"},
1414        {Opt_tpgt, "tpgt=%d"},
1415        {Opt_port_rtpi, "port_rtpi=%d"},
1416        {Opt_target_lun, "target_lun=%d"},
1417        {Opt_err, NULL}
1418};
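
    /*
     * The store handler below expects a single comma-separated line of
     * key=value tokens from the match_table above, e.g. (values illustrative):
     *
     *   initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
     *   sa_res_key=0x00000000ab01cd02,res_holder=1,res_type=3,res_scope=0,
     *   res_all_tg_pt=0,mapped_lun=0,target_fabric=iSCSI,
     *   target_node=iqn.2003-01.org.linux-iscsi.target:tgt1,tpgt=1,port_rtpi=1,
     *   target_lun=0
     *
     * written as one line to the res_aptpl_metadata attribute.
     */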
1419
1420static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1421        struct se_subsystem_dev *su_dev,
1422        const char *page,
1423        size_t count)
1424{
1425        struct se_device *dev;
1426        unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1427        unsigned char *t_fabric = NULL, *t_port = NULL;
1428        char *orig, *ptr, *arg_p, *opts;
1429        substring_t args[MAX_OPT_ARGS];
1430        unsigned long long tmp_ll;
1431        u64 sa_res_key = 0;
1432        u32 mapped_lun = 0, target_lun = 0;
1433        int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
1434        u16 port_rtpi = 0, tpgt = 0;
1435        u8 type = 0, scope;
1436
1437        dev = su_dev->se_dev_ptr;
1438        if (!dev)
1439                return -ENODEV;
1440
1441        if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1442                return 0;
1443
1444        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1445                pr_debug("Unable to process APTPL metadata while"
1446                        " active fabric exports exist\n");
1447                return -EINVAL;
1448        }
1449
1450        opts = kstrdup(page, GFP_KERNEL);
1451        if (!opts)
1452                return -ENOMEM;
1453
1454        orig = opts;
1455        while ((ptr = strsep(&opts, ",")) != NULL) {
1456                if (!*ptr)
1457                        continue;
1458
1459                token = match_token(ptr, tokens, args);
1460                switch (token) {
1461                case Opt_initiator_fabric:
1462                        i_fabric = match_strdup(&args[0]);
1463                        if (!i_fabric) {
1464                                ret = -ENOMEM;
1465                                goto out;
1466                        }
1467                        break;
1468                case Opt_initiator_node:
1469                        i_port = match_strdup(&args[0]);
1470                        if (!i_port) {
1471                                ret = -ENOMEM;
1472                                goto out;
1473                        }
1474                        if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
1475                                pr_err("APTPL metadata initiator_node="
1476                                        " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1477                                        PR_APTPL_MAX_IPORT_LEN);
1478                                ret = -EINVAL;
1479                                break;
1480                        }
1481                        break;
1482                case Opt_initiator_sid:
1483                        isid = match_strdup(&args[0]);
1484                        if (!isid) {
1485                                ret = -ENOMEM;
1486                                goto out;
1487                        }
1488                        if (strlen(isid) >= PR_REG_ISID_LEN) {
1489                                pr_err("APTPL metadata initiator_sid="
1490                                        " exceeds PR_REG_ISID_LEN: %d\n",
1491                                        PR_REG_ISID_LEN);
1492                                ret = -EINVAL;
1493                                break;
1494                        }
1495                        break;
1496                case Opt_sa_res_key:
1497                        arg_p = match_strdup(&args[0]);
1498                        if (!arg_p) {
1499                                ret = -ENOMEM;
1500                                goto out;
1501                        }
1502                        ret = strict_strtoull(arg_p, 0, &tmp_ll);
1503                        if (ret < 0) {
1504                                pr_err("strict_strtoull() failed for"
1505                                        " sa_res_key=\n");
1506                                goto out;
1507                        }
1508                        sa_res_key = (u64)tmp_ll;
1509                        break;
1510                /*
1511                 * PR APTPL Metadata for Reservation
1512                 */
1513                case Opt_res_holder:
1514                        match_int(args, &arg);
1515                        res_holder = arg;
1516                        break;
1517                case Opt_res_type:
1518                        match_int(args, &arg);
1519                        type = (u8)arg;
1520                        break;
1521                case Opt_res_scope:
1522                        match_int(args, &arg);
1523                        scope = (u8)arg;
1524                        break;
1525                case Opt_res_all_tg_pt:
1526                        match_int(args, &arg);
1527                        all_tg_pt = (int)arg;
1528                        break;
1529                case Opt_mapped_lun:
1530                        match_int(args, &arg);
1531                        mapped_lun = (u32)arg;
1532                        break;
1533                /*
1534                 * PR APTPL Metadata for Target Port
1535                 */
1536                case Opt_target_fabric:
1537                        t_fabric = match_strdup(&args[0]);
1538                        if (!t_fabric) {
1539                                ret = -ENOMEM;
1540                                goto out;
1541                        }
1542                        break;
1543                case Opt_target_node:
1544                        t_port = match_strdup(&args[0]);
1545                        if (!t_port) {
1546                                ret = -ENOMEM;
1547                                goto out;
1548                        }
1549                        if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
1550                                pr_err("APTPL metadata target_node="
1551                                        " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1552                                        PR_APTPL_MAX_TPORT_LEN);
1553                                ret = -EINVAL;
1554                                break;
1555                        }
1556                        break;
1557                case Opt_tpgt:
1558                        match_int(args, &arg);
1559                        tpgt = (u16)arg;
1560                        break;
1561                case Opt_port_rtpi:
1562                        match_int(args, &arg);
1563                        port_rtpi = (u16)arg;
1564                        break;
1565                case Opt_target_lun:
1566                        match_int(args, &arg);
1567                        target_lun = (u32)arg;
1568                        break;
1569                default:
1570                        break;
1571                }
1572        }
1573
1574        if (!i_port || !t_port || !sa_res_key) {
1575                pr_err("Illegal parameters for APTPL registration\n");
1576                ret = -EINVAL;
1577                goto out;
1578        }
1579
1580        if (res_holder && !(type)) {
1581                pr_err("Illegal PR type: 0x%02x for reservation"
1582                                " holder\n", type);
1583                ret = -EINVAL;
1584                goto out;
1585        }
1586
1587        ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
1588                        i_port, isid, mapped_lun, t_port, tpgt, target_lun,
1589                        res_holder, all_tg_pt, type);
1590out:
1591        kfree(i_fabric);
1592        kfree(i_port);
1593        kfree(isid);
1594        kfree(t_fabric);
1595        kfree(t_port);
1596        kfree(orig);
1597        return (ret == 0) ? count : ret;
1598}
1599
1600SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
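
/*
 * Illustrative usage sketch only (the fabric names, IQNs and key values
 * below are made up): APTPL metadata is restored by writing a single
 * comma-separated line of the key=value tokens parsed above, and the
 * parser requires initiator_node=, target_node= and a non-zero
 * sa_res_key= while no active fabric exports exist, e.g.:
 *
 *   echo "initiator_fabric=iSCSI,initiator_node=iqn.2003-01.org.example:init,sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.org.example:tgt,tpgt=1,target_lun=0" > /sys/kernel/config/target/core/$HBA/$DEV/pr/res_aptpl_metadata
 */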
1601
1602CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
1603
1604static struct configfs_attribute *target_core_dev_pr_attrs[] = {
1605        &target_core_dev_pr_res_holder.attr,
1606        &target_core_dev_pr_res_pr_all_tgt_pts.attr,
1607        &target_core_dev_pr_res_pr_generation.attr,
1608        &target_core_dev_pr_res_pr_holder_tg_port.attr,
1609        &target_core_dev_pr_res_pr_registered_i_pts.attr,
1610        &target_core_dev_pr_res_pr_type.attr,
1611        &target_core_dev_pr_res_type.attr,
1612        &target_core_dev_pr_res_aptpl_active.attr,
1613        &target_core_dev_pr_res_aptpl_metadata.attr,
1614        NULL,
1615};
1616
1617static struct configfs_item_operations target_core_dev_pr_ops = {
1618        .show_attribute         = target_core_dev_pr_attr_show,
1619        .store_attribute        = target_core_dev_pr_attr_store,
1620};
1621
1622static struct config_item_type target_core_dev_pr_cit = {
1623        .ct_item_ops            = &target_core_dev_pr_ops,
1624        .ct_attrs               = target_core_dev_pr_attrs,
1625        .ct_owner               = THIS_MODULE,
1626};
1627
1628/*  End functions for struct config_item_type target_core_dev_pr_cit */
1629
1630/*  Start functions for struct config_item_type target_core_dev_cit */
1631
1632static ssize_t target_core_show_dev_info(void *p, char *page)
1633{
1634        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1635        struct se_hba *hba = se_dev->se_dev_hba;
1636        struct se_subsystem_api *t = hba->transport;
1637        int bl = 0;
1638        ssize_t read_bytes = 0;
1639
1640        if (!se_dev->se_dev_ptr)
1641                return -ENODEV;
1642
1643        transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
1644        read_bytes += bl;
1645        read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
1646        return read_bytes;
1647}
1648
1649static struct target_core_configfs_attribute target_core_attr_dev_info = {
1650        .attr   = { .ca_owner = THIS_MODULE,
1651                    .ca_name = "info",
1652                    .ca_mode = S_IRUGO },
1653        .show   = target_core_show_dev_info,
1654        .store  = NULL,
1655};
1656
1657static ssize_t target_core_store_dev_control(
1658        void *p,
1659        const char *page,
1660        size_t count)
1661{
1662        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1663        struct se_hba *hba = se_dev->se_dev_hba;
1664        struct se_subsystem_api *t = hba->transport;
1665
1666        if (!se_dev->se_dev_su_ptr) {
1667                pr_err("Unable to locate struct se_subsystem_dev->"
1668                                "se_dev_su_ptr\n");
1669                return -EINVAL;
1670        }
1671
1672        return t->set_configfs_dev_params(hba, se_dev, page, count);
1673}
1674
1675static struct target_core_configfs_attribute target_core_attr_dev_control = {
1676        .attr   = { .ca_owner = THIS_MODULE,
1677                    .ca_name = "control",
1678                    .ca_mode = S_IWUSR },
1679        .show   = NULL,
1680        .store  = target_core_store_dev_control,
1681};
1682
1683static ssize_t target_core_show_dev_alias(void *p, char *page)
1684{
1685        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1686
1687        if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
1688                return 0;
1689
1690        return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
1691}
1692
1693static ssize_t target_core_store_dev_alias(
1694        void *p,
1695        const char *page,
1696        size_t count)
1697{
1698        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1699        struct se_hba *hba = se_dev->se_dev_hba;
1700        ssize_t read_bytes;
1701
1702        if (count > (SE_DEV_ALIAS_LEN-1)) {
1703                pr_err("alias count: %d exceeds"
1704                        " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
1705                        SE_DEV_ALIAS_LEN-1);
1706                return -EINVAL;
1707        }
1708
1709        se_dev->su_dev_flags |= SDF_USING_ALIAS;
1710        read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1711                        "%s", page);
1712
1713        pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1714                config_item_name(&hba->hba_group.cg_item),
1715                config_item_name(&se_dev->se_dev_group.cg_item),
1716                se_dev->se_dev_alias);
1717
1718        return read_bytes;
1719}
1720
1721static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1722        .attr   = { .ca_owner = THIS_MODULE,
1723                    .ca_name = "alias",
1724                    .ca_mode =  S_IRUGO | S_IWUSR },
1725        .show   = target_core_show_dev_alias,
1726        .store  = target_core_store_dev_alias,
1727};
1728
1729static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1730{
1731        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1732
1733        if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
1734                return 0;
1735
1736        return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
1737}
1738
1739static ssize_t target_core_store_dev_udev_path(
1740        void *p,
1741        const char *page,
1742        size_t count)
1743{
1744        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1745        struct se_hba *hba = se_dev->se_dev_hba;
1746        ssize_t read_bytes;
1747
1748        if (count > (SE_UDEV_PATH_LEN-1)) {
1749                pr_err("udev_path count: %d exceeds"
1750                        " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
1751                        SE_UDEV_PATH_LEN-1);
1752                return -EINVAL;
1753        }
1754
1755        se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
1756        read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1757                        "%s", page);
1758
1759        pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1760                config_item_name(&hba->hba_group.cg_item),
1761                config_item_name(&se_dev->se_dev_group.cg_item),
1762                se_dev->se_dev_udev_path);
1763
1764        return read_bytes;
1765}
1766
1767static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
1768        .attr   = { .ca_owner = THIS_MODULE,
1769                    .ca_name = "udev_path",
1770                    .ca_mode =  S_IRUGO | S_IWUSR },
1771        .show   = target_core_show_dev_udev_path,
1772        .store  = target_core_store_dev_udev_path,
1773};
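
/*
 * Usage sketch (the device path below is only an example): udev_path
 * simply records a userspace-supplied path string describing the backing
 * device, e.g.:
 *
 *   echo -n /dev/disk/by-id/scsi-example > \
 *       /sys/kernel/config/target/core/$HBA/$DEV/udev_path
 */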
1774
1775static ssize_t target_core_store_dev_enable(
1776        void *p,
1777        const char *page,
1778        size_t count)
1779{
1780        struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1781        struct se_device *dev;
1782        struct se_hba *hba = se_dev->se_dev_hba;
1783        struct se_subsystem_api *t = hba->transport;
1784        char *ptr;
1785
1786        ptr = strstr(page, "1");
1787        if (!ptr) {
1788                pr_err("For dev_enable ops, the only valid value"
1789                                " is \"1\"\n");
1790                return -EINVAL;
1791        }
1792        if (se_dev->se_dev_ptr) {
1793                pr_err("se_dev->se_dev_ptr already set for storage"
1794                                " object\n");
1795                return -EEXIST;
1796        }
1797
1798        if (t->check_configfs_dev_params(hba, se_dev) < 0)
1799                return -EINVAL;
1800
1801        dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1802        if (IS_ERR(dev))
1803                return PTR_ERR(dev);
1804        else if (!dev)
1805                return -EINVAL;
1806
1807        se_dev->se_dev_ptr = dev;
1808        pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1809                " %p\n", se_dev->se_dev_ptr);
1810
1811        return count;
1812}
1813
1814static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1815        .attr   = { .ca_owner = THIS_MODULE,
1816                    .ca_name = "enable",
1817                    .ca_mode = S_IWUSR },
1818        .show   = NULL,
1819        .store  = target_core_store_dev_enable,
1820};
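
/*
 * Usage sketch: once backend parameters have been supplied through the
 * "control" attribute, the storage object is activated by writing "1"
 * (the only accepted value) to "enable", e.g.:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 */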
1821
1822static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1823{
1824        struct se_device *dev;
1825        struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1826        struct config_item *lu_ci;
1827        struct t10_alua_lu_gp *lu_gp;
1828        struct t10_alua_lu_gp_member *lu_gp_mem;
1829        ssize_t len = 0;
1830
1831        dev = su_dev->se_dev_ptr;
1832        if (!dev)
1833                return -ENODEV;
1834
1835        if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1836                return len;
1837
1838        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1839        if (!lu_gp_mem) {
1840                pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
1841                                " pointer\n");
1842                return -EINVAL;
1843        }
1844
1845        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1846        lu_gp = lu_gp_mem->lu_gp;
1847        if (lu_gp) {
1848                lu_ci = &lu_gp->lu_gp_group.cg_item;
1849                len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
1850                        config_item_name(lu_ci), lu_gp->lu_gp_id);
1851        }
1852        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1853
1854        return len;
1855}
1856
1857static ssize_t target_core_store_alua_lu_gp(
1858        void *p,
1859        const char *page,
1860        size_t count)
1861{
1862        struct se_device *dev;
1863        struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1864        struct se_hba *hba = su_dev->se_dev_hba;
1865        struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1866        struct t10_alua_lu_gp_member *lu_gp_mem;
1867        unsigned char buf[LU_GROUP_NAME_BUF];
1868        int move = 0;
1869
1870        dev = su_dev->se_dev_ptr;
1871        if (!dev)
1872                return -ENODEV;
1873
1874        if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1875                pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1876                        config_item_name(&hba->hba_group.cg_item),
1877                        config_item_name(&su_dev->se_dev_group.cg_item));
1878                return -EINVAL;
1879        }
1880        if (count > LU_GROUP_NAME_BUF) {
1881                pr_err("ALUA LU Group Alias too large!\n");
1882                return -EINVAL;
1883        }
1884        memset(buf, 0, LU_GROUP_NAME_BUF);
1885        memcpy(buf, page, count);
1886        /*
1887         * Any ALUA logical unit alias besides "NULL" means we will be
1888         * making a new group association.
1889         */
1890        if (strcmp(strstrip(buf), "NULL")) {
1891                /*
1892                 * core_alua_get_lu_gp_by_name() will increment reference to
1893                 * struct t10_alua_lu_gp.  This reference is released with
1894                 * core_alua_put_lu_gp_from_name() below.
1895                 */
1896                lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
1897                if (!lu_gp_new)
1898                        return -ENODEV;
1899        }
1900        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1901        if (!lu_gp_mem) {
1902                if (lu_gp_new)
1903                        core_alua_put_lu_gp_from_name(lu_gp_new);
1904                pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
1905                                " pointer\n");
1906                return -EINVAL;
1907        }
1908
1909        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1910        lu_gp = lu_gp_mem->lu_gp;
1911        if (lu_gp) {
1912                /*
1913                 * Clearing an existing lu_gp association, and replacing
1914                 * with NULL
1915                 */
1916                if (!lu_gp_new) {
1917                        pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
1918                                " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1919                                " %hu\n",
1920                                config_item_name(&hba->hba_group.cg_item),
1921                                config_item_name(&su_dev->se_dev_group.cg_item),
1922                                config_item_name(&lu_gp->lu_gp_group.cg_item),
1923                                lu_gp->lu_gp_id);
1924
1925                        __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1926                        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1927
1928                        return count;
1929                }
1930                /*
1931                 * Removing existing association of lu_gp_mem with lu_gp
1932                 */
1933                __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1934                move = 1;
1935        }
1936        /*
1937         * Associate lu_gp_mem with lu_gp_new.
1938         */
1939        __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
1940        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1941
1942        pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
1943                " core/alua/lu_gps/%s, ID: %hu\n",
1944                (move) ? "Moving" : "Adding",
1945                config_item_name(&hba->hba_group.cg_item),
1946                config_item_name(&su_dev->se_dev_group.cg_item),
1947                config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1948                lu_gp_new->lu_gp_id);
1949
1950        core_alua_put_lu_gp_from_name(lu_gp_new);
1951        return count;
1952}
1953
1954static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
1955        .attr   = { .ca_owner = THIS_MODULE,
1956                    .ca_name = "alua_lu_gp",
1957                    .ca_mode = S_IRUGO | S_IWUSR },
1958        .show   = target_core_show_alua_lu_gp,
1959        .store  = target_core_store_alua_lu_gp,
1960};
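
/*
 * Usage sketch (the group name "my_lu_gp" is hypothetical): writing an
 * existing LU group alias associates the device with that group, while
 * writing "NULL" clears any existing association, e.g.:
 *
 *   echo my_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 *   echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 */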
1961
1962static struct configfs_attribute *lio_core_dev_attrs[] = {
1963        &target_core_attr_dev_info.attr,
1964        &target_core_attr_dev_control.attr,
1965        &target_core_attr_dev_alias.attr,
1966        &target_core_attr_dev_udev_path.attr,
1967        &target_core_attr_dev_enable.attr,
1968        &target_core_attr_dev_alua_lu_gp.attr,
1969        NULL,
1970};
1971
1972static void target_core_dev_release(struct config_item *item)
1973{
1974        struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
1975                                struct se_subsystem_dev, se_dev_group);
1976        struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
1977        struct se_subsystem_api *t = hba->transport;
1978        struct config_group *dev_cg = &se_dev->se_dev_group;
1979
1980        kfree(dev_cg->default_groups);
1981        /*
1982         * This pointer will be set when the storage object is enabled with:
1983         * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
1984         */
1985        if (se_dev->se_dev_ptr) {
1986                pr_debug("Target_Core_ConfigFS: Calling se_free_"
1987                        "virtual_device() for se_dev_ptr: %p\n",
1988                        se_dev->se_dev_ptr);
1989
1990                se_free_virtual_device(se_dev->se_dev_ptr, hba);
1991        } else {
1992                /*
1993                 * Release struct se_subsystem_dev->se_dev_su_ptr..
1994                 */
1995                pr_debug("Target_Core_ConfigFS: Calling t->free_"
1996                        "device() for se_dev_su_ptr: %p\n",
1997                        se_dev->se_dev_su_ptr);
1998
1999                t->free_device(se_dev->se_dev_su_ptr);
2000        }
2001
2002        pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
2003                        "_dev_t: %p\n", se_dev);
2004        kfree(se_dev);
2005}
2006
2007static ssize_t target_core_dev_show(struct config_item *item,
2008                                     struct configfs_attribute *attr,
2009                                     char *page)
2010{
2011        struct se_subsystem_dev *se_dev = container_of(
2012                        to_config_group(item), struct se_subsystem_dev,
2013                        se_dev_group);
2014        struct target_core_configfs_attribute *tc_attr = container_of(
2015                        attr, struct target_core_configfs_attribute, attr);
2016
2017        if (!tc_attr->show)
2018                return -EINVAL;
2019
2020        return tc_attr->show(se_dev, page);
2021}
2022
2023static ssize_t target_core_dev_store(struct config_item *item,
2024                                      struct configfs_attribute *attr,
2025                                      const char *page, size_t count)
2026{
2027        struct se_subsystem_dev *se_dev = container_of(
2028                        to_config_group(item), struct se_subsystem_dev,
2029                        se_dev_group);
2030        struct target_core_configfs_attribute *tc_attr = container_of(
2031                        attr, struct target_core_configfs_attribute, attr);
2032
2033        if (!tc_attr->store)
2034                return -EINVAL;
2035
2036        return tc_attr->store(se_dev, page, count);
2037}
2038
2039static struct configfs_item_operations target_core_dev_item_ops = {
2040        .release                = target_core_dev_release,
2041        .show_attribute         = target_core_dev_show,
2042        .store_attribute        = target_core_dev_store,
2043};
2044
2045static struct config_item_type target_core_dev_cit = {
2046        .ct_item_ops            = &target_core_dev_item_ops,
2047        .ct_attrs               = lio_core_dev_attrs,
2048        .ct_owner               = THIS_MODULE,
2049};
2050
2051/* End functions for struct config_item_type target_core_dev_cit */
2052
2053/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2054
2055CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
2056#define SE_DEV_ALUA_LU_ATTR(_name, _mode)                               \
2057static struct target_core_alua_lu_gp_attribute                          \
2058                        target_core_alua_lu_gp_##_name =                \
2059        __CONFIGFS_EATTR(_name, _mode,                                  \
2060        target_core_alua_lu_gp_show_attr_##_name,                       \
2061        target_core_alua_lu_gp_store_attr_##_name);
2062
2063#define SE_DEV_ALUA_LU_ATTR_RO(_name)                                   \
2064static struct target_core_alua_lu_gp_attribute                          \
2065                        target_core_alua_lu_gp_##_name =                \
2066        __CONFIGFS_EATTR_RO(_name,                                      \
2067        target_core_alua_lu_gp_show_attr_##_name);
2068
2069/*
2070 * lu_gp_id
2071 */
2072static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
2073        struct t10_alua_lu_gp *lu_gp,
2074        char *page)
2075{
2076        if (!lu_gp->lu_gp_valid_id)
2077                return 0;
2078
2079        return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2080}
2081
2082static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
2083        struct t10_alua_lu_gp *lu_gp,
2084        const char *page,
2085        size_t count)
2086{
2087        struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
2088        unsigned long lu_gp_id;
2089        int ret;
2090
2091        ret = strict_strtoul(page, 0, &lu_gp_id);
2092        if (ret < 0) {
2093                pr_err("strict_strtoul() returned %d for"
2094                        " lu_gp_id\n", ret);
2095                return -EINVAL;
2096        }
2097        if (lu_gp_id > 0x0000ffff) {
2098                pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
2099                        " 0x0000ffff\n", lu_gp_id);
2100                return -EINVAL;
2101        }
2102
2103        ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
2104        if (ret < 0)
2105                return -EINVAL;
2106
2107        pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
2108                " Group: core/alua/lu_gps/%s to ID: %hu\n",
2109                config_item_name(&alua_lu_gp_cg->cg_item),
2110                lu_gp->lu_gp_id);
2111
2112        return count;
2113}
2114
2115SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
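
/*
 * Usage sketch (group name and ID are arbitrary examples): an ALUA LU
 * group is created with mkdir under core/alua/lu_gps/ and becomes valid
 * once a 16-bit ID is assigned, e.g.:
 *
 *   mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
 *   echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
 */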
2116
2117/*
2118 * members
2119 */
2120static ssize_t target_core_alua_lu_gp_show_attr_members(
2121        struct t10_alua_lu_gp *lu_gp,
2122        char *page)
2123{
2124        struct se_device *dev;
2125        struct se_hba *hba;
2126        struct se_subsystem_dev *su_dev;
2127        struct t10_alua_lu_gp_member *lu_gp_mem;
2128        ssize_t len = 0, cur_len;
2129        unsigned char buf[LU_GROUP_NAME_BUF];
2130
2131        memset(buf, 0, LU_GROUP_NAME_BUF);
2132
2133        spin_lock(&lu_gp->lu_gp_lock);
2134        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2135                dev = lu_gp_mem->lu_gp_mem_dev;
2136                su_dev = dev->se_sub_dev;
2137                hba = su_dev->se_dev_hba;
2138
2139                cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
2140                        config_item_name(&hba->hba_group.cg_item),
2141                        config_item_name(&su_dev->se_dev_group.cg_item));
2142                cur_len++; /* Extra byte for NULL terminator */
2143
2144                if ((cur_len + len) > PAGE_SIZE) {
2145                        pr_warn("Ran out of lu_gp_show_attr"
2146                                "_members buffer\n");
2147                        break;
2148                }
2149                memcpy(page+len, buf, cur_len);
2150                len += cur_len;
2151        }
2152        spin_unlock(&lu_gp->lu_gp_lock);
2153
2154        return len;
2155}
2156
2157SE_DEV_ALUA_LU_ATTR_RO(members);
2158
2159CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
2160
2161static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2162        &target_core_alua_lu_gp_lu_gp_id.attr,
2163        &target_core_alua_lu_gp_members.attr,
2164        NULL,
2165};
2166
2167static void target_core_alua_lu_gp_release(struct config_item *item)
2168{
2169        struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2170                        struct t10_alua_lu_gp, lu_gp_group);
2171
2172        core_alua_free_lu_gp(lu_gp);
2173}
2174
2175static struct configfs_item_operations target_core_alua_lu_gp_ops = {
2176        .release                = target_core_alua_lu_gp_release,
2177        .show_attribute         = target_core_alua_lu_gp_attr_show,
2178        .store_attribute        = target_core_alua_lu_gp_attr_store,
2179};
2180
2181static struct config_item_type target_core_alua_lu_gp_cit = {
2182        .ct_item_ops            = &target_core_alua_lu_gp_ops,
2183        .ct_attrs               = target_core_alua_lu_gp_attrs,
2184        .ct_owner               = THIS_MODULE,
2185};
2186
2187/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2188
2189/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2190
2191static struct config_group *target_core_alua_create_lu_gp(
2192        struct config_group *group,
2193        const char *name)
2194{
2195        struct t10_alua_lu_gp *lu_gp;
2196        struct config_group *alua_lu_gp_cg = NULL;
2197        struct config_item *alua_lu_gp_ci = NULL;
2198
2199        lu_gp = core_alua_allocate_lu_gp(name, 0);
2200        if (IS_ERR(lu_gp))
2201                return NULL;
2202
2203        alua_lu_gp_cg = &lu_gp->lu_gp_group;
2204        alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2205
2206        config_group_init_type_name(alua_lu_gp_cg, name,
2207                        &target_core_alua_lu_gp_cit);
2208
2209        pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2210                " Group: core/alua/lu_gps/%s\n",
2211                config_item_name(alua_lu_gp_ci));
2212
2213        return alua_lu_gp_cg;
2214
2215}
2216
2217static void target_core_alua_drop_lu_gp(
2218        struct config_group *group,
2219        struct config_item *item)
2220{
2221        struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2222                        struct t10_alua_lu_gp, lu_gp_group);
2223
2224        pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2225                " Group: core/alua/lu_gps/%s, ID: %hu\n",
2226                config_item_name(item), lu_gp->lu_gp_id);
2227        /*
2228         * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2229         * -> target_core_alua_lu_gp_release()
2230         */
2231        config_item_put(item);
2232}
2233
2234static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
2235        .make_group             = &target_core_alua_create_lu_gp,
2236        .drop_item              = &target_core_alua_drop_lu_gp,
2237};
2238
2239static struct config_item_type target_core_alua_lu_gps_cit = {
2240        .ct_item_ops            = NULL,
2241        .ct_group_ops           = &target_core_alua_lu_gps_group_ops,
2242        .ct_owner               = THIS_MODULE,
2243};
2244
2245/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2246
2247/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2248
2249CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
2250#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)                            \
2251static struct target_core_alua_tg_pt_gp_attribute                       \
2252                        target_core_alua_tg_pt_gp_##_name =             \
2253        __CONFIGFS_EATTR(_name, _mode,                                  \
2254        target_core_alua_tg_pt_gp_show_attr_##_name,                    \
2255        target_core_alua_tg_pt_gp_store_attr_##_name);
2256
2257#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)                                \
2258static struct target_core_alua_tg_pt_gp_attribute                       \
2259                        target_core_alua_tg_pt_gp_##_name =             \
2260        __CONFIGFS_EATTR_RO(_name,                                      \
2261        target_core_alua_tg_pt_gp_show_attr_##_name);
2262
2263/*
2264 * alua_access_state
2265 */
2266static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
2267        struct t10_alua_tg_pt_gp *tg_pt_gp,
2268        char *page)
2269{
2270        return sprintf(page, "%d\n",
2271                atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
2272}
2273
2274static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2275        struct t10_alua_tg_pt_gp *tg_pt_gp,
2276        const char *page,
2277        size_t count)
2278{
2279        struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
2280        unsigned long tmp;
2281        int new_state, ret;
2282
2283        if (!tg_pt_gp->tg_pt_gp_valid_id) {
2284                pr_err("Unable to do implicit ALUA on non-valid"
2285                        " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2286                return -EINVAL;
2287        }
2288
2289        ret = strict_strtoul(page, 0, &tmp);
2290        if (ret < 0) {
2291                pr_err("Unable to extract new ALUA access state from"
2292                                " %s\n", page);
2293                return -EINVAL;
2294        }
2295        new_state = (int)tmp;
2296
2297        if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
2298                pr_err("Unable to process implicit configfs ALUA"
2299                        " transition while TPGS_IMPLICT_ALUA is disabled\n");
2300                return -EINVAL;
2301        }
2302
2303        ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
2304                                        NULL, NULL, new_state, 0);
2305        return (!ret) ? count : -EINVAL;
2306}
2307
2308SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
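
/*
 * Usage sketch: an implicit ALUA state transition can be requested from
 * userspace by writing the new numeric state to alua_access_state of a
 * tg_pt_gp that has a valid ID and TPGS_IMPLICT_ALUA enabled.  The path
 * and state value below are illustrative only:
 *
 *   echo 2 > /sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/alua_access_state
 */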
2309
2310/*
2311 * alua_access_status
2312 */
2313static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
2314        struct t10_alua_tg_pt_gp *tg_pt_gp,
2315        char *page)
2316{
2317        return sprintf(page, "%s\n",
2318                core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2319}
2320
2321static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2322        struct t10_alua_tg_pt_gp *tg_pt_gp,
2323        const char *page,
2324        size_t count)
2325{
2326        unsigned long tmp;
2327        int new_status, ret;
2328
2329        if (!tg_pt_gp->tg_pt_gp_valid_id) {
2330                pr_err("Unable to set ALUA access status on non-"
2331                        "valid tg_pt_gp ID: %hu\n",
2332                        tg_pt_gp->tg_pt_gp_valid_id);
2333                return -EINVAL;
2334        }
2335
2336        ret = strict_strtoul(page, 0, &tmp);
2337        if (ret < 0) {
2338                pr_err("Unable to extract new ALUA access status"
2339                                " from %s\n", page);
2340                return -EINVAL;
2341        }
2342        new_status = (int)tmp;
2343
2344        if ((new_status != ALUA_STATUS_NONE) &&
2345            (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
2346            (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
2347                pr_err("Illegal ALUA access status: 0x%02x\n",
2348                                new_status);
2349                return -EINVAL;
2350        }
2351
2352        tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2353        return count;
2354}
2355
2356SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
2357
2358/*
2359 * alua_access_type
2360 */
2361static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
2362        struct t10_alua_tg_pt_gp *tg_pt_gp,
2363        char *page)
2364{
2365        return core_alua_show_access_type(tg_pt_gp, page);
2366}
2367
2368static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2369        struct t10_alua_tg_pt_gp *tg_pt_gp,
2370        const char *page,
2371        size_t count)
2372{
2373        return core_alua_store_access_type(tg_pt_gp, page, count);
2374}
2375
2376SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2377
2378/*
2379 * alua_write_metadata
2380 */
2381static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
2382        struct t10_alua_tg_pt_gp *tg_pt_gp,
2383        char *page)
2384{
2385        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
2386}
2387
2388static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2389        struct t10_alua_tg_pt_gp *tg_pt_gp,
2390        const char *page,
2391        size_t count)
2392{
2393        unsigned long tmp;
2394        int ret;
2395
2396        ret = strict_strtoul(page, 0, &tmp);
2397        if (ret < 0) {
2398                pr_err("Unable to extract alua_write_metadata\n");
2399                return -EINVAL;
2400        }
2401
2402        if ((tmp != 0) && (tmp != 1)) {
2403                pr_err("Illegal value for alua_write_metadata:"
2404                        " %lu\n", tmp);
2405                return -EINVAL;
2406        }
2407        tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2408
2409        return count;
2410}
2411
2412SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
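
/*
 * Usage sketch: alua_write_metadata is a plain 0/1 toggle, e.g.:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/alua_write_metadata
 */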
2413
2414
2415
2416/*
2417 * nonop_delay_msecs
2418 */
2419static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
2420        struct t10_alua_tg_pt_gp *tg_pt_gp,
2421        char *page)
2422{
2423        return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
2424
2425}
2426
2427static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
2428        struct t10_alua_tg_pt_gp *tg_pt_gp,
2429        const char *page,
2430        size_t count)
2431{
2432        return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
2433}
2434
2435SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
2436
2437/*
2438 * trans_delay_msecs
2439 */
2440static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
2441        struct t10_alua_tg_pt_gp *tg_pt_gp,
2442        char *page)
2443{
2444        return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
2445}
2446
2447static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2448        struct t10_alua_tg_pt_gp *tg_pt_gp,
2449        const char *page,
2450        size_t count)
2451{
2452        return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
2453}
2454
2455SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2456
2457/*
2458 * preferred
2459 */
2460
2461static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
2462        struct t10_alua_tg_pt_gp *tg_pt_gp,
2463        char *page)
2464{
2465        return core_alua_show_preferred_bit(tg_pt_gp, page);
2466}
2467
2468static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
2469        struct t10_alua_tg_pt_gp *tg_pt_gp,
2470        const char *page,
2471        size_t count)
2472{
2473        return core_alua_store_preferred_bit(tg_pt_gp, page, count);
2474}
2475
2476SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
2477
2478/*
2479 * tg_pt_gp_id
2480 */
2481static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2482        struct t10_alua_tg_pt_gp *tg_pt_gp,
2483        char *page)
2484{
2485        if (!tg_pt_gp->tg_pt_gp_valid_id)
2486                return 0;
2487
2488        return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2489}
2490
2491static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2492        struct t10_alua_tg_pt_gp *tg_pt_gp,
2493        const char *page,
2494        size_t count)
2495{
2496        struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2497        unsigned long tg_pt_gp_id;
2498        int ret;
2499
2500        ret = strict_strtoul(page, 0, &tg_pt_gp_id);
2501        if (ret < 0) {
2502                pr_err("strict_strtoul() returned %d for"
2503                        " tg_pt_gp_id\n", ret);
2504                return -EINVAL;
2505        }
2506        if (tg_pt_gp_id > 0x0000ffff) {
2507                pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
2508                        " 0x0000ffff\n", tg_pt_gp_id);
2509                return -EINVAL;
2510        }
2511
2512        ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
2513        if (ret < 0)
2514                return -EINVAL;
2515
2516        pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
2517                "core/alua/tg_pt_gps/%s to ID: %hu\n",
2518                config_item_name(&alua_tg_pt_gp_cg->cg_item),
2519                tg_pt_gp->tg_pt_gp_id);
2520
2521        return count;
2522}
2523
2524SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
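
/*
 * Usage sketch (group name and ID are arbitrary examples): additional
 * target port groups are created with mkdir under $DEV/alua/ and become
 * valid once a 16-bit ID is assigned, e.g.:
 *
 *   mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp
 *   echo 16 > /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp/tg_pt_gp_id
 */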
2525
2526/*
2527 * members
2528 */
2529static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
2530        struct t10_alua_tg_pt_gp *tg_pt_gp,
2531        char *page)
2532{
2533        struct se_port *port;
2534        struct se_portal_group *tpg;
2535        struct se_lun *lun;
2536        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2537        ssize_t len = 0, cur_len;
2538        unsigned char buf[TG_PT_GROUP_NAME_BUF];
2539
2540        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2541
2542        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
2543        list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
2544                        tg_pt_gp_mem_list) {
2545                port = tg_pt_gp_mem->tg_pt;
2546                tpg = port->sep_tpg;
2547                lun = port->sep_lun;
2548
2549                cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
2550                        "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
2551                        tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2552                        tpg->se_tpg_tfo->tpg_get_tag(tpg),
2553                        config_item_name(&lun->lun_group.cg_item));
2554                cur_len++; /* Extra byte for NULL terminator */
2555
2556                if ((cur_len + len) > PAGE_SIZE) {
2557                        pr_warn("Ran out of tg_pt_gp_show_attr"
2558                                "_members buffer\n");
2559                        break;
2560                }
2561                memcpy(page+len, buf, cur_len);
2562                len += cur_len;
2563        }
2564        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
2565
2566        return len;
2567}
2568
2569SE_DEV_ALUA_TG_PT_ATTR_RO(members);
2570
2571CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
2572                        tg_pt_gp_group);
2573
2574static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2575        &target_core_alua_tg_pt_gp_alua_access_state.attr,
2576        &target_core_alua_tg_pt_gp_alua_access_status.attr,
2577        &target_core_alua_tg_pt_gp_alua_access_type.attr,
2578        &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2579        &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2580        &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
2581        &target_core_alua_tg_pt_gp_preferred.attr,
2582        &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2583        &target_core_alua_tg_pt_gp_members.attr,
2584        NULL,
2585};
2586
2587static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2588{
2589        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2590                        struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2591
2592        core_alua_free_tg_pt_gp(tg_pt_gp);
2593}
2594
2595static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
2596        .release                = target_core_alua_tg_pt_gp_release,
2597        .show_attribute         = target_core_alua_tg_pt_gp_attr_show,
2598        .store_attribute        = target_core_alua_tg_pt_gp_attr_store,
2599};
2600
2601static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2602        .ct_item_ops            = &target_core_alua_tg_pt_gp_ops,
2603        .ct_attrs               = target_core_alua_tg_pt_gp_attrs,
2604        .ct_owner               = THIS_MODULE,
2605};
2606
2607/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2608
2609/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2610
2611static struct config_group *target_core_alua_create_tg_pt_gp(
2612        struct config_group *group,
2613        const char *name)
2614{
2615        struct t10_alua *alua = container_of(group, struct t10_alua,
2616                                        alua_tg_pt_gps_group);
2617        struct t10_alua_tg_pt_gp *tg_pt_gp;
2618        struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
2619        struct config_group *alua_tg_pt_gp_cg = NULL;
2620        struct config_item *alua_tg_pt_gp_ci = NULL;
2621
2622        tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
2623        if (!tg_pt_gp)
2624                return NULL;
2625
2626        alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2627        alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2628
2629        config_group_init_type_name(alua_tg_pt_gp_cg, name,
2630                        &target_core_alua_tg_pt_gp_cit);
2631
2632        pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
2633                " Group: alua/tg_pt_gps/%s\n",
2634                config_item_name(alua_tg_pt_gp_ci));
2635
2636        return alua_tg_pt_gp_cg;
2637}
2638
2639static void target_core_alua_drop_tg_pt_gp(
2640        struct config_group *group,
2641        struct config_item *item)
2642{
2643        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2644                        struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2645
2646        pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
2647                " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2648                config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2649        /*
2650         * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2651         * -> target_core_alua_tg_pt_gp_release().
2652         */
2653        config_item_put(item);
2654}
2655
2656static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2657        .make_group             = &target_core_alua_create_tg_pt_gp,
2658        .drop_item              = &target_core_alua_drop_tg_pt_gp,
2659};
2660
2661static struct config_item_type target_core_alua_tg_pt_gps_cit = {
2662        .ct_group_ops           = &target_core_alua_tg_pt_gps_group_ops,
2663        .ct_owner               = THIS_MODULE,
2664};
2665
2666/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2667
2668/* Start functions for struct config_item_type target_core_alua_cit */
2669
2670/*
2671 * target_core_alua_cit is a ConfigFS group that lives under
2672 * /sys/kernel/config/target/core/alua.  There are default groups
2673 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
2674 * target_core_alua_cit in target_core_init_configfs() below.
2675 */
2676static struct config_item_type target_core_alua_cit = {
2677        .ct_item_ops            = NULL,
2678        .ct_attrs               = NULL,
2679        .ct_owner               = THIS_MODULE,
2680};
2681
2682/* End functions for struct config_item_type target_core_alua_cit */
2683
2684/* Start functions for struct config_item_type target_core_stat_cit */
2685
2686static struct config_group *target_core_stat_mkdir(
2687        struct config_group *group,
2688        const char *name)
2689{
2690        return ERR_PTR(-ENOSYS);
2691}
2692
2693static void target_core_stat_rmdir(
2694        struct config_group *group,
2695        struct config_item *item)
2696{
2697        return;
2698}
2699
2700static struct configfs_group_operations target_core_stat_group_ops = {
2701        .make_group             = &target_core_stat_mkdir,
2702        .drop_item              = &target_core_stat_rmdir,
2703};
2704
2705static struct config_item_type target_core_stat_cit = {
2706        .ct_group_ops           = &target_core_stat_group_ops,
2707        .ct_owner               = THIS_MODULE,
2708};
2709
2710/* End functions for struct config_item_type target_core_stat_cit */
2711
2712/* Start functions for struct config_item_type target_core_hba_cit */
2713
2714static struct config_group *target_core_make_subdev(
2715        struct config_group *group,
2716        const char *name)
2717{
2718        struct t10_alua_tg_pt_gp *tg_pt_gp;
2719        struct se_subsystem_dev *se_dev;
2720        struct se_subsystem_api *t;
2721        struct config_item *hba_ci = &group->cg_item;
2722        struct se_hba *hba = item_to_hba(hba_ci);
2723        struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2724        struct config_group *dev_stat_grp = NULL;
2725        int errno = -ENOMEM, ret;
2726
2727        ret = mutex_lock_interruptible(&hba->hba_access_mutex);
2728        if (ret)
2729                return ERR_PTR(ret);
2730        /*
2731         * Locate the struct se_subsystem_api from parent's struct se_hba.
2732         */
2733        t = hba->transport;
2734
2735        se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
2736        if (!se_dev) {
2737                pr_err("Unable to allocate memory for"
2738                                " struct se_subsystem_dev\n");
2739                goto unlock;
2740        }
2741        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2742        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2743        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
2744        INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
2745        spin_lock_init(&se_dev->t10_pr.registration_lock);
2746        spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
2747        INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
2748        spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
2749        spin_lock_init(&se_dev->se_dev_lock);
2750        se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
2751        se_dev->t10_wwn.t10_sub_dev = se_dev;
2752        se_dev->t10_alua.t10_sub_dev = se_dev;
2753        se_dev->se_dev_attrib.da_sub_dev = se_dev;
2754
2755        se_dev->se_dev_hba = hba;
2756        dev_cg = &se_dev->se_dev_group;
2757
2758        dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
2759                        GFP_KERNEL);
2760        if (!dev_cg->default_groups)
2761                goto out;
2762        /*
2763         * Set se_dev_su_ptr from the subsystem dependent void pointer
2764         * returned by struct se_subsystem_api->allocate_virtdevice().
2765         *
2766         * se_dev->se_dev_ptr will be set after ->create_virtdevice()
2767         * has been called successfully at the next level up in the
2768         * configfs tree for the device object's struct config_group.
2769         */
2770        se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2771        if (!se_dev->se_dev_su_ptr) {
2772                pr_err("Unable to locate subsystem dependent pointer"
2773                        " from allocate_virtdevice()\n");
2774                goto out;
2775        }
2776
2777        config_group_init_type_name(&se_dev->se_dev_group, name,
2778                        &target_core_dev_cit);
2779        config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
2780                        &target_core_dev_attrib_cit);
2781        config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
2782                        &target_core_dev_pr_cit);
2783        config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
2784                        &target_core_dev_wwn_cit);
2785        config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
2786                        "alua", &target_core_alua_tg_pt_gps_cit);
2787        config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
2788                        "statistics", &target_core_stat_cit);
2789
2790        dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
2791        dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
2792        dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
2793        dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
2794        dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
2795        dev_cg->default_groups[5] = NULL;
2796        /*
2797         * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2798         */
2799        tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
2800        if (!tg_pt_gp)
2801                goto out;
2802
2803        tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
2804        tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
2805                                GFP_KERNEL);
2806        if (!tg_pt_gp_cg->default_groups) {
2807                pr_err("Unable to allocate tg_pt_gp_cg->"
2808                                "default_groups\n");
2809                goto out;
2810        }
2811
2812        config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2813                        "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2814        tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2815        tg_pt_gp_cg->default_groups[1] = NULL;
2816        se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2817        /*
2818         * Add core/$HBA/$DEV/statistics/ default groups
2819         */
2820        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
2821        dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
2822                                GFP_KERNEL);
2823        if (!dev_stat_grp->default_groups) {
2824                pr_err("Unable to allocate dev_stat_grp->default_groups\n");
2825                goto out;
2826        }
2827        target_stat_setup_dev_default_groups(se_dev);
2828
2829        pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2830                " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2831
2832        mutex_unlock(&hba->hba_access_mutex);
2833        return &se_dev->se_dev_group;
2834out:
2835        if (se_dev->t10_alua.default_tg_pt_gp) {
2836                core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
2837                se_dev->t10_alua.default_tg_pt_gp = NULL;
2838        }
2839        if (dev_stat_grp)
2840                kfree(dev_stat_grp->default_groups);
2841        if (tg_pt_gp_cg)
2842                kfree(tg_pt_gp_cg->default_groups);
2843        if (dev_cg)
2844                kfree(dev_cg->default_groups);
2845        if (se_dev->se_dev_su_ptr)
2846                t->free_device(se_dev->se_dev_su_ptr);
2847        kfree(se_dev);
2848unlock:
2849        mutex_unlock(&hba->hba_access_mutex);
2850        return ERR_PTR(errno);
2851}
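
/*
 * Usage sketch ($HBA is an existing HBA directory and $DEV a new device
 * name): target_core_make_subdev() runs when userspace does
 *
 *   mkdir /sys/kernel/config/target/core/$HBA/$DEV
 *
 * and pre-populates the attrib/, pr/, wwn/, alua/ and statistics/ default
 * groups plus alua/default_tg_pt_gp for the new storage object.
 */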
2852
2853static void target_core_drop_subdev(
2854        struct config_group *group,
2855        struct config_item *item)
2856{
2857        struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
2858                                struct se_subsystem_dev, se_dev_group);
2859        struct se_hba *hba;
2860        struct se_subsystem_api *t;
2861        struct config_item *df_item;
2862        struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
2863        int i;
2864
2865        hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
2866
2867        mutex_lock(&hba->hba_access_mutex);
2868        t = hba->transport;
2869
2870        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
2871        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2872                df_item = &dev_stat_grp->default_groups[i]->cg_item;
2873                dev_stat_grp->default_groups[i] = NULL;
2874                config_item_put(df_item);
2875        }
2876        kfree(dev_stat_grp->default_groups);
2877
2878        tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
2879        for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
2880                df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
2881                tg_pt_gp_cg->default_groups[i] = NULL;
2882                config_item_put(df_item);
2883        }
2884        kfree(tg_pt_gp_cg->default_groups);
2885        /*
2886         * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is called
2887         * directly from target_core_alua_tg_pt_gp_release().
2888         */
2889        se_dev->t10_alua.default_tg_pt_gp = NULL;
2890
2891        dev_cg = &se_dev->se_dev_group;
2892        for (i = 0; dev_cg->default_groups[i]; i++) {
2893                df_item = &dev_cg->default_groups[i]->cg_item;
2894                dev_cg->default_groups[i] = NULL;
2895                config_item_put(df_item);
2896        }
2897        /*
2898         * se_dev and the associated se_dev->se_dev_ptr are released from
2899         * target_core_dev_item_ops->release() -> target_core_dev_release().
2900         */
2901        config_item_put(item);
2902        mutex_unlock(&hba->hba_access_mutex);
2903}
2904
2905static struct configfs_group_operations target_core_hba_group_ops = {
2906        .make_group             = target_core_make_subdev,
2907        .drop_item              = target_core_drop_subdev,
2908};
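/*
 * mkdir(2) and rmdir(2) inside a core/$HBA/ directory invoke the two
 * callbacks above, e.g.:
 *
 *   mkdir /sys/kernel/config/target/core/rd_mcp_0/ramdisk0
 *
 * (the device name "ramdisk0" is only an example).
 */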
2909
2910CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
2911#define SE_HBA_ATTR(_name, _mode)                               \
2912static struct target_core_hba_attribute                         \
2913                target_core_hba_##_name =                       \
2914                __CONFIGFS_EATTR(_name, _mode,                  \
2915                target_core_hba_show_attr_##_name,              \
2916                target_core_hba_store_attr_##_name);
2917
2918#define SE_HBA_ATTR_RO(_name)                                   \
2919static struct target_core_hba_attribute                         \
2920                target_core_hba_##_name =                       \
2921                __CONFIGFS_EATTR_RO(_name,                      \
2922                target_core_hba_show_attr_##_name);
2923
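/*
 * The SE_HBA_ATTR()/SE_HBA_ATTR_RO() helpers above generate the
 * target_core_hba_attribute wrappers for the hba_info and hba_mode
 * attributes defined below, wiring their show/store handlers into the
 * per-HBA configfs group.
 */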
2924static ssize_t target_core_hba_show_attr_hba_info(
2925        struct se_hba *hba,
2926        char *page)
2927{
2928        return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
2929                        hba->hba_id, hba->transport->name,
2930                        TARGET_CORE_CONFIGFS_VERSION);
2931}
2932
2933SE_HBA_ATTR_RO(hba_info);
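/*
 * Reading core/$HBA/hba_info reports the HBA index, the backing subsystem
 * plugin name and TARGET_CORE_CONFIGFS_VERSION, for example via:
 *
 *   cat /sys/kernel/config/target/core/rd_mcp_0/hba_info
 */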
2934
2935static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
2936                                char *page)
2937{
2938        int hba_mode = 0;
2939
2940        if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
2941                hba_mode = 1;
2942
2943        return sprintf(page, "%d\n", hba_mode);
2944}
2945
2946static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2947                                const char *page, size_t count)
2948{
2949        struct se_subsystem_api *transport = hba->transport;
2950        unsigned long mode_flag;
2951        int ret;
2952
2953        if (transport->pmode_enable_hba == NULL)
2954                return -EINVAL;
2955
2956        ret = strict_strtoul(page, 0, &mode_flag);
2957        if (ret < 0) {
2958                pr_err("Unable to extract hba mode flag: %d\n", ret);
2959                return -EINVAL;
2960        }
2961
2962        spin_lock(&hba->device_lock);
2963        if (!list_empty(&hba->hba_dev_list)) {
2964                pr_err("Unable to set hba_mode with active devices\n");
2965                spin_unlock(&hba->device_lock);
2966                return -EINVAL;
2967        }
2968        spin_unlock(&hba->device_lock);
2969
2970        ret = transport->pmode_enable_hba(hba, mode_flag);
2971        if (ret < 0)
2972                return -EINVAL;
2973        if (ret > 0)
2974                hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
2975        else if (ret == 0)
2976                hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
2977
2978        return count;
2979}
2980
2981SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
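/*
 * Writing to core/$HBA/hba_mode asks the subsystem plugin to toggle
 * passthrough (pSCSI) mode via its ->pmode_enable_hba() callback, e.g.:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/hba_mode
 *
 * The write is rejected with -EINVAL when the plugin does not implement the
 * callback or when devices are already configured on the HBA.
 */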
2982
2983CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
2984
2985static void target_core_hba_release(struct config_item *item)
2986{
2987        struct se_hba *hba = container_of(to_config_group(item),
2988                                struct se_hba, hba_group);
2989        core_delete_hba(hba);
2990}
2991
2992static struct configfs_attribute *target_core_hba_attrs[] = {
2993        &target_core_hba_hba_info.attr,
2994        &target_core_hba_hba_mode.attr,
2995        NULL,
2996};
2997
2998static struct configfs_item_operations target_core_hba_item_ops = {
2999        .release                = target_core_hba_release,
3000        .show_attribute         = target_core_hba_attr_show,
3001        .store_attribute        = target_core_hba_attr_store,
3002};
3003
3004static struct config_item_type target_core_hba_cit = {
3005        .ct_item_ops            = &target_core_hba_item_ops,
3006        .ct_group_ops           = &target_core_hba_group_ops,
3007        .ct_attrs               = target_core_hba_attrs,
3008        .ct_owner               = THIS_MODULE,
3009};
3010
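/*
 * Called for mkdir(2) under /sys/kernel/config/target/core/.  The directory
 * name is expected to be $SUBSYSTEM_PLUGIN_$HOST_ID and is split at the '_'
 * separating the plugin name from the ID (handling plugin names such as
 * rd_mcp that contain their own '_'), so e.g.:
 *
 *   mkdir /sys/kernel/config/target/core/rd_mcp_0
 *
 * attaches plugin "rd_mcp" with plugin dependent ID 0.
 */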
3011static struct config_group *target_core_call_addhbatotarget(
3012        struct config_group *group,
3013        const char *name)
3014{
3015        char *se_plugin_str, *str, *str2;
3016        struct se_hba *hba;
3017        char buf[TARGET_CORE_NAME_MAX_LEN];
3018        unsigned long plugin_dep_id = 0;
3019        int ret;
3020
3021        memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3022        if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3023                pr_err("Passed *name strlen(): %d exceeds"
3024                        " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3025                        TARGET_CORE_NAME_MAX_LEN);
3026                return ERR_PTR(-ENAMETOOLONG);
3027        }
3028        snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3029
3030        str = strstr(buf, "_");
3031        if (!str) {
3032                pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3033                return ERR_PTR(-EINVAL);
3034        }
3035        se_plugin_str = buf;
3036        /*
3037         * Special case for subsystem plugins that have "_" in their names.
3038         * Namely rd_direct and rd_mcp.
3039         */
3040        str2 = strstr(str+1, "_");
3041        if (str2) {
3042                *str2 = '\0'; /* Terminate for *se_plugin_str */
3043                str2++; /* Skip to start of plugin dependent ID */
3044                str = str2;
3045        } else {
3046                *str = '\0'; /* Terminate for *se_plugin_str */
3047                str++; /* Skip to start of plugin dependent ID */
3048        }
3049
3050        ret = strict_strtoul(str, 0, &plugin_dep_id);
3051        if (ret < 0) {
3052                pr_err("strict_strtoul() returned %d for"
3053                                " plugin_dep_id\n", ret);
3054                return ERR_PTR(-EINVAL);
3055        }
3056        /*
3057         * Load up TCM subsystem plugins if they have not already been loaded.
3058         */
3059        transport_subsystem_check_init();
3060
3061        hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
3062        if (IS_ERR(hba))
3063                return ERR_CAST(hba);
3064
3065        config_group_init_type_name(&hba->hba_group, name,
3066                        &target_core_hba_cit);
3067
3068        return &hba->hba_group;
3069}
3070
3071static void target_core_call_delhbafromtarget(
3072        struct config_group *group,
3073        struct config_item *item)
3074{
3075        /*
3076         * core_delete_hba() is called from target_core_hba_item_ops->release()
3077         * -> target_core_hba_release()
3078         */
3079        config_item_put(item);
3080}
3081
3082static struct configfs_group_operations target_core_group_ops = {
3083        .make_group     = target_core_call_addhbatotarget,
3084        .drop_item      = target_core_call_delhbafromtarget,
3085};
3086
3087static struct config_item_type target_core_cit = {
3088        .ct_item_ops    = NULL,
3089        .ct_group_ops   = &target_core_group_ops,
3090        .ct_attrs       = NULL,
3091        .ct_owner       = THIS_MODULE,
3092};
3093
3094/* Stop functions for struct config_item_type target_core_hba_cit */
3095
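/*
 * Module init: build the default /sys/kernel/config/target/core/ layout
 * (core/, core/alua/, core/alua/lu_gps/ and lu_gps/default_lu_gp), register
 * the configfs subsystem, then bring up the built-in ramdisk plugin and the
 * virtual LUN 0 device.
 */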
3096static int __init target_core_init_configfs(void)
3097{
3098        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
3099        struct config_group *lu_gp_cg = NULL;
3100        struct configfs_subsystem *subsys;
3101        struct t10_alua_lu_gp *lu_gp;
3102        int ret;
3103
3104        pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
3105                " Engine: %s on %s/%s on "UTS_RELEASE"\n",
3106                TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3107
3108        subsys = target_core_subsystem[0];
3109        config_group_init(&subsys->su_group);
3110        mutex_init(&subsys->su_mutex);
3111
3112        INIT_LIST_HEAD(&g_tf_list);
3113        mutex_init(&g_tf_lock);
3114        ret = init_se_kmem_caches();
3115        if (ret < 0)
3116                return ret;
3117        /*
3118         * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
3119         * and ALUA Logical Unit Group and Target Port Group infrastructure.
3120         */
3121        target_cg = &subsys->su_group;
3122        target_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3123                                GFP_KERNEL);
3124        if (!target_cg->default_groups) {
3125                pr_err("Unable to allocate target_cg->default_groups\n");
3126                ret = -ENOMEM;
3127                goto out_global;
3128        }
3129
3130        config_group_init_type_name(&target_core_hbagroup,
3131                        "core", &target_core_cit);
3132        target_cg->default_groups[0] = &target_core_hbagroup;
3133        target_cg->default_groups[1] = NULL;
3134        /*
3135         * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
3136         */
3137        hba_cg = &target_core_hbagroup;
3138        hba_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3139                                GFP_KERNEL);
3140        if (!hba_cg->default_groups) {
3141                pr_err("Unable to allocate hba_cg->default_groups\n");
3142                ret = -ENOMEM;
3143                goto out_global;
3144        }
3145        config_group_init_type_name(&alua_group,
3146                        "alua", &target_core_alua_cit);
3147        hba_cg->default_groups[0] = &alua_group;
3148        hba_cg->default_groups[1] = NULL;
3149        /*
3150         * Add ALUA Logical Unit Group and Target Port Group ConfigFS
3151         * groups under /sys/kernel/config/target/core/alua/
3152         */
3153        alua_cg = &alua_group;
3154        alua_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3155                        GFP_KERNEL);
3156        if (!alua_cg->default_groups) {
3157                pr_err("Unable to allocate alua_cg->default_groups\n");
3158                ret = -ENOMEM;
3159                goto out_global;
3160        }
3161
3162        config_group_init_type_name(&alua_lu_gps_group,
3163                        "lu_gps", &target_core_alua_lu_gps_cit);
3164        alua_cg->default_groups[0] = &alua_lu_gps_group;
3165        alua_cg->default_groups[1] = NULL;
3166        /*
3167         * Add core/alua/lu_gps/default_lu_gp
3168         */
3169        lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3170        if (IS_ERR(lu_gp)) {
3171                ret = PTR_ERR(lu_gp);
3172                goto out_global;
3173        }
3174
3175        lu_gp_cg = &alua_lu_gps_group;
3176        lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3177                        GFP_KERNEL);
3178        if (!lu_gp_cg->default_groups) {
3179                pr_err("Unable to allocate lu_gp_cg->default_groups\n");
3180                ret = -ENOMEM;
3181                goto out_global;
3182        }
3183
3184        config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
3185                                &target_core_alua_lu_gp_cit);
3186        lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
3187        lu_gp_cg->default_groups[1] = NULL;
3188        default_lu_gp = lu_gp;
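        /*
         * With default_lu_gp in place, the layout registered below is:
         *
         *   /sys/kernel/config/target/
         *       core/
         *           alua/
         *               lu_gps/
         *                   default_lu_gp/
         */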
3189        /*
3190         * Register the target_core_mod subsystem with configfs.
3191         */
3192        ret = configfs_register_subsystem(subsys);
3193        if (ret < 0) {
3194                pr_err("Error %d while registering subsystem %s\n",
3195                        ret, subsys->su_group.cg_item.ci_namebuf);
3196                goto out_global;
3197        }
3198        pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
3199                " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
3200                " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3201        /*
3202         * Register built-in RAMDISK subsystem logic for virtual LUN 0
3203         */
3204        ret = rd_module_init();
3205        if (ret < 0)
3206                goto out;
3207
3208        ret = core_dev_setup_virtual_lun0();
3209        if (ret < 0)
3210                goto out;
3211
3212        return 0;
3213
3214out:
3215        configfs_unregister_subsystem(subsys);
3216        core_dev_release_virtual_lun0();
3217        rd_module_exit();
3218out_global:
3219        if (default_lu_gp) {
3220                core_alua_free_lu_gp(default_lu_gp);
3221                default_lu_gp = NULL;
3222        }
3223        if (lu_gp_cg)
3224                kfree(lu_gp_cg->default_groups);
3225        if (alua_cg)
3226                kfree(alua_cg->default_groups);
3227        if (hba_cg)
3228                kfree(hba_cg->default_groups);
3229        kfree(target_cg->default_groups);
3230        release_se_kmem_caches();
3231        return ret;
3232}
3233
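/*
 * Module exit: drop the references on the default groups in the reverse
 * order of their creation in target_core_init_configfs(), unregister the
 * configfs subsystem, release default_lu_gp, and tear down virtual LUN 0
 * plus the ramdisk plugin.
 */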
3234static void __exit target_core_exit_configfs(void)
3235{
3236        struct configfs_subsystem *subsys;
3237        struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
3238        struct config_item *item;
3239        int i;
3240
3241        subsys = target_core_subsystem[0];
3242
3243        lu_gp_cg = &alua_lu_gps_group;
3244        for (i = 0; lu_gp_cg->default_groups[i]; i++) {
3245                item = &lu_gp_cg->default_groups[i]->cg_item;
3246                lu_gp_cg->default_groups[i] = NULL;
3247                config_item_put(item);
3248        }
3249        kfree(lu_gp_cg->default_groups);
3250        lu_gp_cg->default_groups = NULL;
3251
3252        alua_cg = &alua_group;
3253        for (i = 0; alua_cg->default_groups[i]; i++) {
3254                item = &alua_cg->default_groups[i]->cg_item;
3255                alua_cg->default_groups[i] = NULL;
3256                config_item_put(item);
3257        }
3258        kfree(alua_cg->default_groups);
3259        alua_cg->default_groups = NULL;
3260
3261        hba_cg = &target_core_hbagroup;
3262        for (i = 0; hba_cg->default_groups[i]; i++) {
3263                item = &hba_cg->default_groups[i]->cg_item;
3264                hba_cg->default_groups[i] = NULL;
3265                config_item_put(item);
3266        }
3267        kfree(hba_cg->default_groups);
3268        hba_cg->default_groups = NULL;
3269        /*
3270         * The items in subsys->su_group.default_groups are released by the
3271         * configfs subsystem logic; only the pointer array is freed here.
3272         */
3273        configfs_unregister_subsystem(subsys);
3274        kfree(subsys->su_group.default_groups);
3275
3276        core_alua_free_lu_gp(default_lu_gp);
3277        default_lu_gp = NULL;
3278
3279        pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3280                        " Infrastructure\n");
3281
3282        core_dev_release_virtual_lun0();
3283        rd_module_exit();
3284        release_se_kmem_caches();
3285}
3286
3287MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3288MODULE_AUTHOR("nab@Linux-iSCSI.org");
3289MODULE_LICENSE("GPL");
3290
3291module_init(target_core_init_configfs);
3292module_exit(target_core_exit_configfs);
3293