/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

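/*
 * struct fd_dev embeds the generic struct se_device, so container_of()
 * recovers the FILEIO-private device from the se_device pointer handed
 * to us by the target core.
 */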
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
        return container_of(dev, struct fd_dev, dev);
}

/* fd_attach_hba(): (Part of se_subsystem_api_t template) */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct fd_host *fd_host;

        fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
        if (!fd_host) {
                pr_err("Unable to allocate memory for struct fd_host\n");
                return -ENOMEM;
        }

        fd_host->fd_host_id = host_id;

        hba->hba_ptr = fd_host;

        pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
                TARGET_CORE_MOD_VERSION);
        pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
                " MaxSectors: %u\n",
                hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

        return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
        struct fd_host *fd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
                " Target Core\n", hba->hba_id, fd_host->fd_host_id);

        kfree(fd_host);
        hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
        struct fd_dev *fd_dev;
        struct fd_host *fd_host = hba->hba_ptr;

        fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
        if (!fd_dev) {
                pr_err("Unable to allocate memory for struct fd_dev\n");
                return NULL;
        }

        fd_dev->fd_host = fd_host;

        pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

        return &fd_dev->dev;
}

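/*
 * Worked example for fd_configure_device() below, assuming a backing
 * struct block_device of 8 GiB with 512-byte logical sectors (figures
 * are illustrative only):
 *
 *   i_size_read()  = 8589934592 bytes
 *   fd_block_size  = 512
 *   dev_size       = 8589934592 - 512 = 8589934080 bytes
 *   blocks         = div_u64(8589934080, 512) = 16777215 LBAs
 *
 * One logical sector is subtracted so the size reported to the target
 * core never exceeds the underlying device.
 */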
static int fd_configure_device(struct se_device *dev)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        struct fd_host *fd_host = dev->se_hba->hba_ptr;
        struct file *file;
        struct inode *inode = NULL;
        int flags, ret = -EINVAL;

        if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
                pr_err("Missing fd_dev_name=\n");
                return -EINVAL;
        }

        /*
         * Use O_DSYNC by default instead of O_SYNC to forgo syncing
         * of pure timestamp updates.
         */
        flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

        /*
         * Optionally allow fd_buffered_io=1 to be enabled for people
         * who want to use the fs buffer cache as a WriteCache mechanism.
         *
         * This means that in the event of a hard failure, there is a risk
         * of silent data-loss if the SCSI client has *not* performed a
         * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
         * to write-out the entire device cache.
         */
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
                pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
                flags &= ~O_DSYNC;
        }

        file = filp_open(fd_dev->fd_dev_name, flags, 0600);
        if (IS_ERR(file)) {
                pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
                ret = PTR_ERR(file);
                goto fail;
        }
        fd_dev->fd_file = file;
        /*
         * If using a block backend with this struct file, we extract
         * fd_dev->fd_[block,dev]_size from struct block_device.
         *
         * Otherwise, we use the passed fd_dev_size= from configfs.
         */
        inode = file->f_mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                unsigned long long dev_size;

                /*
                 * Pick up the logical block size from the underlying
                 * struct block_device before it is used below.
                 */
                fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
                /*
                 * Determine the number of bytes from i_size_read() minus
                 * one (1) logical sector from underlying struct block_device
                 */
                dev_size = (i_size_read(file->f_mapping->host) -
                                       fd_dev->fd_block_size);

                pr_debug("FILEIO: Using size: %llu bytes from struct"
                        " block_device blocks: %llu logical_block_size: %d\n",
                        dev_size, div_u64(dev_size, fd_dev->fd_block_size),
                        fd_dev->fd_block_size);
        } else {
                if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
                        pr_err("FILEIO: Missing fd_dev_size="
                                " parameter, and no backing struct"
                                " block_device\n");
                        goto fail;
                }

                fd_dev->fd_block_size = FD_BLOCKSIZE;
        }

        dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
        dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
                pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
                        " with FDBD_HAS_BUFFERED_IO_WCE\n");
                dev->dev_attrib.emulate_write_cache = 1;
        }

        fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
        fd_dev->fd_queue_depth = dev->queue_depth;
        /*
         * Limit WRITE_SAME w/ UNMAP=0 emulation to 0x1000 (4096) Number of
         * LBAs (NoLB), based upon the struct iovec limit for vfs_writev().
         */
        dev->dev_attrib.max_write_same_len = 0x1000;

        pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
                " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
                        fd_dev->fd_dev_name, fd_dev->fd_dev_size);

        return 0;
fail:
        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }
        return ret;
}

static void fd_free_device(struct se_device *dev)
{
        struct fd_dev *fd_dev = FD_DEV(dev);

        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }

        kfree(fd_dev);
}

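/*
 * Illustrative sketch of the iovec table fd_do_rw() builds, assuming a
 * command with three 4 KiB scatterlist entries at LBA 0 and a 512-byte
 * block size (figures are examples only):
 *
 *   iov[0] = { kmap(sg_page(sg0)) + sg0->offset, 4096 }
 *   iov[1] = { kmap(sg_page(sg1)) + sg1->offset, 4096 }
 *   iov[2] = { kmap(sg_page(sg2)) + sg2->offset, 4096 }
 *
 * vfs_writev()/vfs_readv() then move all 12288 bytes at pos = 0 in one
 * vectored call; a fully successful return equals cmd->data_length.
 */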
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_nents, int is_write)
{
        struct se_device *se_dev = cmd->se_dev;
        struct fd_dev *dev = FD_DEV(se_dev);
        struct file *fd = dev->fd_file;
        struct scatterlist *sg;
        struct iovec *iov;
        mm_segment_t old_fs;
        loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
        int ret = 0, i;

        iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
        if (!iov) {
                pr_err("Unable to allocate fd_do_rw iov[]\n");
                return -ENOMEM;
        }

        for_each_sg(sgl, sg, sgl_nents, i) {
                iov[i].iov_len = sg->length;
                iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
        }

        old_fs = get_fs();
        set_fs(get_ds());

        if (is_write)
                ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
        else
                ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);

        set_fs(old_fs);

        for_each_sg(sgl, sg, sgl_nents, i)
                kunmap(sg_page(sg));

        kfree(iov);

        if (is_write) {
                if (ret < 0 || ret != cmd->data_length) {
                        pr_err("%s() write returned %d\n", __func__, ret);
                        return (ret < 0 ? ret : -EINVAL);
                }
        } else {
                /*
                 * Return zeros and GOOD status even if the READ did not return
                 * the expected virt_size for struct file w/o a backing struct
                 * block_device.
                 */
                if (S_ISBLK(file_inode(fd)->i_mode)) {
                        if (ret < 0 || ret != cmd->data_length) {
                                pr_err("%s() returned %d, expecting %u for "
                                                "S_ISBLK\n", __func__, ret,
                                                cmd->data_length);
                                return (ret < 0 ? ret : -EINVAL);
                        }
                } else {
                        if (ret < 0) {
                                pr_err("%s() returned %d for non S_ISBLK\n",
                                                __func__, ret);
                                return ret;
                        }
                }
        }
        return 1;
}

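/*
 * Example of the byte-range math in fd_execute_sync_cache(), purely for
 * illustration: with block_size = 512, t_task_lba = 16 and data_length
 * of 4096, the flushed range becomes start = 8192, end = 8192 + 4096.
 * An LBA of 0 with data_length 0 flushes the whole file (0..LLONG_MAX).
 */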
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct fd_dev *fd_dev = FD_DEV(dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        loff_t start, end;
        int ret;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        /*
         * Determine if we will be flushing the entire device.
         */
        if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
                start = 0;
                end = LLONG_MAX;
        } else {
                start = cmd->t_task_lba * dev->dev_attrib.block_size;
                if (cmd->data_length)
                        end = start + cmd->data_length;
                else
                        end = LLONG_MAX;
        }

        ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
        if (ret != 0)
                pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

        if (immed)
                return 0;

        if (ret)
                target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
        else
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        return 0;
}

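/*
 * Sketch of the buffer fd_setup_write_same_buf() produces, assuming a
 * 512-byte block_size and len = 16384 on a 4096-byte PAGE_SIZE system:
 * the single received WRITE_SAME block is copied eight times into one
 * page-sized buffer (min(len, PAGE_SIZE) = 4096 = 8 * 512), which the
 * caller then replays across the full 16384-byte range via iovecs.
 */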
static unsigned char *
fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
                    unsigned int len)
{
        struct se_device *se_dev = cmd->se_dev;
        unsigned int block_size = se_dev->dev_attrib.block_size;
        unsigned int i = 0, end;
        unsigned char *buf, *p, *kmap_buf;

        buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
        if (!buf) {
                pr_err("Unable to allocate fd_execute_write_same buf\n");
                return NULL;
        }

        kmap_buf = kmap(sg_page(sg)) + sg->offset;
        if (!kmap_buf) {
                pr_err("kmap() failed in fd_setup_write_same\n");
                kfree(buf);
                return NULL;
        }
        /*
         * Fill local *buf to contain multiple WRITE_SAME blocks up to
         * min(len, PAGE_SIZE)
         */
        p = buf;
        end = min_t(unsigned int, len, PAGE_SIZE);

        while (i < end) {
                memcpy(p, kmap_buf, block_size);

                i += block_size;
                p += block_size;
        }
        kunmap(sg_page(sg));

        return buf;
}

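/*
 * Continuing the example above inside fd_execute_write_same(): with
 * nolb = 32 and block_size = 512, len = 16384 and iov_num =
 * DIV_ROUND_UP(16384, 4096) = 4, so four iovecs all point at the same
 * page-sized pattern buffer and vfs_writev() writes it out in one call.
 */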
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
        struct se_device *se_dev = cmd->se_dev;
        struct fd_dev *fd_dev = FD_DEV(se_dev);
        struct file *f = fd_dev->fd_file;
        struct scatterlist *sg;
        struct iovec *iov;
        mm_segment_t old_fs;
        sector_t nolb = sbc_get_write_same_sectors(cmd);
        loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        unsigned int len, len_tmp, iov_num;
        int i, rc;
        unsigned char *buf;

        if (!nolb) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }
        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        len = len_tmp = nolb * se_dev->dev_attrib.block_size;
        iov_num = DIV_ROUND_UP(len, PAGE_SIZE);

        buf = fd_setup_write_same_buf(cmd, sg, len);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        iov = vzalloc(sizeof(struct iovec) * iov_num);
        if (!iov) {
                pr_err("Unable to allocate fd_execute_write_same iovecs\n");
                kfree(buf);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * Map the single fabric received scatterlist block now populated
         * in *buf into each iovec for I/O submission.
         */
        for (i = 0; i < iov_num; i++) {
                iov[i].iov_base = buf;
                iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
                len_tmp -= iov[i].iov_len;
        }

        old_fs = get_fs();
        set_fs(get_ds());
        rc = vfs_writev(f, &iov[0], iov_num, &pos);
        set_fs(old_fs);

        vfree(iov);
        kfree(buf);

        if (rc < 0 || rc != len) {
                pr_err("vfs_writev() returned %d for write same\n", rc);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *dev = cmd->se_dev;
        int ret = 0;

        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
         */
        if (data_direction == DMA_FROM_DEVICE) {
                ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
        } else {
                ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
                /*
                 * Perform an implicit vfs_fsync_range() for fd_do_rw() ops
                 * for SCSI WRITEs with Forced Unit Access (FUA) set.
                 * Allow this to happen independent of WCE=0 setting.
                 */
                if (ret > 0 &&
                    dev->dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA)) {
                        struct fd_dev *fd_dev = FD_DEV(dev);
                        loff_t start = cmd->t_task_lba *
                                dev->dev_attrib.block_size;
                        loff_t end = start + cmd->data_length;

                        vfs_fsync_range(fd_dev->fd_file, start, end, 1);
                }
        }

        if (ret < 0)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        if (ret)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

enum {
        Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
        {Opt_fd_dev_name, "fd_dev_name=%s"},
        {Opt_fd_dev_size, "fd_dev_size=%s"},
        {Opt_fd_buffered_io, "fd_buffered_io=%d"},
        {Opt_err, NULL}
};

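/*
 * Hypothetical configfs usage for the parameters parsed below, assuming
 * a target configfs mount at /sys/kernel/config and a device directory
 * named fileio_0/disk0 (both names are illustrative only):
 *
 *   echo "fd_dev_name=/srv/disk0.img,fd_dev_size=4294967296" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo "fd_buffered_io=1" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 *
 * Options are comma- or newline-separated, matching the strsep() loop.
 */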
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_fd_dev_name:
                        if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
                                FD_MAX_DEV_NAME) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("FILEIO: Referencing Path: %s\n",
                                        fd_dev->fd_dev_name);
                        fd_dev->fbd_flags |= FBDF_HAS_PATH;
                        break;
                case Opt_fd_dev_size:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoull() failed for"
                                                " fd_dev_size=\n");
                                goto out;
                        }
                        pr_debug("FILEIO: Referencing Size: %llu"
                                        " bytes\n", fd_dev->fd_dev_size);
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
                case Opt_fd_buffered_io:
                        match_int(args, &arg);
                        if (arg != 1) {
                                pr_err("bogus fd_buffered_io=%d value\n", arg);
                                ret = -EINVAL;
                                goto out;
                        }

                        pr_debug("FILEIO: Using buffered I/O"
                                " operations for struct fd_dev\n");

                        fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        ssize_t bl = 0;

        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
        bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
                fd_dev->fd_dev_name, fd_dev->fd_dev_size,
                (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
                "Buffered-WCE" : "O_DSYNC");
        return bl;
}

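/*
 * Example for fd_get_blocks() below, figures for illustration only: a
 * plain 1 GiB backing file with fd_dev_size = 1073741824 and a 512-byte
 * block_size reports div_u64(1073741824, 512) = 2097152 LBAs, while a
 * block device re-reads i_size so underlying resizes are picked up live.
 */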
static sector_t fd_get_blocks(struct se_device *dev)
{
        struct fd_dev *fd_dev = FD_DEV(dev);
        struct file *f = fd_dev->fd_file;
        struct inode *i = f->f_mapping->host;
        unsigned long long dev_size;
        /*
         * When using a file that references an underlying struct block_device,
         * ensure dev_size is always based on the current inode size in order
         * to handle underlying block_device resize operations.
         */
        if (S_ISBLK(i->i_mode))
                dev_size = (i_size_read(i) - fd_dev->fd_block_size);
        else
                dev_size = fd_dev->fd_dev_size;

        return div_u64(dev_size, dev->dev_attrib.block_size);
}

static struct sbc_ops fd_sbc_ops = {
        .execute_rw             = fd_execute_rw,
        .execute_sync_cache     = fd_execute_sync_cache,
        .execute_write_same     = fd_execute_write_same,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static struct se_subsystem_api fileio_template = {
        .name                   = "fileio",
        .inquiry_prod           = "FILEIO",
        .inquiry_rev            = FD_VERSION,
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = fd_attach_hba,
        .detach_hba             = fd_detach_hba,
        .alloc_device           = fd_alloc_device,
        .configure_device       = fd_configure_device,
        .free_device            = fd_free_device,
        .parse_cdb              = fd_parse_cdb,
        .set_configfs_dev_params = fd_set_configfs_dev_params,
        .show_configfs_dev_params = fd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = fd_get_blocks,
};

static int __init fileio_module_init(void)
{
        return transport_subsystem_register(&fileio_template);
}

static void __exit fileio_module_exit(void)
{
        transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);