/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific
 * functions: a TCM backend that exports struct file backed devices
 * (regular files or block devices) through the generic target core stack.
 *
 * Nicholas A. Bellinger <nab@Linux-iSCSI.org>
 *
 * Licensed under the GNU General Public License (GPL).
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

static struct se_subsystem_api fileio_template;
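/*
 * fileio_template is only declared above; the full se_subsystem_api
 * definition lives at the bottom of this file and is passed to
 * transport_add_device_to_core_hba() from fd_create_virtdevice().
 */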

/*	fd_attach_hba(): (Part of se_subsystem_api template)
 *
 * Allocates the per-HBA fd_host context and hangs it off the generic
 * se_hba. Called when a FILEIO HBA is created through configfs.
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return fd_dev;
}

/*	fd_create_virtdevice(): (Part of se_subsystem_api template)
 *
 * Opens the backing file and registers the new se_device with the
 * target core.
 */
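/*
 * Note on the configfs lifecycle (a sketch of the generic TCM flow, not
 * specific to this backend): fd_allocate_virtdevice() runs when the device
 * group is created, fd_set_configfs_dev_params() consumes the user supplied
 * control string, and fd_create_virtdevice() runs once the device is
 * enabled, with *p pointing at the fd_dev allocated earlier.
 */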
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = p;
	struct fd_host *fd_host = hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags, ret = -EINVAL;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		pr_err("getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		/* Nothing to clean up yet; do not putname() an ERR_PTR */
		return ERR_CAST(dev_p);
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed: %ld\n", dev_p, PTR_ERR(file));
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a struct block_device as the backend with this struct
	 * file, extract fd_dev->fd_block_size and the device size from the
	 * underlying block_device.
	 *
	 * Otherwise, use the fd_dev_size= value passed in from configfs.
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		unsigned long long dev_size;
		/*
		 * Set up the local scope queue_limits from struct
		 * request_queue->limits to pass into
		 * transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from the underlying struct
		 * block_device.
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		dev_size = (i_size_read(file->f_mapping->host) -
				fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!dev)
		goto fail;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	putname(dev_p);
	return ERR_PTR(ret);
}

/*	fd_free_device(): (Part of se_subsystem_api template)
 *
 * Releases the backing struct file and the fd_dev itself.
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}
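
/*
 * struct fd_request embeds its struct se_task as ->fd_task (see
 * target_core_file.h), so container_of() above recovers the request
 * from the task pointer the target core hands back to this backend.
 */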

static struct se_task *
fd_alloc_task(unsigned char *cdb)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!fd_req) {
		pr_err("Unable to allocate struct fd_request\n");
		return NULL;
	}

	return &fd_req->fd_task;
}

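/*
 * fd_do_readv() maps each scatterlist page into the kernel's linear
 * address space with kmap() and describes it with a struct iovec, so a
 * single vfs_readv() can fill the whole scatter-gather list. The
 * set_fs(get_ds()) dance lets the VFS accept these kernel-space iovecs
 * through an interface that normally expects user pointers.
 */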
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
		kunmap(sg_page(sg));
	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			pr_err("vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			pr_err("vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}

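/*
 * fd_do_writev() is the write-side mirror of fd_do_readv(): the same
 * scatterlist-to-iovec mapping, with vfs_writev() pushing the payload to
 * the backing file at pos = task_lba * block_size (e.g. LBA 2048 with a
 * 512-byte block size writes at byte offset 1 MiB).
 */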
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
		kunmap(sg_page(sg));

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}
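
/*
 * fd_emulate_sync_cache() backs the SCSI SYNCHRONIZE_CACHE command. The
 * flush range is computed in bytes from the command's LBA and transfer
 * length: e.g. LBA 8 with 16 blocks of 512 bytes flushes the byte range
 * [4096, 12288). With both fields zero the whole device is flushed.
 */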

static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}

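/*
 * fd_do_task() dispatch: on success the backend completes the task itself
 * via transport_complete_task() and returns 0; a negative return hands the
 * error back to the core with scsi_sense_reason already set.
 */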
static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);
		/*
		 * Perform an implicit vfs_fsync_range() for fd_do_writev()
		 * ops on SCSI WRITEs with the Forced Unit Access (FUA) bit
		 * set. Allow this to happen independent of the WCE setting.
		 */
		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = dev->dev_ptr;
			loff_t start = task->task_lba *
				dev->se_sub_dev->se_dev_attrib.block_size;
			loff_t end = start + task->task_size;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}
	}

	if (ret < 0) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return ret;
	}
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return 0;
}

/*	fd_free_task(): (Part of se_subsystem_api template)
 *
 * Frees the fd_request allocated in fd_alloc_task().
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
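
/*
 * Illustrative configfs usage (paths and values are examples only):
 *
 *   mkdir -p /sys/kernel/config/target/core/fileio_0/disk0
 *   echo "fd_dev_name=/srv/disk0.img,fd_dev_size=4294967296" > \
 *        /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 *
 * The comma-separated control string is parsed against the token table
 * above by fd_set_configfs_dev_params() below.
 */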

static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}
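
/*
 * Note: only fd_dev_name= is validated here; fd_dev_size= is checked later
 * in fd_create_virtdevice(), where it is required only when the named path
 * is a regular file rather than a block device.
 */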

static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}

/*	fd_get_device_rev(): (Part of se_subsystem_api template)
 *
 * Reports the SPC revision level emulated for this backend.
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api template)
 *
 * Reports the SCSI peripheral device type; FILEIO always emulates a
 * direct-access block device (TYPE_DISK).
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct
	 * block_device, ensure dev_size is always based on the current
	 * inode size in order to handle underlying block_device resize
	 * operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}
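
/*
 * For example, an fd_dev_size of 4294967296 bytes (4 GiB) with a 512-byte
 * block_size yields div_u64(4294967296, 512) = 8388608 logical blocks.
 */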

static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};
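
/*
 * Module entry/exit: transport_subsystem_register() publishes the "fileio"
 * template to the target core so HBAs of this type can be created through
 * configfs; transport_subsystem_release() unregisters it on unload.
 */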

static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void __exit fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);