#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_cdb.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
60
61static int sub_api_initialized;
62
63static struct workqueue_struct *target_completion_wq;
64static struct kmem_cache *se_sess_cache;
65struct kmem_cache *se_tmr_req_cache;
66struct kmem_cache *se_ua_cache;
67struct kmem_cache *t10_pr_reg_cache;
68struct kmem_cache *t10_alua_lu_gp_cache;
69struct kmem_cache *t10_alua_lu_gp_mem_cache;
70struct kmem_cache *t10_alua_tg_pt_gp_cache;
71struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
72
73static int transport_generic_write_pending(struct se_cmd *);
74static int transport_processing_thread(void *param);
75static int __transport_execute_tasks(struct se_device *dev);
76static void transport_complete_task_attr(struct se_cmd *cmd);
77static void transport_handle_queue_full(struct se_cmd *cmd,
78 struct se_device *dev);
79static void transport_free_dev_tasks(struct se_cmd *cmd);
80static int transport_generic_get_mem(struct se_cmd *cmd);
81static void transport_put_cmd(struct se_cmd *cmd);
82static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
83static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
84static void transport_generic_request_failure(struct se_cmd *);
85static void target_complete_ok_work(struct work_struct *work);
86
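/*
 * Create the kmem caches and the completion workqueue used by the target
 * core.  On any allocation failure everything created so far is torn down
 * again and -ENOMEM is returned.
 */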
87int init_se_kmem_caches(void)
88{
89 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
90 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
91 0, NULL);
92 if (!se_tmr_req_cache) {
93 pr_err("kmem_cache_create() for struct se_tmr_req"
94 " failed\n");
95 goto out;
96 }
97 se_sess_cache = kmem_cache_create("se_sess_cache",
98 sizeof(struct se_session), __alignof__(struct se_session),
99 0, NULL);
100 if (!se_sess_cache) {
101 pr_err("kmem_cache_create() for struct se_session"
102 " failed\n");
103 goto out_free_tmr_req_cache;
104 }
105 se_ua_cache = kmem_cache_create("se_ua_cache",
106 sizeof(struct se_ua), __alignof__(struct se_ua),
107 0, NULL);
108 if (!se_ua_cache) {
109 pr_err("kmem_cache_create() for struct se_ua failed\n");
110 goto out_free_sess_cache;
111 }
112 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
113 sizeof(struct t10_pr_registration),
114 __alignof__(struct t10_pr_registration), 0, NULL);
115 if (!t10_pr_reg_cache) {
116 pr_err("kmem_cache_create() for struct t10_pr_registration"
117 " failed\n");
118 goto out_free_ua_cache;
119 }
120 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
121 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
122 0, NULL);
123 if (!t10_alua_lu_gp_cache) {
124 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
125 " failed\n");
126 goto out_free_pr_reg_cache;
127 }
128 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
129 sizeof(struct t10_alua_lu_gp_member),
130 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
131 if (!t10_alua_lu_gp_mem_cache) {
132 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
133 "cache failed\n");
134 goto out_free_lu_gp_cache;
135 }
136 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
137 sizeof(struct t10_alua_tg_pt_gp),
138 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
139 if (!t10_alua_tg_pt_gp_cache) {
140 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
141 "cache failed\n");
142 goto out_free_lu_gp_mem_cache;
143 }
144 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
145 "t10_alua_tg_pt_gp_mem_cache",
146 sizeof(struct t10_alua_tg_pt_gp_member),
147 __alignof__(struct t10_alua_tg_pt_gp_member),
148 0, NULL);
149 if (!t10_alua_tg_pt_gp_mem_cache) {
150 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
151 "mem_t failed\n");
152 goto out_free_tg_pt_gp_cache;
153 }
154
155 target_completion_wq = alloc_workqueue("target_completion",
156 WQ_MEM_RECLAIM, 0);
157 if (!target_completion_wq)
158 goto out_free_tg_pt_gp_mem_cache;
159
160 return 0;
161
162out_free_tg_pt_gp_mem_cache:
163 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
164out_free_tg_pt_gp_cache:
165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
166out_free_lu_gp_mem_cache:
167 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
168out_free_lu_gp_cache:
169 kmem_cache_destroy(t10_alua_lu_gp_cache);
170out_free_pr_reg_cache:
171 kmem_cache_destroy(t10_pr_reg_cache);
172out_free_ua_cache:
173 kmem_cache_destroy(se_ua_cache);
174out_free_sess_cache:
175 kmem_cache_destroy(se_sess_cache);
176out_free_tmr_req_cache:
177 kmem_cache_destroy(se_tmr_req_cache);
178out:
179 return -ENOMEM;
180}
181
182void release_se_kmem_caches(void)
183{
184 destroy_workqueue(target_completion_wq);
185 kmem_cache_destroy(se_tmr_req_cache);
186 kmem_cache_destroy(se_sess_cache);
187 kmem_cache_destroy(se_ua_cache);
188 kmem_cache_destroy(t10_pr_reg_cache);
189 kmem_cache_destroy(t10_alua_lu_gp_cache);
190 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
191 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
192 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
193}
194
195
196static DEFINE_SPINLOCK(scsi_mib_index_lock);
197static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
198
199
200
201
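/*
 * Hand out the next value of a per-type SCSI MIB index counter, used by the
 * core and fabric modules to assign unique indexes for statistics reporting.
 */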
202u32 scsi_get_new_index(scsi_index_t type)
203{
204 u32 new_index;
205
206 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
207
208 spin_lock(&scsi_mib_index_lock);
209 new_index = ++scsi_mib_index[type];
210 spin_unlock(&scsi_mib_index_lock);
211
212 return new_index;
213}
214
215void transport_init_queue_obj(struct se_queue_obj *qobj)
216{
217 atomic_set(&qobj->queue_cnt, 0);
218 INIT_LIST_HEAD(&qobj->qobj_list);
219 init_waitqueue_head(&qobj->thread_wq);
220 spin_lock_init(&qobj->cmd_queue_lock);
221}
222EXPORT_SYMBOL(transport_init_queue_obj);
223
224void transport_subsystem_check_init(void)
225{
226 int ret;
227
228 if (sub_api_initialized)
229 return;
230
231 ret = request_module("target_core_iblock");
232 if (ret != 0)
233 pr_err("Unable to load target_core_iblock\n");
234
235 ret = request_module("target_core_file");
236 if (ret != 0)
237 pr_err("Unable to load target_core_file\n");
238
239 ret = request_module("target_core_pscsi");
240 if (ret != 0)
241 pr_err("Unable to load target_core_pscsi\n");
242
243 ret = request_module("target_core_stgt");
244 if (ret != 0)
245 pr_err("Unable to load target_core_stgt\n");
246
247 sub_api_initialized = 1;
248 return;
249}
250
251struct se_session *transport_init_session(void)
252{
253 struct se_session *se_sess;
254
255 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
256 if (!se_sess) {
257 pr_err("Unable to allocate struct se_session from"
258 " se_sess_cache\n");
259 return ERR_PTR(-ENOMEM);
260 }
261 INIT_LIST_HEAD(&se_sess->sess_list);
262 INIT_LIST_HEAD(&se_sess->sess_acl_list);
263 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
264 INIT_LIST_HEAD(&se_sess->sess_wait_list);
265 spin_lock_init(&se_sess->sess_cmd_lock);
266
267 return se_sess;
268}
269EXPORT_SYMBOL(transport_init_session);
270
271
272
273
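/*
 * Bind a new session to its portal group.  Expects se_tpg->session_lock to
 * be held (transport_register_session() below is the locking wrapper).
 * When a NodeACL is supplied, the initiator SID is saved and the session is
 * linked into the ACL's session list as well.
 */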
274void __transport_register_session(
275 struct se_portal_group *se_tpg,
276 struct se_node_acl *se_nacl,
277 struct se_session *se_sess,
278 void *fabric_sess_ptr)
279{
280 unsigned char buf[PR_REG_ISID_LEN];
281
282 se_sess->se_tpg = se_tpg;
283 se_sess->fabric_sess_ptr = fabric_sess_ptr;
284
285
286
287
288
289
290 if (se_nacl) {
291
292
293
294
295 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
296 memset(&buf[0], 0, PR_REG_ISID_LEN);
297 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
298 &buf[0], PR_REG_ISID_LEN);
299 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
300 }
301 spin_lock_irq(&se_nacl->nacl_sess_lock);
302
303
304
305
306 se_nacl->nacl_sess = se_sess;
307
308 list_add_tail(&se_sess->sess_acl_list,
309 &se_nacl->acl_sess_list);
310 spin_unlock_irq(&se_nacl->nacl_sess_lock);
311 }
312 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
313
314 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
315 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
316}
317EXPORT_SYMBOL(__transport_register_session);
318
319void transport_register_session(
320 struct se_portal_group *se_tpg,
321 struct se_node_acl *se_nacl,
322 struct se_session *se_sess,
323 void *fabric_sess_ptr)
324{
325 spin_lock_bh(&se_tpg->session_lock);
326 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
327 spin_unlock_bh(&se_tpg->session_lock);
328}
329EXPORT_SYMBOL(transport_register_session);
330
331void transport_deregister_session_configfs(struct se_session *se_sess)
332{
333 struct se_node_acl *se_nacl;
334 unsigned long flags;
335
336
337
338 se_nacl = se_sess->se_node_acl;
339 if (se_nacl) {
340 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
341 list_del(&se_sess->sess_acl_list);
342
343
344
345
346
347 if (list_empty(&se_nacl->acl_sess_list))
348 se_nacl->nacl_sess = NULL;
349 else {
350 se_nacl->nacl_sess = container_of(
351 se_nacl->acl_sess_list.prev,
352 struct se_session, sess_acl_list);
353 }
354 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
355 }
356}
357EXPORT_SYMBOL(transport_deregister_session_configfs);
358
359void transport_free_session(struct se_session *se_sess)
360{
361 kmem_cache_free(se_sess_cache, se_sess);
362}
363EXPORT_SYMBOL(transport_free_session);
364
365void transport_deregister_session(struct se_session *se_sess)
366{
367 struct se_portal_group *se_tpg = se_sess->se_tpg;
368 struct se_node_acl *se_nacl;
369 unsigned long flags;
370
371 if (!se_tpg) {
372 transport_free_session(se_sess);
373 return;
374 }
375
376 spin_lock_irqsave(&se_tpg->session_lock, flags);
377 list_del(&se_sess->sess_list);
378 se_sess->se_tpg = NULL;
379 se_sess->fabric_sess_ptr = NULL;
380 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
381
382
383
384
385
386 se_nacl = se_sess->se_node_acl;
387 if (se_nacl) {
388 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
389 if (se_nacl->dynamic_node_acl) {
390 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
391 se_tpg)) {
392 list_del(&se_nacl->acl_list);
393 se_tpg->num_node_acls--;
394 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
395
396 core_tpg_wait_for_nacl_pr_ref(se_nacl);
397 core_free_device_list_for_node(se_nacl, se_tpg);
398 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
399 se_nacl);
400 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
401 }
402 }
403 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
404 }
405
406 transport_free_session(se_sess);
407
408 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
409 se_tpg->se_tpg_tfo->get_fabric_name());
410}
411EXPORT_SYMBOL(transport_deregister_session);
412
413
414
415
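/*
 * Remove this command's tasks from the device state list.  Tasks that are
 * still TF_ACTIVE in the backend are skipped and handled when they complete.
 */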
416static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
417{
418 struct se_device *dev = cmd->se_dev;
419 struct se_task *task;
420 unsigned long flags;
421
422 if (!dev)
423 return;
424
425 list_for_each_entry(task, &cmd->t_task_list, t_list) {
426 if (task->task_flags & TF_ACTIVE)
427 continue;
428
429 if (!atomic_read(&task->task_state_active))
430 continue;
431
432 spin_lock_irqsave(&dev->execute_task_lock, flags);
433 list_del(&task->t_state_list);
434 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
435 cmd->se_tfo->get_task_tag(cmd), dev, task);
436 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
437
438 atomic_set(&task->task_state_active, 0);
439 atomic_dec(&cmd->t_task_cdbs_ex_left);
440 }
441}

/*
 * transport_cmd_check_stop - check for pending LUN/frontend stop requests
 *
 * @transport_off: 0 = only check for a stop and set @t_state,
 *                 1 = also clear t_transport_active,
 *                 2 = additionally remove the command from the device state
 *                     lists, drop the se_lun pointer and give the fabric a
 *                     chance to release the command via ->check_stop_free().
 * @t_state: new cmd->t_state to set when no stop was requested.
 *
 * Returns nonzero if the caller must stop processing the command (a stop
 * completion was signalled or the fabric released it), zero otherwise.
 */
451static int transport_cmd_check_stop(
452 struct se_cmd *cmd,
453 int transport_off,
454 u8 t_state)
455{
456 unsigned long flags;
457
458 spin_lock_irqsave(&cmd->t_state_lock, flags);
459
460
461
462
463 if (atomic_read(&cmd->transport_lun_stop)) {
464 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
465 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
466 cmd->se_tfo->get_task_tag(cmd));
467
468 atomic_set(&cmd->t_transport_active, 0);
469 if (transport_off == 2)
470 transport_all_task_dev_remove_state(cmd);
471 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
472
473 complete(&cmd->transport_lun_stop_comp);
474 return 1;
475 }
476
477
478
479
480 if (atomic_read(&cmd->t_transport_stop)) {
481 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
482 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
483 cmd->se_tfo->get_task_tag(cmd));
484
485 if (transport_off == 2)
486 transport_all_task_dev_remove_state(cmd);
487
488
489
490
491
492 if (transport_off == 2)
493 cmd->se_lun = NULL;
494 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
495
496 complete(&cmd->t_transport_stop_comp);
497 return 1;
498 }
499 if (transport_off) {
500 atomic_set(&cmd->t_transport_active, 0);
501 if (transport_off == 2) {
502 transport_all_task_dev_remove_state(cmd);
503
504
505
506
507 cmd->se_lun = NULL;
508
509
510
511
512
513
514
515
516
517 if (cmd->se_tfo->check_stop_free != NULL) {
518 spin_unlock_irqrestore(
519 &cmd->t_state_lock, flags);
520
521 return cmd->se_tfo->check_stop_free(cmd);
522 }
523 }
524 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
525
526 return 0;
527 } else if (t_state)
528 cmd->t_state = t_state;
529 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
530
531 return 0;
532}
533
534static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
535{
536 return transport_cmd_check_stop(cmd, 2, 0);
537}
538
539static void transport_lun_remove_cmd(struct se_cmd *cmd)
540{
541 struct se_lun *lun = cmd->se_lun;
542 unsigned long flags;
543
544 if (!lun)
545 return;
546
547 spin_lock_irqsave(&cmd->t_state_lock, flags);
548 if (!atomic_read(&cmd->transport_dev_active)) {
549 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
550 goto check_lun;
551 }
552 atomic_set(&cmd->transport_dev_active, 0);
553 transport_all_task_dev_remove_state(cmd);
554 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
555
556
557check_lun:
558 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
559 if (atomic_read(&cmd->transport_lun_active)) {
560 list_del(&cmd->se_lun_node);
561 atomic_set(&cmd->transport_lun_active, 0);
#if 0
		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
566 }
567 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
568}
569
570void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
571{
572 if (!cmd->se_tmr_req)
573 transport_lun_remove_cmd(cmd);
574
575 if (transport_cmd_check_stop_to_fabric(cmd))
576 return;
577 if (remove) {
578 transport_remove_cmd_from_queue(cmd);
579 transport_put_cmd(cmd);
580 }
581}
582
583static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
584 bool at_head)
585{
586 struct se_device *dev = cmd->se_dev;
587 struct se_queue_obj *qobj = &dev->dev_queue_obj;
588 unsigned long flags;
589
590 if (t_state) {
591 spin_lock_irqsave(&cmd->t_state_lock, flags);
592 cmd->t_state = t_state;
593 atomic_set(&cmd->t_transport_active, 1);
594 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
595 }
596
597 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
598
599
600 if (!list_empty(&cmd->se_queue_node))
601 list_del(&cmd->se_queue_node);
602 else
603 atomic_inc(&qobj->queue_cnt);
604
605 if (at_head)
606 list_add(&cmd->se_queue_node, &qobj->qobj_list);
607 else
608 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
609 atomic_set(&cmd->t_transport_queue_active, 1);
610 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
611
612 wake_up_interruptible(&qobj->thread_wq);
613}
614
615static struct se_cmd *
616transport_get_cmd_from_queue(struct se_queue_obj *qobj)
617{
618 struct se_cmd *cmd;
619 unsigned long flags;
620
621 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
622 if (list_empty(&qobj->qobj_list)) {
623 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
624 return NULL;
625 }
626 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
627
628 atomic_set(&cmd->t_transport_queue_active, 0);
629
630 list_del_init(&cmd->se_queue_node);
631 atomic_dec(&qobj->queue_cnt);
632 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
633
634 return cmd;
635}
636
637static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
638{
639 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
640 unsigned long flags;
641
642 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
643 if (!atomic_read(&cmd->t_transport_queue_active)) {
644 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
645 return;
646 }
647 atomic_set(&cmd->t_transport_queue_active, 0);
648 atomic_dec(&qobj->queue_cnt);
649 list_del_init(&cmd->se_queue_node);
650 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
651
652 if (atomic_read(&cmd->t_transport_queue_active)) {
653 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
654 cmd->se_tfo->get_task_tag(cmd),
655 atomic_read(&cmd->t_transport_queue_active));
656 }
657}
658
659
660
661
662
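/*
 * Completion callback used by backends when an emulated SYNCHRONIZE_CACHE
 * finishes: translate the boolean result into SAM status on the command's
 * first task and complete that task.
 */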
663void transport_complete_sync_cache(struct se_cmd *cmd, int good)
664{
665 struct se_task *task = list_entry(cmd->t_task_list.next,
666 struct se_task, t_list);
667
668 if (good) {
669 cmd->scsi_status = SAM_STAT_GOOD;
670 task->task_scsi_status = GOOD;
671 } else {
672 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
673 task->task_se_cmd->scsi_sense_reason =
674 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
675
676 }
677
678 transport_complete_task(task, good);
679}
680EXPORT_SYMBOL(transport_complete_sync_cache);
681
682static void target_complete_failure_work(struct work_struct *work)
683{
684 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
685
686 transport_generic_request_failure(cmd);
687}

/*
 * Called by the backend when a struct se_task finishes.  Collects per-task
 * status, and once the last task of the command has completed queues the
 * command to the completion workqueue for OK or failure post-processing.
 */
694void transport_complete_task(struct se_task *task, int success)
695{
696 struct se_cmd *cmd = task->task_se_cmd;
697 struct se_device *dev = cmd->se_dev;
698 unsigned long flags;
699#if 0
700 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
701 cmd->t_task_cdb[0], dev);
702#endif
703 if (dev)
704 atomic_inc(&dev->depth_left);
705
706 spin_lock_irqsave(&cmd->t_state_lock, flags);
707 task->task_flags &= ~TF_ACTIVE;
708
709
710
711
712
713
714 if (dev && dev->transport->transport_complete) {
715 if (dev->transport->transport_complete(task) != 0) {
716 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
717 task->task_sense = 1;
718 success = 1;
719 }
720 }
721
722
723
724
725
726 if (task->task_flags & TF_REQUEST_STOP) {
727 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
728 complete(&task->task_stop_comp);
729 return;
730 }
731
732 if (!success)
733 cmd->t_tasks_failed = 1;
734
735
736
737
738
739
740 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
741 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
742 return;
743 }
744
745 if (cmd->t_tasks_failed) {
746 if (!task->task_error_status) {
747 task->task_error_status =
748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
749 cmd->scsi_sense_reason =
750 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
751 }
752
753 INIT_WORK(&cmd->work, target_complete_failure_work);
754 } else {
755 atomic_set(&cmd->t_transport_complete, 1);
756 INIT_WORK(&cmd->work, target_complete_ok_work);
757 }
758
759 cmd->t_state = TRANSPORT_COMPLETE;
760 atomic_set(&cmd->t_transport_active, 1);
761 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
762
763 queue_work(target_completion_wq, &cmd->work);
764}
765EXPORT_SYMBOL(transport_complete_task);
766
767
768
769
770
771
772
773
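/*
 * Add a task to the device execute list, honouring the SAM task attribute
 * when attribute emulation is enabled: HEAD_OF_QUEUE tasks are inserted at
 * the front (after any earlier task of the same command), everything else
 * is appended at the tail.  Returns 1 if the task was inserted at the head.
 */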
774static inline int transport_add_task_check_sam_attr(
775 struct se_task *task,
776 struct se_task *task_prev,
777 struct se_device *dev)
778{
779
780
781
782
783 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
784 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
785 return 0;
786 }
787
788
789
790
791
792
793 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
794 list_add(&task->t_execute_list,
795 (task_prev != NULL) ?
796 &task_prev->t_execute_list :
797 &dev->execute_task_list);
798
799 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
800 " in execution queue\n",
801 task->task_se_cmd->t_task_cdb[0]);
802 return 1;
803 }
804
805
806
807
808
809 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
810 return 0;
811}
812
813
814
815
816
817static void __transport_add_task_to_execute_queue(
818 struct se_task *task,
819 struct se_task *task_prev,
820 struct se_device *dev)
821{
822 int head_of_queue;
823
824 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
825 atomic_inc(&dev->execute_tasks);
826
827 if (atomic_read(&task->task_state_active))
828 return;
829
830
831
832
833
834 if (head_of_queue)
835 list_add(&task->t_state_list, (task_prev) ?
836 &task_prev->t_state_list :
837 &dev->state_task_list);
838 else
839 list_add_tail(&task->t_state_list, &dev->state_task_list);
840
841 atomic_set(&task->task_state_active, 1);
842
843 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
844 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
845 task, dev);
846}
847
848static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
849{
850 struct se_device *dev = cmd->se_dev;
851 struct se_task *task;
852 unsigned long flags;
853
854 spin_lock_irqsave(&cmd->t_state_lock, flags);
855 list_for_each_entry(task, &cmd->t_task_list, t_list) {
856 if (atomic_read(&task->task_state_active))
857 continue;
858
859 spin_lock(&dev->execute_task_lock);
860 list_add_tail(&task->t_state_list, &dev->state_task_list);
861 atomic_set(&task->task_state_active, 1);
862
863 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
864 task->task_se_cmd->se_tfo->get_task_tag(
865 task->task_se_cmd), task, dev);
866
867 spin_unlock(&dev->execute_task_lock);
868 }
869 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
870}
871
872static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
873{
874 struct se_device *dev = cmd->se_dev;
875 struct se_task *task, *task_prev = NULL;
876 unsigned long flags;
877
878 spin_lock_irqsave(&dev->execute_task_lock, flags);
879 list_for_each_entry(task, &cmd->t_task_list, t_list) {
880 if (!list_empty(&task->t_execute_list))
881 continue;
882
883
884
885
886 __transport_add_task_to_execute_queue(task, task_prev, dev);
887 task_prev = task;
888 }
889 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
890}
891
892void __transport_remove_task_from_execute_queue(struct se_task *task,
893 struct se_device *dev)
894{
895 list_del_init(&task->t_execute_list);
896 atomic_dec(&dev->execute_tasks);
897}
898
899void transport_remove_task_from_execute_queue(
900 struct se_task *task,
901 struct se_device *dev)
902{
903 unsigned long flags;
904
905 if (WARN_ON(list_empty(&task->t_execute_list)))
906 return;
907
908 spin_lock_irqsave(&dev->execute_task_lock, flags);
909 __transport_remove_task_from_execute_queue(task, dev);
910 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
911}
912
913
914
915
916
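/*
 * QUEUE_FULL work handler: splice the device's queue-full list and requeue
 * each command to the processing thread in its saved t_state.
 */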
917static void target_qf_do_work(struct work_struct *work)
918{
919 struct se_device *dev = container_of(work, struct se_device,
920 qf_work_queue);
921 LIST_HEAD(qf_cmd_list);
922 struct se_cmd *cmd, *cmd_tmp;
923
924 spin_lock_irq(&dev->qf_cmd_lock);
925 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
926 spin_unlock_irq(&dev->qf_cmd_lock);
927
928 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
929 list_del(&cmd->se_qf_node);
930 atomic_dec(&dev->dev_qf_count);
931 smp_mb__after_atomic_dec();
932
933 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
934 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
935 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
936 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
937 : "UNKNOWN");
938
939 transport_add_cmd_to_queue(cmd, cmd->t_state, true);
940 }
941}
942
943unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
944{
945 switch (cmd->data_direction) {
946 case DMA_NONE:
947 return "NONE";
948 case DMA_FROM_DEVICE:
949 return "READ";
950 case DMA_TO_DEVICE:
951 return "WRITE";
952 case DMA_BIDIRECTIONAL:
953 return "BIDI";
954 default:
955 break;
956 }
957
958 return "UNKNOWN";
959}
960
961void transport_dump_dev_state(
962 struct se_device *dev,
963 char *b,
964 int *bl)
965{
966 *bl += sprintf(b + *bl, "Status: ");
967 switch (dev->dev_status) {
968 case TRANSPORT_DEVICE_ACTIVATED:
969 *bl += sprintf(b + *bl, "ACTIVATED");
970 break;
971 case TRANSPORT_DEVICE_DEACTIVATED:
972 *bl += sprintf(b + *bl, "DEACTIVATED");
973 break;
974 case TRANSPORT_DEVICE_SHUTDOWN:
975 *bl += sprintf(b + *bl, "SHUTDOWN");
976 break;
977 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
978 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
979 *bl += sprintf(b + *bl, "OFFLINE");
980 break;
981 default:
982 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
983 break;
984 }
985
986 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
987 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
988 dev->queue_depth);
989 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
990 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
991 *bl += sprintf(b + *bl, " ");
992}
993
994void transport_dump_vpd_proto_id(
995 struct t10_vpd *vpd,
996 unsigned char *p_buf,
997 int p_buf_len)
998{
999 unsigned char buf[VPD_TMP_BUF_SIZE];
1000 int len;
1001
1002 memset(buf, 0, VPD_TMP_BUF_SIZE);
1003 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1004
1005 switch (vpd->protocol_identifier) {
1006 case 0x00:
1007 sprintf(buf+len, "Fibre Channel\n");
1008 break;
1009 case 0x10:
1010 sprintf(buf+len, "Parallel SCSI\n");
1011 break;
1012 case 0x20:
1013 sprintf(buf+len, "SSA\n");
1014 break;
1015 case 0x30:
1016 sprintf(buf+len, "IEEE 1394\n");
1017 break;
1018 case 0x40:
1019 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1020 " Protocol\n");
1021 break;
1022 case 0x50:
1023 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1024 break;
1025 case 0x60:
1026 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1027 break;
1028 case 0x70:
1029 sprintf(buf+len, "Automation/Drive Interface Transport"
1030 " Protocol\n");
1031 break;
1032 case 0x80:
1033 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1034 break;
1035 default:
1036 sprintf(buf+len, "Unknown 0x%02x\n",
1037 vpd->protocol_identifier);
1038 break;
1039 }
1040
1041 if (p_buf)
1042 strncpy(p_buf, buf, p_buf_len);
1043 else
1044 pr_debug("%s", buf);
1045}
1046
1047void
1048transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1049{
1050
1051
1052
1053
1054
1055 if (page_83[1] & 0x80) {
1056 vpd->protocol_identifier = (page_83[0] & 0xf0);
1057 vpd->protocol_identifier_set = 1;
1058 transport_dump_vpd_proto_id(vpd, NULL, 0);
1059 }
1060}
1061EXPORT_SYMBOL(transport_set_vpd_proto_id);
1062
1063int transport_dump_vpd_assoc(
1064 struct t10_vpd *vpd,
1065 unsigned char *p_buf,
1066 int p_buf_len)
1067{
1068 unsigned char buf[VPD_TMP_BUF_SIZE];
1069 int ret = 0;
1070 int len;
1071
1072 memset(buf, 0, VPD_TMP_BUF_SIZE);
1073 len = sprintf(buf, "T10 VPD Identifier Association: ");
1074
1075 switch (vpd->association) {
1076 case 0x00:
1077 sprintf(buf+len, "addressed logical unit\n");
1078 break;
1079 case 0x10:
1080 sprintf(buf+len, "target port\n");
1081 break;
1082 case 0x20:
1083 sprintf(buf+len, "SCSI target device\n");
1084 break;
1085 default:
1086 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1087 ret = -EINVAL;
1088 break;
1089 }
1090
1091 if (p_buf)
1092 strncpy(p_buf, buf, p_buf_len);
1093 else
1094 pr_debug("%s", buf);
1095
1096 return ret;
1097}
1098
1099int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1100{
1101
1102
1103
1104
1105
1106 vpd->association = (page_83[1] & 0x30);
1107 return transport_dump_vpd_assoc(vpd, NULL, 0);
1108}
1109EXPORT_SYMBOL(transport_set_vpd_assoc);
1110
1111int transport_dump_vpd_ident_type(
1112 struct t10_vpd *vpd,
1113 unsigned char *p_buf,
1114 int p_buf_len)
1115{
1116 unsigned char buf[VPD_TMP_BUF_SIZE];
1117 int ret = 0;
1118 int len;
1119
1120 memset(buf, 0, VPD_TMP_BUF_SIZE);
1121 len = sprintf(buf, "T10 VPD Identifier Type: ");
1122
1123 switch (vpd->device_identifier_type) {
1124 case 0x00:
1125 sprintf(buf+len, "Vendor specific\n");
1126 break;
1127 case 0x01:
1128 sprintf(buf+len, "T10 Vendor ID based\n");
1129 break;
1130 case 0x02:
1131 sprintf(buf+len, "EUI-64 based\n");
1132 break;
1133 case 0x03:
1134 sprintf(buf+len, "NAA\n");
1135 break;
1136 case 0x04:
1137 sprintf(buf+len, "Relative target port identifier\n");
1138 break;
1139 case 0x08:
1140 sprintf(buf+len, "SCSI name string\n");
1141 break;
1142 default:
1143 sprintf(buf+len, "Unsupported: 0x%02x\n",
1144 vpd->device_identifier_type);
1145 ret = -EINVAL;
1146 break;
1147 }
1148
1149 if (p_buf) {
1150 if (p_buf_len < strlen(buf)+1)
1151 return -EINVAL;
1152 strncpy(p_buf, buf, p_buf_len);
1153 } else {
1154 pr_debug("%s", buf);
1155 }
1156
1157 return ret;
1158}
1159
1160int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1161{
1162
1163
1164
1165
1166
1167 vpd->device_identifier_type = (page_83[1] & 0x0f);
1168 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1169}
1170EXPORT_SYMBOL(transport_set_vpd_ident_type);
1171
1172int transport_dump_vpd_ident(
1173 struct t10_vpd *vpd,
1174 unsigned char *p_buf,
1175 int p_buf_len)
1176{
1177 unsigned char buf[VPD_TMP_BUF_SIZE];
1178 int ret = 0;
1179
1180 memset(buf, 0, VPD_TMP_BUF_SIZE);
1181
1182 switch (vpd->device_identifier_code_set) {
1183 case 0x01:
1184 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1185 &vpd->device_identifier[0]);
1186 break;
1187 case 0x02:
1188 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1189 &vpd->device_identifier[0]);
1190 break;
1191 case 0x03:
1192 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1193 &vpd->device_identifier[0]);
1194 break;
1195 default:
1196 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1197 " 0x%02x", vpd->device_identifier_code_set);
1198 ret = -EINVAL;
1199 break;
1200 }
1201
1202 if (p_buf)
1203 strncpy(p_buf, buf, p_buf_len);
1204 else
1205 pr_debug("%s", buf);
1206
1207 return ret;
1208}
1209
1210int
1211transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1212{
1213 static const char hex_str[] = "0123456789abcdef";
1214 int j = 0, i = 4;
1215
1216
1217
1218
1219
1220
1221 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1222 switch (vpd->device_identifier_code_set) {
1223 case 0x01:
1224 vpd->device_identifier[j++] =
1225 hex_str[vpd->device_identifier_type];
1226 while (i < (4 + page_83[3])) {
1227 vpd->device_identifier[j++] =
1228 hex_str[(page_83[i] & 0xf0) >> 4];
1229 vpd->device_identifier[j++] =
1230 hex_str[page_83[i] & 0x0f];
1231 i++;
1232 }
1233 break;
1234 case 0x02:
1235 case 0x03:
1236 while (i < (4 + page_83[3]))
1237 vpd->device_identifier[j++] = page_83[i++];
1238 break;
1239 default:
1240 break;
1241 }
1242
1243 return transport_dump_vpd_ident(vpd, NULL, 0);
1244}
1245EXPORT_SYMBOL(transport_set_vpd_ident);
1246
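/*
 * pSCSI passthrough devices let the underlying SCSI device handle SAM task
 * attributes itself; all other backends get the target-core emulation.
 */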
1247static void core_setup_task_attr_emulation(struct se_device *dev)
1248{
1249
1250
1251
1252
1253
1254
1255
1256 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1257 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1258 return;
1259 }
1260
1261 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1262 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1263 " device\n", dev->transport->name,
1264 dev->transport->get_device_rev(dev));
1265}
1266
1267static void scsi_dump_inquiry(struct se_device *dev)
1268{
1269 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1270 int i, device_type;
1271
1272
1273
1274 pr_debug(" Vendor: ");
1275 for (i = 0; i < 8; i++)
1276 if (wwn->vendor[i] >= 0x20)
1277 pr_debug("%c", wwn->vendor[i]);
1278 else
1279 pr_debug(" ");
1280
1281 pr_debug(" Model: ");
1282 for (i = 0; i < 16; i++)
1283 if (wwn->model[i] >= 0x20)
1284 pr_debug("%c", wwn->model[i]);
1285 else
1286 pr_debug(" ");
1287
1288 pr_debug(" Revision: ");
1289 for (i = 0; i < 4; i++)
1290 if (wwn->revision[i] >= 0x20)
1291 pr_debug("%c", wwn->revision[i]);
1292 else
1293 pr_debug(" ");
1294
1295 pr_debug("\n");
1296
1297 device_type = dev->transport->get_device_type(dev);
1298 pr_debug(" Type: %s ", scsi_device_type(device_type));
1299 pr_debug(" ANSI SCSI revision: %02x\n",
1300 dev->transport->get_device_rev(dev));
1301}
1302
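/*
 * Allocate and initialise a struct se_device for the given backend, attach
 * it to the HBA, set up reservation and ALUA emulation, and start the
 * per-device processing thread.  Returns NULL on failure.
 */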
1303struct se_device *transport_add_device_to_core_hba(
1304 struct se_hba *hba,
1305 struct se_subsystem_api *transport,
1306 struct se_subsystem_dev *se_dev,
1307 u32 device_flags,
1308 void *transport_dev,
1309 struct se_dev_limits *dev_limits,
1310 const char *inquiry_prod,
1311 const char *inquiry_rev)
1312{
1313 int force_pt;
1314 struct se_device *dev;
1315
1316 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1317 if (!dev) {
1318 pr_err("Unable to allocate memory for se_dev_t\n");
1319 return NULL;
1320 }
1321
1322 transport_init_queue_obj(&dev->dev_queue_obj);
1323 dev->dev_flags = device_flags;
1324 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1325 dev->dev_ptr = transport_dev;
1326 dev->se_hba = hba;
1327 dev->se_sub_dev = se_dev;
1328 dev->transport = transport;
1329 INIT_LIST_HEAD(&dev->dev_list);
1330 INIT_LIST_HEAD(&dev->dev_sep_list);
1331 INIT_LIST_HEAD(&dev->dev_tmr_list);
1332 INIT_LIST_HEAD(&dev->execute_task_list);
1333 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1334 INIT_LIST_HEAD(&dev->state_task_list);
1335 INIT_LIST_HEAD(&dev->qf_cmd_list);
1336 spin_lock_init(&dev->execute_task_lock);
1337 spin_lock_init(&dev->delayed_cmd_lock);
1338 spin_lock_init(&dev->dev_reservation_lock);
1339 spin_lock_init(&dev->dev_status_lock);
1340 spin_lock_init(&dev->se_port_lock);
1341 spin_lock_init(&dev->se_tmr_lock);
1342 spin_lock_init(&dev->qf_cmd_lock);
1343
1344 dev->queue_depth = dev_limits->queue_depth;
1345 atomic_set(&dev->depth_left, dev->queue_depth);
1346 atomic_set(&dev->dev_ordered_id, 0);
1347
1348 se_dev_set_default_attribs(dev, dev_limits);
1349
1350 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1351 dev->creation_time = get_jiffies_64();
1352 spin_lock_init(&dev->stats_lock);
1353
1354 spin_lock(&hba->device_lock);
1355 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1356 hba->dev_count++;
1357 spin_unlock(&hba->device_lock);
1358
1359
1360
1361 core_setup_task_attr_emulation(dev);
1362
1363
1364
1365 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1366
1367
1368
1369 core_setup_reservations(dev, force_pt);
1370
1371
1372
1373 if (core_setup_alua(dev, force_pt) < 0)
1374 goto out;
1375
1376
1377
1378
1379 dev->process_thread = kthread_run(transport_processing_thread, dev,
1380 "LIO_%s", dev->transport->name);
1381 if (IS_ERR(dev->process_thread)) {
1382 pr_err("Unable to create kthread: LIO_%s\n",
1383 dev->transport->name);
1384 goto out;
1385 }
1386
1387
1388
1389 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1390
1391
1392
1393
1394
1395
1396
1397
1398 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1399 if (!inquiry_prod || !inquiry_rev) {
1400 pr_err("All non TCM/pSCSI plugins require"
1401 " INQUIRY consts\n");
1402 goto out;
1403 }
1404
1405 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1406 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1407 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1408 }
1409 scsi_dump_inquiry(dev);
1410
1411 return dev;
out:
	/* The processing thread may not have been created yet on this path. */
	if (!IS_ERR_OR_NULL(dev->process_thread))
		kthread_stop(dev->process_thread);
1414
1415 spin_lock(&hba->device_lock);
1416 list_del(&dev->dev_list);
1417 hba->dev_count--;
1418 spin_unlock(&hba->device_lock);
1419
1420 se_release_vpd_for_dev(dev);
1421
1422 kfree(dev);
1423
1424 return NULL;
1425}
1426EXPORT_SYMBOL(transport_add_device_to_core_hba);

/*
 * Older SCSI-2 initiators may encode the LUN in the upper bits of CDB
 * byte 1.  Clear those bits for opcodes where byte 1 is reserved; for the
 * opcodes listed below byte 1 carries real meaning and is left untouched.
 */
1436static inline void transport_generic_prepare_cdb(
1437 unsigned char *cdb)
1438{
1439 switch (cdb[0]) {
1440 case READ_10:
1441 case READ_12:
1442 case READ_16:
1443 case SEND_DIAGNOSTIC:
1444 case VERIFY:
1445 case VERIFY_16:
1446 case WRITE_VERIFY:
1447 case WRITE_VERIFY_12:
1448 case MAINTENANCE_IN:
1449 break;
1450 default:
1451 cdb[1] &= 0x1f;
1452 break;
1453 }
1454}
1455
1456static struct se_task *
1457transport_generic_get_task(struct se_cmd *cmd,
1458 enum dma_data_direction data_direction)
1459{
1460 struct se_task *task;
1461 struct se_device *dev = cmd->se_dev;
1462
1463 task = dev->transport->alloc_task(cmd->t_task_cdb);
1464 if (!task) {
1465 pr_err("Unable to allocate struct se_task\n");
1466 return NULL;
1467 }
1468
1469 INIT_LIST_HEAD(&task->t_list);
1470 INIT_LIST_HEAD(&task->t_execute_list);
1471 INIT_LIST_HEAD(&task->t_state_list);
1472 init_completion(&task->task_stop_comp);
1473 task->task_se_cmd = cmd;
1474 task->task_data_direction = data_direction;
1475
1476 return task;
1477}
1478
1479static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1480
1481
1482
1483
1484
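/*
 * Initialise the generic target-core fields of a struct se_cmd.  Fabric
 * modules call this once for every new command descriptor.
 */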
1485void transport_init_se_cmd(
1486 struct se_cmd *cmd,
1487 struct target_core_fabric_ops *tfo,
1488 struct se_session *se_sess,
1489 u32 data_length,
1490 int data_direction,
1491 int task_attr,
1492 unsigned char *sense_buffer)
1493{
1494 INIT_LIST_HEAD(&cmd->se_lun_node);
1495 INIT_LIST_HEAD(&cmd->se_delayed_node);
1496 INIT_LIST_HEAD(&cmd->se_qf_node);
1497 INIT_LIST_HEAD(&cmd->se_queue_node);
1498 INIT_LIST_HEAD(&cmd->se_cmd_list);
1499 INIT_LIST_HEAD(&cmd->t_task_list);
1500 init_completion(&cmd->transport_lun_fe_stop_comp);
1501 init_completion(&cmd->transport_lun_stop_comp);
1502 init_completion(&cmd->t_transport_stop_comp);
1503 init_completion(&cmd->cmd_wait_comp);
1504 spin_lock_init(&cmd->t_state_lock);
1505 atomic_set(&cmd->transport_dev_active, 1);
1506
1507 cmd->se_tfo = tfo;
1508 cmd->se_sess = se_sess;
1509 cmd->data_length = data_length;
1510 cmd->data_direction = data_direction;
1511 cmd->sam_task_attr = task_attr;
1512 cmd->sense_buffer = sense_buffer;
1513}
1514EXPORT_SYMBOL(transport_init_se_cmd);
1515
1516static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1517{
1518
1519
1520
1521
1522 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1523 return 0;
1524
1525 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1526 pr_debug("SAM Task Attribute ACA"
1527 " emulation is not supported\n");
1528 return -EINVAL;
1529 }
1530
1531
1532
1533
1534 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1535 smp_mb__after_atomic_inc();
1536 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1537 cmd->se_ordered_id, cmd->sam_task_attr,
1538 cmd->se_dev->transport->name);
1539 return 0;
1540}
1541
1542
1543
1544
1545
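/*
 * Set up the received CDB on the se_cmd: bounds-check its size, allocate an
 * oversized CDB buffer when needed, copy the bytes in and run the command
 * sequencer.  Returns 0 on success or a negative errno with the sense
 * reason already set on the command.
 */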
1546int transport_generic_allocate_tasks(
1547 struct se_cmd *cmd,
1548 unsigned char *cdb)
1549{
1550 int ret;
1551
1552 transport_generic_prepare_cdb(cdb);
1553
1554
1555
1556
1557 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1558 pr_err("Received SCSI CDB with command_size: %d that"
1559 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1560 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1561 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1562 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1563 return -EINVAL;
1564 }
1565
1566
1567
1568
1569
1570 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1571 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1572 GFP_KERNEL);
1573 if (!cmd->t_task_cdb) {
1574 pr_err("Unable to allocate cmd->t_task_cdb"
1575 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1576 scsi_command_size(cdb),
1577 (unsigned long)sizeof(cmd->__t_task_cdb));
1578 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1579 cmd->scsi_sense_reason =
1580 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1581 return -ENOMEM;
1582 }
1583 } else
1584 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1585
1586
1587
1588 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1589
1590
1591
1592
1593
1594
1595 ret = transport_generic_cmd_sequencer(cmd, cdb);
1596 if (ret < 0)
1597 return ret;
1598
1599
1600
1601 if (transport_check_alloc_task_attr(cmd) < 0) {
1602 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1603 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1604 return -EINVAL;
1605 }
1606 spin_lock(&cmd->se_lun->lun_sep_lock);
1607 if (cmd->se_lun->lun_sep)
1608 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1609 spin_unlock(&cmd->se_lun->lun_sep_lock);
1610 return 0;
1611}
1612EXPORT_SYMBOL(transport_generic_allocate_tasks);
1613
1614
1615
1616
1617
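/*
 * Process a fully initialised command directly in the caller's context;
 * may block, so it must not be called from interrupt context.  A failure
 * from transport_generic_new_cmd() is reported through
 * transport_generic_request_failure(), so 0 is returned once the command
 * has been accepted.
 */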
1618int transport_handle_cdb_direct(
1619 struct se_cmd *cmd)
1620{
1621 int ret;
1622
1623 if (!cmd->se_lun) {
1624 dump_stack();
1625 pr_err("cmd->se_lun is NULL\n");
1626 return -EINVAL;
1627 }
1628 if (in_interrupt()) {
1629 dump_stack();
1630 pr_err("transport_generic_handle_cdb cannot be called"
1631 " from interrupt context\n");
1632 return -EINVAL;
1633 }
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643 cmd->t_state = TRANSPORT_NEW_CMD;
1644 atomic_set(&cmd->t_transport_active, 1);
1645
1646
1647
1648
1649
1650 ret = transport_generic_new_cmd(cmd);
1651 if (ret < 0)
1652 transport_generic_request_failure(cmd);
1653
1654 return 0;
1655}
1656EXPORT_SYMBOL(transport_handle_cdb_direct);
1657
1658
1659
1660
1661
1662
1663int transport_generic_handle_cdb_map(
1664 struct se_cmd *cmd)
1665{
1666 if (!cmd->se_lun) {
1667 dump_stack();
1668 pr_err("cmd->se_lun is NULL\n");
1669 return -EINVAL;
1670 }
1671
1672 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1673 return 0;
1674}
1675EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1676
1677
1678
1679
1680
1681int transport_generic_handle_data(
1682 struct se_cmd *cmd)
1683{
1684
1685
1686
1687
1688
1689
1690 if (!in_interrupt() && signal_pending(current))
1691 return -EPERM;
1692
1693
1694
1695
1696
1697
1698
1699 if (transport_check_aborted_status(cmd, 1) != 0)
1700 return 0;
1701
1702 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1703 return 0;
1704}
1705EXPORT_SYMBOL(transport_generic_handle_data);
1706
1707
1708
1709
1710
1711int transport_generic_handle_tmr(
1712 struct se_cmd *cmd)
1713{
1714 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1715 return 0;
1716}
1717EXPORT_SYMBOL(transport_generic_handle_tmr);
1718
1719
1720
1721
1722
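/*
 * Ask an active task to stop and wait until it does.  Returns true if the
 * task was active and has now been quiesced, false if there was nothing to
 * do.  Called and returns with cmd->t_state_lock held; the lock is dropped
 * while waiting for the completion.
 */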
1723bool target_stop_task(struct se_task *task, unsigned long *flags)
1724{
1725 struct se_cmd *cmd = task->task_se_cmd;
1726 bool was_active = false;
1727
1728 if (task->task_flags & TF_ACTIVE) {
1729 task->task_flags |= TF_REQUEST_STOP;
1730 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1731
1732 pr_debug("Task %p waiting to complete\n", task);
1733 wait_for_completion(&task->task_stop_comp);
1734 pr_debug("Task %p stopped successfully\n", task);
1735
1736 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1737 atomic_dec(&cmd->t_task_cdbs_left);
1738 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1739 was_active = true;
1740 }
1741
1742 return was_active;
1743}
1744
1745static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1746{
1747 struct se_task *task, *task_tmp;
1748 unsigned long flags;
1749 int ret = 0;
1750
1751 pr_debug("ITT[0x%08x] - Stopping tasks\n",
1752 cmd->se_tfo->get_task_tag(cmd));
1753
1754
1755
1756
1757 spin_lock_irqsave(&cmd->t_state_lock, flags);
1758 list_for_each_entry_safe(task, task_tmp,
1759 &cmd->t_task_list, t_list) {
1760 pr_debug("Processing task %p\n", task);
1761
1762
1763
1764
1765 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1766 spin_unlock_irqrestore(&cmd->t_state_lock,
1767 flags);
1768 transport_remove_task_from_execute_queue(task,
1769 cmd->se_dev);
1770
1771 pr_debug("Task %p removed from execute queue\n", task);
1772 spin_lock_irqsave(&cmd->t_state_lock, flags);
1773 continue;
1774 }
1775
1776 if (!target_stop_task(task, &flags)) {
1777 pr_debug("Task %p - did nothing\n", task);
1778 ret++;
1779 }
1780 }
1781 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1782
1783 return ret;
1784}
1785
1786
1787
1788
1789static void transport_generic_request_failure(struct se_cmd *cmd)
1790{
1791 int ret = 0;
1792
1793 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1794 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1795 cmd->t_task_cdb[0]);
1796 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1797 cmd->se_tfo->get_cmd_state(cmd),
1798 cmd->t_state, cmd->scsi_sense_reason);
1799 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1800 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1801 " t_transport_active: %d t_transport_stop: %d"
1802 " t_transport_sent: %d\n", cmd->t_task_list_num,
1803 atomic_read(&cmd->t_task_cdbs_left),
1804 atomic_read(&cmd->t_task_cdbs_sent),
1805 atomic_read(&cmd->t_task_cdbs_ex_left),
1806 atomic_read(&cmd->t_transport_active),
1807 atomic_read(&cmd->t_transport_stop),
1808 atomic_read(&cmd->t_transport_sent));
1809
1810
1811
1812
1813 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1814 transport_complete_task_attr(cmd);
1815
1816 switch (cmd->scsi_sense_reason) {
1817 case TCM_NON_EXISTENT_LUN:
1818 case TCM_UNSUPPORTED_SCSI_OPCODE:
1819 case TCM_INVALID_CDB_FIELD:
1820 case TCM_INVALID_PARAMETER_LIST:
1821 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1822 case TCM_UNKNOWN_MODE_PAGE:
1823 case TCM_WRITE_PROTECTED:
1824 case TCM_ADDRESS_OUT_OF_RANGE:
1825 case TCM_CHECK_CONDITION_ABORT_CMD:
1826 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1827 case TCM_CHECK_CONDITION_NOT_READY:
1828 break;
1829 case TCM_RESERVATION_CONFLICT:
1830
1831
1832
1833
1834
1835
1836 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1837
1838
1839
1840
1841
1842
1843
1844 if (cmd->se_sess &&
1845 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1846 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1847 cmd->orig_fe_lun, 0x2C,
1848 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1849
1850 ret = cmd->se_tfo->queue_status(cmd);
1851 if (ret == -EAGAIN || ret == -ENOMEM)
1852 goto queue_full;
1853 goto check_stop;
1854 default:
1855 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1856 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1857 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1858 break;
1859 }
1860
1861
1862
1863
1864
1865
1866
1867 ret = transport_send_check_condition_and_sense(cmd,
1868 cmd->scsi_sense_reason, 0);
1869 if (ret == -EAGAIN || ret == -ENOMEM)
1870 goto queue_full;
1871
check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;
1877
1878queue_full:
1879 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1880 transport_handle_queue_full(cmd, cmd->se_dev);
1881}
1882
1883static inline u32 transport_lba_21(unsigned char *cdb)
1884{
1885 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1886}
1887
1888static inline u32 transport_lba_32(unsigned char *cdb)
1889{
1890 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1891}
1892
1893static inline unsigned long long transport_lba_64(unsigned char *cdb)
1894{
1895 unsigned int __v1, __v2;
1896
1897 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1898 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1899
1900 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1901}
1902
1903
1904
1905
1906static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1907{
1908 unsigned int __v1, __v2;
1909
1910 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1911 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1912
1913 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1914}
1915
1916static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1917{
1918 unsigned long flags;
1919
1920 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1921 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1922 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1923}
1924
1925static inline int transport_tcq_window_closed(struct se_device *dev)
1926{
1927 if (dev->dev_tcq_window_closed++ <
1928 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1929 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1930 } else
1931 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1932
1933 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1934 return 0;
1935}

/*
 * Handle SAM task attribute emulation for a command.  Returns 1 if the
 * command may be added to the execution queue now, or 0 if it has been put
 * on the device's delayed list to wait for outstanding ORDERED commands.
 */
1944static inline int transport_execute_task_attr(struct se_cmd *cmd)
1945{
1946 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1947 return 1;
1948
1949
1950
1951
1952 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
1953 pr_debug("Added HEAD_OF_QUEUE for CDB:"
1954 " 0x%02x, se_ordered_id: %u\n",
1955 cmd->t_task_cdb[0],
1956 cmd->se_ordered_id);
1957 return 1;
1958 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1959 atomic_inc(&cmd->se_dev->dev_ordered_sync);
1960 smp_mb__after_atomic_inc();
1961
1962 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
1963 " list, se_ordered_id: %u\n",
1964 cmd->t_task_cdb[0],
1965 cmd->se_ordered_id);
1966
1967
1968
1969
1970
1971 if (!atomic_read(&cmd->se_dev->simple_cmds))
1972 return 1;
1973 } else {
1974
1975
1976
1977 atomic_inc(&cmd->se_dev->simple_cmds);
1978 smp_mb__after_atomic_inc();
1979 }
1980
1981
1982
1983
1984
1985 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
1986
1987
1988
1989
1990 spin_lock(&cmd->se_dev->delayed_cmd_lock);
1991 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
1992 list_add_tail(&cmd->se_delayed_node,
1993 &cmd->se_dev->delayed_cmd_list);
1994 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
1995
1996 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1997 " delayed CMD list, se_ordered_id: %u\n",
1998 cmd->t_task_cdb[0], cmd->sam_task_attr,
1999 cmd->se_ordered_id);
2000
2001
2002
2003
2004 return 0;
2005 }
2006
2007
2008
2009 return 1;
2010}
2011
2012
2013
2014
2015
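/*
 * Queue the command's tasks for execution, honouring SAM task attribute
 * ordering, and then kick the device execution loop.
 */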
2016static int transport_execute_tasks(struct se_cmd *cmd)
2017{
2018 int add_tasks;
2019
2020 if (se_dev_check_online(cmd->se_dev) != 0) {
2021 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2022 transport_generic_request_failure(cmd);
2023 return 0;
2024 }
2025
2026
2027
2028
2029
2030 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2031
2032
2033
2034
2035 add_tasks = transport_execute_task_attr(cmd);
2036 if (!add_tasks)
2037 goto execute_tasks;
2038
2039
2040
2041
2042
2043
2044 transport_add_tasks_from_cmd(cmd);
2045 }
2046
2047
2048
2049
2050execute_tasks:
2051 __transport_execute_tasks(cmd->se_dev);
2052 return 0;
2053}
2054
2055
2056
2057
2058
2059
2060
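/*
 * Drain the device execute list: while queue depth is available, pop the
 * next task, mark it TF_ACTIVE|TF_SENT and hand it to the backend (or to
 * the command's execute_task() override).  A failure stops the remaining
 * tasks of that command and reports the error.
 */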
2061static int __transport_execute_tasks(struct se_device *dev)
2062{
2063 int error;
2064 struct se_cmd *cmd = NULL;
2065 struct se_task *task = NULL;
2066 unsigned long flags;
2067
2068
2069
2070
2071
2072check_depth:
2073 if (!atomic_read(&dev->depth_left))
2074 return transport_tcq_window_closed(dev);
2075
2076 dev->dev_tcq_window_closed = 0;
2077
2078 spin_lock_irq(&dev->execute_task_lock);
2079 if (list_empty(&dev->execute_task_list)) {
2080 spin_unlock_irq(&dev->execute_task_lock);
2081 return 0;
2082 }
2083 task = list_first_entry(&dev->execute_task_list,
2084 struct se_task, t_execute_list);
2085 __transport_remove_task_from_execute_queue(task, dev);
2086 spin_unlock_irq(&dev->execute_task_lock);
2087
2088 atomic_dec(&dev->depth_left);
2089
2090 cmd = task->task_se_cmd;
2091
2092 spin_lock_irqsave(&cmd->t_state_lock, flags);
2093 task->task_flags |= (TF_ACTIVE | TF_SENT);
2094 atomic_inc(&cmd->t_task_cdbs_sent);
2095
2096 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2097 cmd->t_task_list_num)
2098 atomic_set(&cmd->t_transport_sent, 1);
2099
2100 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2101
2102 if (cmd->execute_task)
2103 error = cmd->execute_task(task);
2104 else
2105 error = dev->transport->do_task(task);
2106 if (error != 0) {
2107 spin_lock_irqsave(&cmd->t_state_lock, flags);
2108 task->task_flags &= ~TF_ACTIVE;
2109 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2110 atomic_set(&cmd->t_transport_sent, 0);
2111 transport_stop_tasks_for_cmd(cmd);
2112 atomic_inc(&dev->depth_left);
2113 transport_generic_request_failure(cmd);
2114 }
2115
2116 goto check_depth;
2117
2118 return 0;
2119}
2120
2121static inline u32 transport_get_sectors_6(
2122 unsigned char *cdb,
2123 struct se_cmd *cmd,
2124 int *ret)
2125{
2126 struct se_device *dev = cmd->se_dev;
2127
2128
2129
2130
2131
2132 if (!dev)
2133 goto type_disk;
2134
2135
2136
2137
2138 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2139 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150type_disk:
2151 return cdb[4] ? : 256;
2152}
2153
2154static inline u32 transport_get_sectors_10(
2155 unsigned char *cdb,
2156 struct se_cmd *cmd,
2157 int *ret)
2158{
2159 struct se_device *dev = cmd->se_dev;
2160
2161
2162
2163
2164
2165 if (!dev)
2166 goto type_disk;
2167
2168
2169
2170
2171 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2172 *ret = -EINVAL;
2173 return 0;
2174 }
2175
2176
2177
2178
2179
2180type_disk:
2181 return (u32)(cdb[7] << 8) + cdb[8];
2182}
2183
2184static inline u32 transport_get_sectors_12(
2185 unsigned char *cdb,
2186 struct se_cmd *cmd,
2187 int *ret)
2188{
2189 struct se_device *dev = cmd->se_dev;
2190
2191
2192
2193
2194
2195 if (!dev)
2196 goto type_disk;
2197
2198
2199
2200
2201 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2202 *ret = -EINVAL;
2203 return 0;
2204 }
2205
2206
2207
2208
2209
2210type_disk:
2211 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2212}
2213
2214static inline u32 transport_get_sectors_16(
2215 unsigned char *cdb,
2216 struct se_cmd *cmd,
2217 int *ret)
2218{
2219 struct se_device *dev = cmd->se_dev;
2220
2221
2222
2223
2224
2225 if (!dev)
2226 goto type_disk;
2227
2228
2229
2230
2231 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2232 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2233
2234type_disk:
2235 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2236 (cdb[12] << 8) + cdb[13];
2237}
2238
2239
2240
2241
2242static inline u32 transport_get_sectors_32(
2243 unsigned char *cdb,
2244 struct se_cmd *cmd,
2245 int *ret)
2246{
2247
2248
2249
2250
2251 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2252 (cdb[30] << 8) + cdb[31];
2253
2254}
2255
2256static inline u32 transport_get_size(
2257 u32 sectors,
2258 unsigned char *cdb,
2259 struct se_cmd *cmd)
2260{
2261 struct se_device *dev = cmd->se_dev;
2262
2263 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2264 if (cdb[1] & 1) {
2265 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2266 } else
2267 return sectors;
2268 }
2269#if 0
2270 pr_debug("Returning block_size: %u, sectors: %u == %u for"
2271 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2272 dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2273 dev->transport->name);
2274#endif
2275 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2276}
2277
2278static void transport_xor_callback(struct se_cmd *cmd)
2279{
2280 unsigned char *buf, *addr;
2281 struct scatterlist *sg;
2282 unsigned int offset;
2283 int i;
2284 int count;
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2297 if (!buf) {
2298 pr_err("Unable to allocate xor_callback buf\n");
2299 return;
2300 }
2301
2302
2303
2304
2305 sg_copy_to_buffer(cmd->t_data_sg,
2306 cmd->t_data_nents,
2307 buf,
2308 cmd->data_length);
2309
2310
2311
2312
2313
2314
2315 offset = 0;
2316 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2317 addr = kmap_atomic(sg_page(sg), KM_USER0);
2318 if (!addr)
2319 goto out;
2320
2321 for (i = 0; i < sg->length; i++)
2322 *(addr + sg->offset + i) ^= *(buf + offset + i);
2323
2324 offset += sg->length;
2325 kunmap_atomic(addr, KM_USER0);
2326 }
2327
2328out:
2329 kfree(buf);
2330}
2331
2332
2333
2334
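/*
 * Copy sense data from the first task that captured it into the command's
 * sense buffer (after the fabric-specific header) and set scsi_status.
 * Returns 0 if sense was found or none was needed, -1 otherwise.
 */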
2335static int transport_get_sense_data(struct se_cmd *cmd)
2336{
2337 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2338 struct se_device *dev = cmd->se_dev;
2339 struct se_task *task = NULL, *task_tmp;
2340 unsigned long flags;
2341 u32 offset = 0;
2342
2343 WARN_ON(!cmd->se_lun);
2344
2345 if (!dev)
2346 return 0;
2347
2348 spin_lock_irqsave(&cmd->t_state_lock, flags);
2349 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2350 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2351 return 0;
2352 }
2353
2354 list_for_each_entry_safe(task, task_tmp,
2355 &cmd->t_task_list, t_list) {
2356 if (!task->task_sense)
2357 continue;
2358
2359 if (!dev->transport->get_sense_buffer) {
2360 pr_err("dev->transport->get_sense_buffer"
2361 " is NULL\n");
2362 continue;
2363 }
2364
2365 sense_buffer = dev->transport->get_sense_buffer(task);
2366 if (!sense_buffer) {
2367 pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2368 " sense buffer for task with sense\n",
2369 cmd->se_tfo->get_task_tag(cmd), task);
2370 continue;
2371 }
2372 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2373
2374 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2375 TRANSPORT_SENSE_BUFFER);
2376
2377 memcpy(&buffer[offset], sense_buffer,
2378 TRANSPORT_SENSE_BUFFER);
2379 cmd->scsi_status = task->task_scsi_status;
2380
2381 cmd->scsi_sense_length =
2382 (TRANSPORT_SENSE_BUFFER + offset);
2383
2384 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2385 " and sense\n",
2386 dev->se_hba->hba_id, dev->transport->name,
2387 cmd->scsi_status);
2388 return 0;
2389 }
2390 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2391
2392 return -1;
2393}
2394
2395static inline long long transport_dev_end_lba(struct se_device *dev)
2396{
2397 return dev->transport->get_blocks(dev) + 1;
2398}
2399
2400static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2401{
2402 struct se_device *dev = cmd->se_dev;
2403 u32 sectors;
2404
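/* Range checking against the end of the device only applies to TYPE_DISK. */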
2405 if (dev->transport->get_device_type(dev) != TYPE_DISK)
2406 return 0;
2407
2408 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2409
2410 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2411 pr_err("LBA: %llu Sectors: %u exceeds"
2412 " transport_dev_end_lba(): %llu\n",
2413 cmd->t_task_lba, sectors,
2414 transport_dev_end_lba(dev));
2415 return -EINVAL;
2416 }
2417
2418 return 0;
2419}
2420
2421static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2422{
/*
 * Determine whether the WRITE_SAME is passed straight through to the
 * underlying SCSI device (pSCSI) or handled by the internal
 * WRITE_SAME + UNMAP=1 emulation for block-level discard.
 */
2429 int passthrough = (dev->transport->transport_type ==
2430 TRANSPORT_PLUGIN_PHBA_PDEV);
2431
2432 if (!passthrough) {
2433 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2434 pr_err("WRITE_SAME PBDATA and LBDATA"
2435 " bits not supported for Block Discard"
2436 " Emulation\n");
2437 return -ENOSYS;
2438 }
2439
/*
 * The emulated path currently only accepts WRITE_SAME with the
 * UNMAP bit set.
 */
2443 if (!(flags[0] & 0x08)) {
2444 pr_err("WRITE_SAME w/o UNMAP bit not"
2445 " supported for Block Discard Emulation\n");
2446 return -ENOSYS;
2447 }
2448 }
2449
2450 return 0;
2451}
2452
/*
 * transport_generic_cmd_sequencer():
 *
 * Generic command sequencer used by most DAS transport drivers:
 * performs UNIT ATTENTION, ALUA and reservation checks, then decodes
 * the CDB to set up the transfer size, LBA and any emulation handler.
 */
2463static int transport_generic_cmd_sequencer(
2464 struct se_cmd *cmd,
2465 unsigned char *cdb)
2466{
2467 struct se_device *dev = cmd->se_dev;
2468 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2469 int ret = 0, sector_ret = 0, passthrough;
2470 u32 sectors = 0, size = 0, pr_reg_type = 0;
2471 u16 service_action;
2472 u8 alua_ascq = 0;
2473
/*
 * Check for an existing UNIT ATTENTION condition.
 */
2476 if (core_scsi3_ua_check(cmd, cdb) < 0) {
2477 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2478 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2479 return -EINVAL;
2480 }
2481
/*
 * Check the ALUA target port group state for this command.
 */
2484 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2485 if (ret != 0) {
/*
 * Report CHECK CONDITION / NOT READY (ASC 0x04) with the ASCQ supplied
 * by the ALUA state check when the target port group is not available.
 */
2491 if (ret > 0) {
2492#if 0
2493 pr_debug("[%s]: ALUA TG Port not available,"
2494 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2495 cmd->se_tfo->get_fabric_name(), alua_ascq);
2496#endif
2497 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2498 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2499 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2500 return -EINVAL;
2501 }
2502 goto out_invalid_cdb_field;
2503 }
2504
/*
 * Check for SPC-2 / SPC-3 reservation conflicts.
 */
2507 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2508 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2509 cmd, cdb, pr_reg_type) != 0) {
2510 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2511 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2512 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2513 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2514 return -EBUSY;
2515 }
/*
 * Reaching here means the CDB is allowed for a SCSI initiator port
 * that is not the current reservation holder.
 */
2521 }
2522
/*
 * In passthrough mode most CDB emulation is skipped and commands are
 * handed directly to the underlying physical SCSI device.
 */
2527 passthrough =
2528 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2529
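/*
 * Decode the opcode: extract the LBA and transfer length, set the
 * data/control/non-data flags, and select any emulation handler.
 */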
2530 switch (cdb[0]) {
2531 case READ_6:
2532 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2533 if (sector_ret)
2534 goto out_unsupported_cdb;
2535 size = transport_get_size(sectors, cdb, cmd);
2536 cmd->t_task_lba = transport_lba_21(cdb);
2537 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2538 break;
2539 case READ_10:
2540 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2541 if (sector_ret)
2542 goto out_unsupported_cdb;
2543 size = transport_get_size(sectors, cdb, cmd);
2544 cmd->t_task_lba = transport_lba_32(cdb);
2545 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2546 break;
2547 case READ_12:
2548 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2549 if (sector_ret)
2550 goto out_unsupported_cdb;
2551 size = transport_get_size(sectors, cdb, cmd);
2552 cmd->t_task_lba = transport_lba_32(cdb);
2553 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2554 break;
2555 case READ_16:
2556 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2557 if (sector_ret)
2558 goto out_unsupported_cdb;
2559 size = transport_get_size(sectors, cdb, cmd);
2560 cmd->t_task_lba = transport_lba_64(cdb);
2561 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2562 break;
2563 case WRITE_6:
2564 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2565 if (sector_ret)
2566 goto out_unsupported_cdb;
2567 size = transport_get_size(sectors, cdb, cmd);
2568 cmd->t_task_lba = transport_lba_21(cdb);
2569 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2570 break;
2571 case WRITE_10:
2572 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2573 if (sector_ret)
2574 goto out_unsupported_cdb;
2575 size = transport_get_size(sectors, cdb, cmd);
2576 cmd->t_task_lba = transport_lba_32(cdb);
2577 if (cdb[1] & 0x8)
2578 cmd->se_cmd_flags |= SCF_FUA;
2579 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2580 break;
2581 case WRITE_12:
2582 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2583 if (sector_ret)
2584 goto out_unsupported_cdb;
2585 size = transport_get_size(sectors, cdb, cmd);
2586 cmd->t_task_lba = transport_lba_32(cdb);
2587 if (cdb[1] & 0x8)
2588 cmd->se_cmd_flags |= SCF_FUA;
2589 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2590 break;
2591 case WRITE_16:
2592 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2593 if (sector_ret)
2594 goto out_unsupported_cdb;
2595 size = transport_get_size(sectors, cdb, cmd);
2596 cmd->t_task_lba = transport_lba_64(cdb);
2597 if (cdb[1] & 0x8)
2598 cmd->se_cmd_flags |= SCF_FUA;
2599 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2600 break;
2601 case XDWRITEREAD_10:
2602 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2603 !(cmd->se_cmd_flags & SCF_BIDI))
2604 goto out_invalid_cdb_field;
2605 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2606 if (sector_ret)
2607 goto out_unsupported_cdb;
2608 size = transport_get_size(sectors, cdb, cmd);
2609 cmd->t_task_lba = transport_lba_32(cdb);
2610 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
 * BIDI commands are not allowed in passthrough mode.
 */
2615 if (passthrough)
2616 goto out_unsupported_cdb;
/*
 * Set up the BIDI XOR callback to run after I/O completion.
 */
2621 cmd->transport_complete_callback = &transport_xor_callback;
2622 if (cdb[1] & 0x8)
2623 cmd->se_cmd_flags |= SCF_FUA;
2624 break;
2625 case VARIABLE_LENGTH_CMD:
2626 service_action = get_unaligned_be16(&cdb[8]);
2627 switch (service_action) {
2628 case XDWRITEREAD_32:
2629 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2630 if (sector_ret)
2631 goto out_unsupported_cdb;
2632 size = transport_get_size(sectors, cdb, cmd);
/*
 * The 32-byte variant carries a 64-bit LBA in the extended CDB.
 */
2637 cmd->t_task_lba = transport_lba_64_ext(cdb);
2638 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
 * BIDI commands are not allowed in passthrough mode.
 */
2643 if (passthrough)
2644 goto out_unsupported_cdb;
/*
 * Set up the BIDI XOR callback to run after I/O completion.
 */
2650 cmd->transport_complete_callback = &transport_xor_callback;
2651 if (cdb[1] & 0x8)
2652 cmd->se_cmd_flags |= SCF_FUA;
2653 break;
2654 case WRITE_SAME_32:
2655 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2656 if (sector_ret)
2657 goto out_unsupported_cdb;
2658
2659 if (sectors)
2660 size = transport_get_size(1, cdb, cmd);
2661 else {
2662 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2663 " supported\n");
2664 goto out_invalid_cdb_field;
2665 }
2666
2667 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2668 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2669
2670 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2671 goto out_unsupported_cdb;
2672 if (!passthrough)
2673 cmd->execute_task = target_emulate_write_same;
2674 break;
2675 default:
2676 pr_err("VARIABLE_LENGTH_CMD service action"
2677 " 0x%04x not supported\n", service_action);
2678 goto out_unsupported_cdb;
2679 }
2680 break;
2681 case MAINTENANCE_IN:
2682 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/*
 * Check for emulated MI_REPORT_TARGET_PGS when ALUA emulation
 * is enabled.
 */
2687 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
2688 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2689 cmd->execute_task =
2690 target_emulate_report_target_port_groups;
2691 }
2692 size = (cdb[6] << 24) | (cdb[7] << 16) |
2693 (cdb[8] << 8) | cdb[9];
2694 } else {
2695
2696 size = (cdb[8] << 8) + cdb[9];
2697 }
2698 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2699 break;
2700 case MODE_SELECT:
2701 size = cdb[4];
2702 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2703 break;
2704 case MODE_SELECT_10:
2705 size = (cdb[7] << 8) + cdb[8];
2706 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2707 break;
2708 case MODE_SENSE:
2709 size = cdb[4];
2710 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2711 if (!passthrough)
2712 cmd->execute_task = target_emulate_modesense;
2713 break;
2714 case MODE_SENSE_10:
2715 size = (cdb[7] << 8) + cdb[8];
2716 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2717 if (!passthrough)
2718 cmd->execute_task = target_emulate_modesense;
2719 break;
2720 case GPCMD_READ_BUFFER_CAPACITY:
2721 case GPCMD_SEND_OPC:
2722 case LOG_SELECT:
2723 case LOG_SENSE:
2724 size = (cdb[7] << 8) + cdb[8];
2725 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2726 break;
2727 case READ_BLOCK_LIMITS:
2728 size = READ_BLOCK_LEN;
2729 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2730 break;
2731 case GPCMD_GET_CONFIGURATION:
2732 case GPCMD_READ_FORMAT_CAPACITIES:
2733 case GPCMD_READ_DISC_INFO:
2734 case GPCMD_READ_TRACK_RZONE_INFO:
2735 size = (cdb[7] << 8) + cdb[8];
2736 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2737 break;
2738 case PERSISTENT_RESERVE_IN:
2739 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2740 cmd->execute_task = target_scsi3_emulate_pr_in;
2741 size = (cdb[7] << 8) + cdb[8];
2742 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2743 break;
2744 case PERSISTENT_RESERVE_OUT:
2745 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2746 cmd->execute_task = target_scsi3_emulate_pr_out;
2747 size = (cdb[7] << 8) + cdb[8];
2748 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2749 break;
2750 case GPCMD_MECHANISM_STATUS:
2751 case GPCMD_READ_DVD_STRUCTURE:
2752 size = (cdb[8] << 8) + cdb[9];
2753 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2754 break;
2755 case READ_POSITION:
2756 size = READ_POSITION_LEN;
2757 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2758 break;
2759 case MAINTENANCE_OUT:
2760 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/*
 * Check for emulated MO_SET_TARGET_PGS when ALUA emulation
 * is enabled.
 */
2765 if (cdb[1] == MO_SET_TARGET_PGS &&
2766 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2767 cmd->execute_task =
2768 target_emulate_set_target_port_groups;
2769 }
2770
2771 size = (cdb[6] << 24) | (cdb[7] << 16) |
2772 (cdb[8] << 8) | cdb[9];
2773 } else {
2774
2775 size = (cdb[8] << 8) + cdb[9];
2776 }
2777 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2778 break;
2779 case INQUIRY:
2780 size = (cdb[3] << 8) + cdb[4];
/*
 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
 * See spc4r17 section 5.3.
 */
2785 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2786 cmd->sam_task_attr = MSG_HEAD_TAG;
2787 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2788 if (!passthrough)
2789 cmd->execute_task = target_emulate_inquiry;
2790 break;
2791 case READ_BUFFER:
2792 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2793 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2794 break;
2795 case READ_CAPACITY:
2796 size = READ_CAP_LEN;
2797 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2798 if (!passthrough)
2799 cmd->execute_task = target_emulate_readcapacity;
2800 break;
2801 case READ_MEDIA_SERIAL_NUMBER:
2802 case SECURITY_PROTOCOL_IN:
2803 case SECURITY_PROTOCOL_OUT:
2804 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2805 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2806 break;
2807 case SERVICE_ACTION_IN:
2808 switch (cmd->t_task_cdb[1] & 0x1f) {
2809 case SAI_READ_CAPACITY_16:
2810 if (!passthrough)
2811 cmd->execute_task =
2812 target_emulate_readcapacity_16;
2813 break;
2814 default:
2815 if (passthrough)
2816 break;
2817
2818 pr_err("Unsupported SA: 0x%02x\n",
2819 cmd->t_task_cdb[1] & 0x1f);
2820 goto out_unsupported_cdb;
2821 }
2822
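/* FALLTHROUGH: the allocation length below also covers SERVICE_ACTION_IN */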
2823 case ACCESS_CONTROL_IN:
2824 case ACCESS_CONTROL_OUT:
2825 case EXTENDED_COPY:
2826 case READ_ATTRIBUTE:
2827 case RECEIVE_COPY_RESULTS:
2828 case WRITE_ATTRIBUTE:
2829 size = (cdb[10] << 24) | (cdb[11] << 16) |
2830 (cdb[12] << 8) | cdb[13];
2831 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2832 break;
2833 case RECEIVE_DIAGNOSTIC:
2834 case SEND_DIAGNOSTIC:
2835 size = (cdb[3] << 8) | cdb[4];
2836 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2837 break;
2838
2839#if 0
2840 case GPCMD_READ_CD:
2841 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2842 size = (2336 * sectors);
2843 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2844 break;
2845#endif
2846 case READ_TOC:
2847 size = cdb[8];
2848 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2849 break;
2850 case REQUEST_SENSE:
2851 size = cdb[4];
2852 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2853 if (!passthrough)
2854 cmd->execute_task = target_emulate_request_sense;
2855 break;
2856 case READ_ELEMENT_STATUS:
2857 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2858 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2859 break;
2860 case WRITE_BUFFER:
2861 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2862 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2863 break;
2864 case RESERVE:
2865 case RESERVE_10:
/*
 * RESERVE(6) carries no allocation length in the CDB, so fall back to
 * the fabric-provided data length; RESERVE(10) supplies one in bytes 7-8.
 */
2870 if (cdb[0] == RESERVE_10)
2871 size = (cdb[7] << 8) | cdb[8];
2872 else
2873 size = cmd->data_length;
/*
 * Set up the legacy SPC-2 reservation emulation handler unless
 * reservation handling has been left to the underlying passthrough
 * device (SPC_PASSTHROUGH).
 */
2882 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2883 cmd->execute_task = target_scsi2_reservation_reserve;
2884 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2885 break;
2886 case RELEASE:
2887 case RELEASE_10:
/*
 * As with RESERVE, only RELEASE(10) carries an allocation length
 * in the CDB.
 */
2892 if (cdb[0] == RELEASE_10)
2893 size = (cdb[7] << 8) | cdb[8];
2894 else
2895 size = cmd->data_length;
2896
2897 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2898 cmd->execute_task = target_scsi2_reservation_release;
2899 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2900 break;
2901 case SYNCHRONIZE_CACHE:
2902 case 0x91: /* SYNCHRONIZE_CACHE_16 */
/*
 * Extract the LBA and range to be flushed for the emulated
 * SYNCHRONIZE_CACHE.
 */
2906 if (cdb[0] == SYNCHRONIZE_CACHE) {
2907 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2908 cmd->t_task_lba = transport_lba_32(cdb);
2909 } else {
2910 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2911 cmd->t_task_lba = transport_lba_64(cdb);
2912 }
2913 if (sector_ret)
2914 goto out_unsupported_cdb;
2915
2916 size = transport_get_size(sectors, cdb, cmd);
2917 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2918
2919 if (passthrough)
2920 break;
/*
 * Make sure LBA + range does not run past the end of the device
 * for the IBLOCK and FILEIO ->do_sync_cache() emulation.
 */
2926 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2927 if (transport_cmd_get_valid_sectors(cmd) < 0)
2928 goto out_invalid_cdb_field;
2929 }
2930 cmd->execute_task = target_emulate_synchronize_cache;
2931 break;
2932 case UNMAP:
2933 size = get_unaligned_be16(&cdb[7]);
2934 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2935 if (!passthrough)
2936 cmd->execute_task = target_emulate_unmap;
2937 break;
2938 case WRITE_SAME_16:
2939 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2940 if (sector_ret)
2941 goto out_unsupported_cdb;
2942
2943 if (sectors)
2944 size = transport_get_size(1, cdb, cmd);
2945 else {
2946 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2947 goto out_invalid_cdb_field;
2948 }
2949
2950 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2951 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2952
2953 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2954 goto out_unsupported_cdb;
2955 if (!passthrough)
2956 cmd->execute_task = target_emulate_write_same;
2957 break;
2958 case WRITE_SAME:
2959 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2960 if (sector_ret)
2961 goto out_unsupported_cdb;
2962
2963 if (sectors)
2964 size = transport_get_size(1, cdb, cmd);
2965 else {
2966 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2967 goto out_invalid_cdb_field;
2968 }
2969
2970 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2971 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
/*
 * In later SBC-3 revisions byte 1 bit 3 of WRITE_SAME(10) is the
 * UNMAP bit rather than a reserved field.
 */
2976 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2977 goto out_unsupported_cdb;
2978 if (!passthrough)
2979 cmd->execute_task = target_emulate_write_same;
2980 break;
2981 case ALLOW_MEDIUM_REMOVAL:
2982 case ERASE:
2983 case REZERO_UNIT:
2984 case SEEK_10:
2985 case SPACE:
2986 case START_STOP:
2987 case TEST_UNIT_READY:
2988 case VERIFY:
2989 case WRITE_FILEMARKS:
2990 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2991 if (!passthrough)
2992 cmd->execute_task = target_emulate_noop;
2993 break;
2994 case GPCMD_CLOSE_TRACK:
2995 case INITIALIZE_ELEMENT_STATUS:
2996 case GPCMD_LOAD_UNLOAD:
2997 case GPCMD_SET_SPEED:
2998 case MOVE_MEDIUM:
2999 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3000 break;
3001 case REPORT_LUNS:
3002 cmd->execute_task = target_report_luns;
3003 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS.
 * See spc4r17 section 5.3.
 */
3008 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3009 cmd->sam_task_attr = MSG_HEAD_TAG;
3010 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3011 break;
3012 default:
3013 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3014 " 0x%02x, sending CHECK_CONDITION.\n",
3015 cmd->se_tfo->get_fabric_name(), cdb[0]);
3016 goto out_unsupported_cdb;
3017 }
3018
3019 if (size != cmd->data_length) {
3020 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3021 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3022 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3023 cmd->data_length, size, cdb[0]);
3024
3025 cmd->cmd_spdtl = size;
3026
3027 if (cmd->data_direction == DMA_TO_DEVICE) {
3028 pr_err("Rejecting underflow/overflow"
3029 " WRITE data\n");
3030 goto out_invalid_cdb_field;
3031 }
3032
/*
 * Reject READ_* or WRITE_* with overflow/underflow for backends
 * that are not using 512-byte sectors.
 */
3036 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3037 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3038 " CDB on non 512-byte sector setup subsystem"
3039 " plugin: %s\n", dev->transport->name);
3040
3041 goto out_invalid_cdb_field;
3042 }
3043
/*
 * For the overflow case keep the existing fabric-provided
 * ->data_length; for the underflow case reset ->data_length to the
 * smaller SCSI expected data transfer length.
 */
3049 if (size > cmd->data_length) {
3050 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3051 cmd->residual_count = (size - cmd->data_length);
3052 } else {
3053 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3054 cmd->residual_count = (cmd->data_length - size);
3055 cmd->data_length = size;
3056 }
3057 }
3058
/* Reject any command that we do not have a handler for. */
3060 if (!(passthrough || cmd->execute_task ||
3061 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3062 goto out_unsupported_cdb;
3063
3064 transport_set_supported_SAM_opcode(cmd);
3065 return ret;
3066
3067out_unsupported_cdb:
3068 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3069 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3070 return -EINVAL;
3071out_invalid_cdb_field:
3072 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3073 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3074 return -EINVAL;
3075}
3076
/*
 * Called from I/O completion to determine which dormant/delayed
 * commands need their tasks added to the execution queue after a
 * HEAD_OF_QUEUE or ORDERED task attribute has completed.
 */
3081static void transport_complete_task_attr(struct se_cmd *cmd)
3082{
3083 struct se_device *dev = cmd->se_dev;
3084 struct se_cmd *cmd_p, *cmd_tmp;
3085 int new_active_tasks = 0;
3086
3087 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3088 atomic_dec(&dev->simple_cmds);
3089 smp_mb__after_atomic_dec();
3090 dev->dev_cur_ordered_id++;
3091 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3092 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3093 cmd->se_ordered_id);
3094 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3095 dev->dev_cur_ordered_id++;
3096 pr_debug("Incremented dev_cur_ordered_id: %u for"
3097 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3098 cmd->se_ordered_id);
3099 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3100 atomic_dec(&dev->dev_ordered_sync);
3101 smp_mb__after_atomic_dec();
3102
3103 dev->dev_cur_ordered_id++;
3104 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3105 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3106 }
3107
/*
 * Process delayed commands from the delayed_cmd_list up to and
 * including the last received ORDERED command, which forms another
 * blocking boundary.
 */
3112 spin_lock(&dev->delayed_cmd_lock);
3113 list_for_each_entry_safe(cmd_p, cmd_tmp,
3114 &dev->delayed_cmd_list, se_delayed_node) {
3115
3116 list_del(&cmd_p->se_delayed_node);
3117 spin_unlock(&dev->delayed_cmd_lock);
3118
3119 pr_debug("Calling add_tasks() for"
3120 " cmd_p: 0x%02x Task Attr: 0x%02x"
3121 " Dormant -> Active, se_ordered_id: %u\n",
3122 cmd_p->t_task_cdb[0],
3123 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3124
3125 transport_add_tasks_from_cmd(cmd_p);
3126 new_active_tasks++;
3127
3128 spin_lock(&dev->delayed_cmd_lock);
3129 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3130 break;
3131 }
3132 spin_unlock(&dev->delayed_cmd_lock);
3133
/*
 * If new tasks were made active, wake up the transport processing
 * thread.
 */
3137 if (new_active_tasks != 0)
3138 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3139}
3140
3141static void transport_complete_qf(struct se_cmd *cmd)
3142{
3143 int ret = 0;
3144
3145 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3146 transport_complete_task_attr(cmd);
3147
3148 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3149 ret = cmd->se_tfo->queue_status(cmd);
3150 goto out;
3151 }
3152
3153 switch (cmd->data_direction) {
3154 case DMA_FROM_DEVICE:
3155 ret = cmd->se_tfo->queue_data_in(cmd);
3156 break;
3157 case DMA_TO_DEVICE:
3158 if (cmd->t_bidi_data_sg) {
3159 ret = cmd->se_tfo->queue_data_in(cmd);
3160 if (ret < 0)
3161 break;
3162 }
3163
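/* Fall through for DMA_TO_DEVICE */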
3164 case DMA_NONE:
3165 ret = cmd->se_tfo->queue_status(cmd);
3166 break;
3167 default:
3168 break;
3169 }
3170
3171out:
3172 if (ret < 0) {
3173 transport_handle_queue_full(cmd, cmd->se_dev);
3174 return;
3175 }
3176 transport_lun_remove_cmd(cmd);
3177 transport_cmd_check_stop_to_fabric(cmd);
3178}
3179
3180static void transport_handle_queue_full(
3181 struct se_cmd *cmd,
3182 struct se_device *dev)
3183{
3184 spin_lock_irq(&dev->qf_cmd_lock);
3185 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3186 atomic_inc(&dev->dev_qf_count);
3187 smp_mb__after_atomic_inc();
3188 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3189
3190 schedule_work(&cmd->se_dev->qf_work_queue);
3191}
3192
3193static void target_complete_ok_work(struct work_struct *work)
3194{
3195 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3196 int reason = 0, ret;
3197
/*
 * Check whether delayed/dormant commands need to be moved to the
 * execution queue now that a HEAD_OF_QUEUE or ORDERED command has
 * completed.
 */
3203 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3204 transport_complete_task_attr(cmd);
3205
/*
 * Schedule any QUEUE_FULL work still pending for this device.
 */
3209 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3210 schedule_work(&cmd->se_dev->qf_work_queue);
3211
/*
 * Check whether a sense buffer needs to be sent back with the
 * SCSI status for this se_cmd.
 */
3216 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3217 if (transport_get_sense_data(cmd) < 0)
3218 reason = TCM_NON_EXISTENT_LUN;
/*
 * cmd->scsi_status is only set when a task returned a non-GOOD
 * SAM status.
 */
3224 if (cmd->scsi_status) {
3225 ret = transport_send_check_condition_and_sense(
3226 cmd, reason, 1);
3227 if (ret == -EAGAIN || ret == -ENOMEM)
3228 goto queue_full;
3229
3230 transport_lun_remove_cmd(cmd);
3231 transport_cmd_check_stop_to_fabric(cmd);
3232 return;
3233 }
3234 }
3235
/*
 * Invoke the completion callback, used among other things by the
 * XDWRITEREAD XOR emulation.
 */
3239 if (cmd->transport_complete_callback)
3240 cmd->transport_complete_callback(cmd);
3241
3242 switch (cmd->data_direction) {
3243 case DMA_FROM_DEVICE:
3244 spin_lock(&cmd->se_lun->lun_sep_lock);
3245 if (cmd->se_lun->lun_sep) {
3246 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3247 cmd->data_length;
3248 }
3249 spin_unlock(&cmd->se_lun->lun_sep_lock);
3250
3251 ret = cmd->se_tfo->queue_data_in(cmd);
3252 if (ret == -EAGAIN || ret == -ENOMEM)
3253 goto queue_full;
3254 break;
3255 case DMA_TO_DEVICE:
3256 spin_lock(&cmd->se_lun->lun_sep_lock);
3257 if (cmd->se_lun->lun_sep) {
3258 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3259 cmd->data_length;
3260 }
3261 spin_unlock(&cmd->se_lun->lun_sep_lock);
/*
 * Check whether a READ payload needs to be queued for a
 * BIDI command.
 */
3265 if (cmd->t_bidi_data_sg) {
3266 spin_lock(&cmd->se_lun->lun_sep_lock);
3267 if (cmd->se_lun->lun_sep) {
3268 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3269 cmd->data_length;
3270 }
3271 spin_unlock(&cmd->se_lun->lun_sep_lock);
3272 ret = cmd->se_tfo->queue_data_in(cmd);
3273 if (ret == -EAGAIN || ret == -ENOMEM)
3274 goto queue_full;
3275 break;
3276 }
3277
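/* Fall through for DMA_TO_DEVICE */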
3278 case DMA_NONE:
3279 ret = cmd->se_tfo->queue_status(cmd);
3280 if (ret == -EAGAIN || ret == -ENOMEM)
3281 goto queue_full;
3282 break;
3283 default:
3284 break;
3285 }
3286
3287 transport_lun_remove_cmd(cmd);
3288 transport_cmd_check_stop_to_fabric(cmd);
3289 return;
3290
3291queue_full:
3292 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3293 " data_direction: %d\n", cmd, cmd->data_direction);
3294 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3295 transport_handle_queue_full(cmd, cmd->se_dev);
3296}
3297
3298static void transport_free_dev_tasks(struct se_cmd *cmd)
3299{
3300 struct se_task *task, *task_tmp;
3301 unsigned long flags;
3302 LIST_HEAD(dispose_list);
3303
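/*
 * Move tasks that are no longer active onto a local list so they can
 * be freed without holding t_state_lock.
 */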
3304 spin_lock_irqsave(&cmd->t_state_lock, flags);
3305 list_for_each_entry_safe(task, task_tmp,
3306 &cmd->t_task_list, t_list) {
3307 if (!(task->task_flags & TF_ACTIVE))
3308 list_move_tail(&task->t_list, &dispose_list);
3309 }
3310 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3311
3312 while (!list_empty(&dispose_list)) {
3313 task = list_first_entry(&dispose_list, struct se_task, t_list);
3314
3315 if (task->task_sg != cmd->t_data_sg &&
3316 task->task_sg != cmd->t_bidi_data_sg)
3317 kfree(task->task_sg);
3318
3319 list_del(&task->t_list);
3320
3321 cmd->se_dev->transport->free_task(task);
3322 }
3323}
3324
3325static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3326{
3327 struct scatterlist *sg;
3328 int count;
3329
3330 for_each_sg(sgl, sg, nents, count)
3331 __free_page(sg_page(sg));
3332
3333 kfree(sgl);
3334}
3335
3336static inline void transport_free_pages(struct se_cmd *cmd)
3337{
3338 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3339 return;
3340
3341 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3342 cmd->t_data_sg = NULL;
3343 cmd->t_data_nents = 0;
3344
3345 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3346 cmd->t_bidi_data_sg = NULL;
3347 cmd->t_bidi_data_nents = 0;
3348}
3349
/*
 * transport_put_cmd - drop a reference on the command
 * @cmd: command to release
 *
 * Frees the command's tasks, pages and descriptor once both the frontend
 * and backend reference counts have dropped to zero.
 */
3356static void transport_put_cmd(struct se_cmd *cmd)
3357{
3358 unsigned long flags;
3359 int free_tasks = 0;
3360
3361 spin_lock_irqsave(&cmd->t_state_lock, flags);
3362 if (atomic_read(&cmd->t_fe_count)) {
3363 if (!atomic_dec_and_test(&cmd->t_fe_count))
3364 goto out_busy;
3365 }
3366
3367 if (atomic_read(&cmd->t_se_count)) {
3368 if (!atomic_dec_and_test(&cmd->t_se_count))
3369 goto out_busy;
3370 }
3371
3372 if (atomic_read(&cmd->transport_dev_active)) {
3373 atomic_set(&cmd->transport_dev_active, 0);
3374 transport_all_task_dev_remove_state(cmd);
3375 free_tasks = 1;
3376 }
3377 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3378
3379 if (free_tasks != 0)
3380 transport_free_dev_tasks(cmd);
3381
3382 transport_free_pages(cmd);
3383 transport_release_cmd(cmd);
3384 return;
3385out_busy:
3386 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3387}
3388
/*
 * transport_generic_map_mem_to_cmd - use fabric-supplied SGLs instead of
 * allocating data buffers in the core.
 * @cmd: command descriptor to associate the memory with
 * @sgl: scatterlist for the data payload
 * @sgl_count: number of entries in @sgl
 * @sgl_bidi: optional scatterlist for the BIDI READ payload
 * @sgl_bidi_count: number of entries in @sgl_bidi
 */
3401int transport_generic_map_mem_to_cmd(
3402 struct se_cmd *cmd,
3403 struct scatterlist *sgl,
3404 u32 sgl_count,
3405 struct scatterlist *sgl_bidi,
3406 u32 sgl_bidi_count)
3407{
3408 if (!sgl || !sgl_count)
3409 return 0;
3410
3411 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3412 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
/*
 * Reject SCSI data overflow here, since the incoming scatterlists
 * already match what the fabric provided for the expected data
 * transfer length.
 */
3418 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3419 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3420 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3421 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3422 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3423 return -EINVAL;
3424 }
3425
3426 cmd->t_data_sg = sgl;
3427 cmd->t_data_nents = sgl_count;
3428
3429 if (sgl_bidi && sgl_bidi_count) {
3430 cmd->t_bidi_data_sg = sgl_bidi;
3431 cmd->t_bidi_data_nents = sgl_bidi_count;
3432 }
3433 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3434 }
3435
3436 return 0;
3437}
3438EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3439
3440void *transport_kmap_data_sg(struct se_cmd *cmd)
3441{
3442 struct scatterlist *sg = cmd->t_data_sg;
3443 struct page **pages;
3444 int i;
3445
3446 BUG_ON(!sg);
3447
/*
 * A single-entry SGL can be mapped directly with kmap(); otherwise
 * vmap() all of the backing pages into one contiguous virtual range.
 */
3452 if (!cmd->t_data_nents)
3453 return NULL;
3454 else if (cmd->t_data_nents == 1)
3455 return kmap(sg_page(sg)) + sg->offset;
3456
3457
3458 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3459 if (!pages)
3460 return NULL;
3461
3462
3463 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3464 pages[i] = sg_page(sg);
3465 }
3466
3467 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3468 kfree(pages);
3469 if (!cmd->t_data_vmap)
3470 return NULL;
3471
3472 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
3473}
3474EXPORT_SYMBOL(transport_kmap_data_sg);
3475
3476void transport_kunmap_data_sg(struct se_cmd *cmd)
3477{
3478 if (!cmd->t_data_nents)
3479 return;
3480 else if (cmd->t_data_nents == 1)
3481 kunmap(sg_page(cmd->t_data_sg));
3482
3483 vunmap(cmd->t_data_vmap);
3484 cmd->t_data_vmap = NULL;
3485}
3486EXPORT_SYMBOL(transport_kunmap_data_sg);
3487
3488static int
3489transport_generic_get_mem(struct se_cmd *cmd)
3490{
3491 u32 length = cmd->data_length;
3492 unsigned int nents;
3493 struct page *page;
3494 int i = 0;
3495
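/* One scatterlist entry per page is needed to cover data_length. */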
3496 nents = DIV_ROUND_UP(length, PAGE_SIZE);
3497 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3498 if (!cmd->t_data_sg)
3499 return -ENOMEM;
3500
3501 cmd->t_data_nents = nents;
3502 sg_init_table(cmd->t_data_sg, nents);
3503
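/*
 * Back each entry with a freshly allocated zeroed page; the final
 * entry may be shorter than PAGE_SIZE.
 */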
3504 while (length) {
3505 u32 page_len = min_t(u32, length, PAGE_SIZE);
3506 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3507 if (!page)
3508 goto out;
3509
3510 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3511 length -= page_len;
3512 i++;
3513 }
3514 return 0;
3515
3516out:
3517 while (i > 0) {
3518 i--;
3519 __free_page(sg_page(&cmd->t_data_sg[i]));
3520 }
3521 kfree(cmd->t_data_sg);
3522 cmd->t_data_sg = NULL;
3523 return -ENOMEM;
3524}
3525
3526
3527static inline sector_t transport_limit_task_sectors(
3528 struct se_device *dev,
3529 unsigned long long lba,
3530 sector_t sectors)
3531{
3532 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3533
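/*
 * Clamp to the backend's max_sectors and, for TYPE_DISK, never run
 * past the last LBA.
 */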
3534 if (dev->transport->get_device_type(dev) == TYPE_DISK)
3535 if ((lba + sectors) > transport_dev_end_lba(dev))
3536 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3537
3538 return sectors;
3539}
3540
/*
 * Used by HW target mode fabric drivers to build a single chained
 * scatterlist spanning every contiguously allocated se_task->task_sg[]
 * when ->task_sg_chaining is enabled.
 */
3548void transport_do_task_sg_chain(struct se_cmd *cmd)
3549{
3550 struct scatterlist *sg_first = NULL;
3551 struct scatterlist *sg_prev = NULL;
3552 int sg_prev_nents = 0;
3553 struct scatterlist *sg;
3554 struct se_task *task;
3555 u32 chained_nents = 0;
3556 int i;
3557
3558 BUG_ON(!cmd->se_tfo->task_sg_chaining);
3559
/*
 * Walk the task list and chain each task's scatterlist onto the
 * previous one.
 */
3564 list_for_each_entry(task, &cmd->t_task_list, t_list) {
3565 if (!task->task_sg)
3566 continue;
3567
3568 if (!sg_first) {
3569 sg_first = task->task_sg;
3570 chained_nents = task->task_sg_nents;
3571 } else {
3572 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3573 chained_nents += task->task_sg_nents;
3574 }
3575
/*
 * For padded tasks, the extra SG entry allocated in
 * transport_allocate_data_tasks() is accounted for via sg_prev_nents
 * when calling sg_chain() above.
 */
3584 sg_prev_nents = (task->task_sg_nents + 1);
3585 sg_prev = task->task_sg;
3586 }
/*
 * Record the head of the chained SGL and the total number of
 * entries on the command.
 */
3591 cmd->t_tasks_sg_chained = sg_first;
3592 cmd->t_tasks_sg_chained_no = chained_nents;
3593
3594 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3595 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3596 cmd->t_tasks_sg_chained_no);
3597
3598 for_each_sg(cmd->t_tasks_sg_chained, sg,
3599 cmd->t_tasks_sg_chained_no, i) {
3600
3601 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3602 i, sg, sg_page(sg), sg->length, sg->offset);
3603 if (sg_is_chain(sg))
3604 pr_debug("SG: %p sg_is_chain=1\n", sg);
3605 if (sg_is_last(sg))
3606 pr_debug("SG: %p sg_is_last=1\n", sg);
3607 }
3608}
3609EXPORT_SYMBOL(transport_do_task_sg_chain);
3610
/*
 * Break up the command into tasks of at most max_sectors each.
 */
3614static int
3615transport_allocate_data_tasks(struct se_cmd *cmd,
3616 enum dma_data_direction data_direction,
3617 struct scatterlist *cmd_sg, unsigned int sgl_nents)
3618{
3619 struct se_device *dev = cmd->se_dev;
3620 int task_count, i;
3621 unsigned long long lba;
3622 sector_t sectors, dev_max_sectors;
3623 u32 sector_size;
3624
3625 if (transport_cmd_get_valid_sectors(cmd) < 0)
3626 return -EINVAL;
3627
3628 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3629 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3630
3631 WARN_ON(cmd->data_length % sector_size);
3632
3633 lba = cmd->t_task_lba;
3634 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3635 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
/*
 * If a single task is enough, reuse the command's scatterlist and
 * avoid the extra allocations below.
 */
3641 if (task_count == 1) {
3642 struct se_task *task;
3643 unsigned long flags;
3644
3645 task = transport_generic_get_task(cmd, data_direction);
3646 if (!task)
3647 return -ENOMEM;
3648
3649 task->task_sg = cmd_sg;
3650 task->task_sg_nents = sgl_nents;
3651
3652 task->task_lba = lba;
3653 task->task_sectors = sectors;
3654 task->task_size = task->task_sectors * sector_size;
3655
3656 spin_lock_irqsave(&cmd->t_state_lock, flags);
3657 list_add_tail(&task->t_list, &cmd->t_task_list);
3658 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3659
3660 return task_count;
3661 }
3662
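/*
 * Otherwise build task_count tasks, each covering at most
 * dev_max_sectors and carrying its own scatterlist carved out of cmd_sg.
 */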
3663 for (i = 0; i < task_count; i++) {
3664 struct se_task *task;
3665 unsigned int task_size, task_sg_nents_padded;
3666 struct scatterlist *sg;
3667 unsigned long flags;
3668 int count;
3669
3670 task = transport_generic_get_task(cmd, data_direction);
3671 if (!task)
3672 return -ENOMEM;
3673
3674 task->task_lba = lba;
3675 task->task_sectors = min(sectors, dev_max_sectors);
3676 task->task_size = task->task_sectors * sector_size;
3677
3678
3679
3680
3681
3682 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3683
/*
 * If the fabric requests task_sg chaining, allocate one extra padding
 * SG entry for every task except the last so the per-task lists can be
 * linked together later.
 */
3691 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3692 task_sg_nents_padded = (task->task_sg_nents + 1);
3693 } else
3694 task_sg_nents_padded = task->task_sg_nents;
3695
3696 task->task_sg = kmalloc(sizeof(struct scatterlist) *
3697 task_sg_nents_padded, GFP_KERNEL);
3698 if (!task->task_sg) {
3699 cmd->se_dev->transport->free_task(task);
3700 return -ENOMEM;
3701 }
3702
3703 sg_init_table(task->task_sg, task_sg_nents_padded);
3704
3705 task_size = task->task_size;
3706
3707
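/* Carve this task's portion out of the command scatterlist. */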
3708 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3709 if (cmd_sg->length > task_size)
3710 break;
3711
3712 *sg = *cmd_sg;
3713 task_size -= cmd_sg->length;
3714 cmd_sg = sg_next(cmd_sg);
3715 }
3716
3717 lba += task->task_sectors;
3718 sectors -= task->task_sectors;
3719
3720 spin_lock_irqsave(&cmd->t_state_lock, flags);
3721 list_add_tail(&task->t_list, &cmd->t_task_list);
3722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3723 }
3724
3725 return task_count;
3726}
3727
3728static int
3729transport_allocate_control_task(struct se_cmd *cmd)
3730{
3731 struct se_task *task;
3732 unsigned long flags;
3733
/* Workaround for handling zero-length control CDBs. */
3735 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3736 !cmd->data_length)
3737 return 0;
3738
3739 task = transport_generic_get_task(cmd, cmd->data_direction);
3740 if (!task)
3741 return -ENOMEM;
3742
3743 task->task_sg = cmd->t_data_sg;
3744 task->task_size = cmd->data_length;
3745 task->task_sg_nents = cmd->t_data_nents;
3746
3747 spin_lock_irqsave(&cmd->t_state_lock, flags);
3748 list_add_tail(&task->t_list, &cmd->t_task_list);
3749 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3750
3751
3752 return 1;
3753}
3754
/*
 * Allocate any resources required to execute the command.  For WRITEs
 * the payload may not have arrived yet, so notify the fabric via
 * ->write_pending; everything else goes straight to the execution queue.
 */
3760int transport_generic_new_cmd(struct se_cmd *cmd)
3761{
3762 struct se_device *dev = cmd->se_dev;
3763 int task_cdbs, task_cdbs_bidi = 0;
3764 int set_counts = 1;
3765 int ret = 0;
3766
/*
 * Determine whether the fabric has already provided the data memory
 * via transport_generic_map_mem_to_cmd(); otherwise allocate it here.
 */
3772 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3773 cmd->data_length) {
3774 ret = transport_generic_get_mem(cmd);
3775 if (ret < 0)
3776 goto out_fail;
3777 }
3778
/*
 * For BIDI commands set up the READ tasks first.
 */
3782 if (cmd->t_bidi_data_sg &&
3783 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3784 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3785
3786 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3787 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3788 cmd->t_bidi_data_nents);
3789 if (task_cdbs_bidi <= 0)
3790 goto out_fail;
3791
3792 atomic_inc(&cmd->t_fe_count);
3793 atomic_inc(&cmd->t_se_count);
3794 set_counts = 0;
3795 }
3796
3797 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3798 task_cdbs = transport_allocate_data_tasks(cmd,
3799 cmd->data_direction, cmd->t_data_sg,
3800 cmd->t_data_nents);
3801 } else {
3802 task_cdbs = transport_allocate_control_task(cmd);
3803 }
3804
3805 if (task_cdbs < 0)
3806 goto out_fail;
3807 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3808 cmd->t_state = TRANSPORT_COMPLETE;
3809 atomic_set(&cmd->t_transport_active, 1);
3810
3811 if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3812 u8 ua_asc = 0, ua_ascq = 0;
3813
3814 core_scsi3_ua_clear_for_request_sense(cmd,
3815 &ua_asc, &ua_ascq);
3816 }
3817
3818 INIT_WORK(&cmd->work, target_complete_ok_work);
3819 queue_work(target_completion_wq, &cmd->work);
3820 return 0;
3821 }
3822
3823 if (set_counts) {
3824 atomic_inc(&cmd->t_fe_count);
3825 atomic_inc(&cmd->t_se_count);
3826 }
3827
3828 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3829 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3830 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3831
/*
 * For WRITEs, let the fabric know its buffer is ready.  The command
 * (and all of its tasks) is added to the execution queue only after
 * the WRITE data has arrived from the fabric.
 */
3839 if (cmd->data_direction == DMA_TO_DEVICE) {
3840 transport_add_tasks_to_state_queue(cmd);
3841 return transport_generic_write_pending(cmd);
3842 }
3843
/*
 * Everything other than a WRITE goes straight onto the execution queue.
 */
3847 transport_execute_tasks(cmd);
3848 return 0;
3849
3850out_fail:
3851 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3852 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3853 return -EINVAL;
3854}
3855EXPORT_SYMBOL(transport_generic_new_cmd);
3856
/*
 * Called by the fabric once WRITE data has been received to kick off
 * execution of the command's tasks.
 */
3861void transport_generic_process_write(struct se_cmd *cmd)
3862{
3863 transport_execute_tasks(cmd);
3864}
3865EXPORT_SYMBOL(transport_generic_process_write);
3866
3867static void transport_write_pending_qf(struct se_cmd *cmd)
3868{
3869 int ret;
3870
3871 ret = cmd->se_tfo->write_pending(cmd);
3872 if (ret == -EAGAIN || ret == -ENOMEM) {
3873 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3874 cmd);
3875 transport_handle_queue_full(cmd, cmd->se_dev);
3876 }
3877}
3878
3879static int transport_generic_write_pending(struct se_cmd *cmd)
3880{
3881 unsigned long flags;
3882 int ret;
3883
3884 spin_lock_irqsave(&cmd->t_state_lock, flags);
3885 cmd->t_state = TRANSPORT_WRITE_PENDING;
3886 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3887
/*
 * Mark the command as no longer active via transport_cmd_check_stop()
 * so that transport_generic_handle_data() can safely be invoked from
 * fabric interrupt context once the WRITE payload arrives.
 */
3895 transport_cmd_check_stop(cmd, 1, 0);
3896
/*
 * Call the fabric's write_pending() to let the frontend know the
 * WRITE buffers are ready.
 */
3901 ret = cmd->se_tfo->write_pending(cmd);
3902 if (ret == -EAGAIN || ret == -ENOMEM)
3903 goto queue_full;
3904 else if (ret < 0)
3905 return ret;
3906
3907 return 1;
3908
3909queue_full:
3910 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
3911 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3912 transport_handle_queue_full(cmd, cmd->se_dev);
3913 return 0;
3914}
3915
/*
 * transport_release_cmd - free a command descriptor
 * @cmd: command to free
 *
 * Unconditionally frees the command; reference counting or list removal
 * must be handled by the caller.
 */
3923void transport_release_cmd(struct se_cmd *cmd)
3924{
3925 BUG_ON(!cmd->se_tfo);
3926
3927 if (cmd->se_tmr_req)
3928 core_tmr_release_req(cmd->se_tmr_req);
3929 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3930 kfree(cmd->t_task_cdb);
/*
 * Check whether target_wait_for_sess_cmds() is expected to release
 * the se_cmd directly instead.
 */
3935 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3936 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3937 return;
3938
3939 cmd->se_tfo->release_cmd(cmd);
3940}
3941EXPORT_SYMBOL(transport_release_cmd);
3942
3943void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3944{
3945 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3946 if (wait_for_tasks && cmd->se_tmr_req)
3947 transport_wait_for_tasks(cmd);
3948
3949 transport_release_cmd(cmd);
3950 } else {
3951 if (wait_for_tasks)
3952 transport_wait_for_tasks(cmd);
3953
3954 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3955
3956 if (cmd->se_lun)
3957 transport_lun_remove_cmd(cmd);
3958
3959 transport_free_dev_tasks(cmd);
3960
3961 transport_put_cmd(cmd);
3962 }
3963}
3964EXPORT_SYMBOL(transport_generic_free_cmd);
3965
/*
 * target_get_sess_cmd - add the command to the session's active
 * sess_cmd_list.
 * @se_sess: session to reference
 * @se_cmd: command descriptor to add
 */
3970void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3971{
3972 unsigned long flags;
3973
3974 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3975 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3976 se_cmd->check_release = 1;
3977 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3978}
3979EXPORT_SYMBOL(target_get_sess_cmd);
/*
 * target_put_sess_cmd - check for active I/O shutdown or drop the
 * command from the session list.
 * @se_sess: session to reference
 * @se_cmd: command descriptor to drop
 */
3985int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3986{
3987 unsigned long flags;
3988
3989 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3990 if (list_empty(&se_cmd->se_cmd_list)) {
3991 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3992 WARN_ON(1);
3993 return 0;
3994 }
3995
3996 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3997 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3998 complete(&se_cmd->cmd_wait_comp);
3999 return 1;
4000 }
4001 list_del(&se_cmd->se_cmd_list);
4002 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4003
4004 return 0;
4005}
4006EXPORT_SYMBOL(target_put_sess_cmd);
4007
/*
 * target_splice_sess_cmd_list - move active commands onto sess_wait_list
 * @se_sess: session being torn down
 */
4011void target_splice_sess_cmd_list(struct se_session *se_sess)
4012{
4013 struct se_cmd *se_cmd;
4014 unsigned long flags;
4015
4016 WARN_ON(!list_empty(&se_sess->sess_wait_list));
4017 INIT_LIST_HEAD(&se_sess->sess_wait_list);
4018
4019 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4020 se_sess->sess_tearing_down = 1;
4021
4022 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
4023
4024 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
4025 se_cmd->cmd_wait_set = 1;
4026
4027 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4028}
4029EXPORT_SYMBOL(target_splice_sess_cmd_list);
4030
/*
 * target_wait_for_sess_cmds - wait for outstanding commands to complete
 * @se_sess: session to wait on
 * @wait_for_tasks: also call transport_wait_for_tasks() for each command
 */
4035void target_wait_for_sess_cmds(
4036 struct se_session *se_sess,
4037 int wait_for_tasks)
4038{
4039 struct se_cmd *se_cmd, *tmp_cmd;
4040 bool rc = false;
4041
4042 list_for_each_entry_safe(se_cmd, tmp_cmd,
4043 &se_sess->sess_wait_list, se_cmd_list) {
4044 list_del(&se_cmd->se_cmd_list);
4045
4046 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4047 " %d\n", se_cmd, se_cmd->t_state,
4048 se_cmd->se_tfo->get_cmd_state(se_cmd));
4049
4050 if (wait_for_tasks) {
4051 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4052 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4053 se_cmd->se_tfo->get_cmd_state(se_cmd));
4054
4055 rc = transport_wait_for_tasks(se_cmd);
4056
4057 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4058 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4059 se_cmd->se_tfo->get_cmd_state(se_cmd));
4060 }
4061
4062 if (!rc) {
4063 wait_for_completion(&se_cmd->cmd_wait_comp);
4064 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4065 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4066 se_cmd->se_tfo->get_cmd_state(se_cmd));
4067 }
4068
4069 se_cmd->se_tfo->release_cmd(se_cmd);
4070 }
4071}
4072EXPORT_SYMBOL(target_wait_for_sess_cmds);
4073
/*
 * transport_lun_wait_for_tasks():
 *
 * Called from ConfigFS context to stop the passed struct se_cmd so that
 * a struct se_lun can be successfully shut down.
 */
4079static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4080{
4081 unsigned long flags;
4082 int ret;
4083
/*
 * If the frontend has already requested that this command be stopped,
 * it can safely be skipped here.
 */
4087 spin_lock_irqsave(&cmd->t_state_lock, flags);
4088 if (atomic_read(&cmd->t_transport_stop)) {
4089 atomic_set(&cmd->transport_lun_stop, 0);
4090 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4091 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4092 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4093 transport_cmd_check_stop(cmd, 1, 0);
4094 return -EPERM;
4095 }
4096 atomic_set(&cmd->transport_lun_fe_stop, 1);
4097 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4098
4099 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4100
4101 ret = transport_stop_tasks_for_cmd(cmd);
4102
4103 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4104 " %d\n", cmd, cmd->t_task_list_num, ret);
4105 if (!ret) {
4106 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4107 cmd->se_tfo->get_task_tag(cmd));
4108 wait_for_completion(&cmd->transport_lun_stop_comp);
4109