/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;
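
/* tpg_lock protects the global tpg_list of registered portal groups. */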
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	core_clear_initiator_node_from_tpg():
 *
 *	Clear all mapped LUN access for an initiator node ACL before
 *	the ACL is released.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
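		/*
		 * Drop device_list_lock while clearing the mapping;
		 * core_update_device_list_for_node() takes it internally.
		 */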
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	Must be called with tpg->acl_node_lock held.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked wrapper around __core_tpg_get_initiator_node_acl().
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode mapped LUNs for each active LUN in the TPG.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
			lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *	Ensure the ACL has a sane, non-zero queue depth.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize the per-ACL device (mapped LUN) list.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *	Return an existing ACL for initiatorname, or create a dynamic
 *	one when the fabric is running in demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
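
/*
 * Busy-wait until all SPC-3 persistent-reservation references to this
 * node ACL have been dropped, so it is safe to release.
 */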
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
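
/*	core_tpg_clear_object_luns():
 *
 *	Deactivate and remove every active LUN in the TPG.
 */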
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Add an explicit (configfs-driven) initiator node ACL, replacing an
 *	existing dynamic ACL of the same name if one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl,
			 * because we will return a pointer to the existing,
			 * now explicit, node ACL instead.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
					se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the struct se_node_acl is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/*	core_tpg_del_initiator_node_acl():
 *
 *	Remove an initiator node ACL, shutting down any active sessions
 *	that still reference it.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL
		 * exists, forcefully shutdown the $FABRIC_MOD session.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an initiator node ACL, optionally
 *	forcing session reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator
	 * Node. Change the value in the Node's struct se_node_acl, and
	 * call core_set_queue_depth_for_node() to set the requested queue
	 * depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/connection.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
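
/*
 * Set up the TPG's virtual LUN 0, backed by the global g_lun0_dev, so
 * that an initiator logging in always has at least LUN 0 to discover.
 */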
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
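
/*	core_tpg_register():
 *
 *	Called by the fabric module to register a new portal group. The
 *	caller owns se_tpg; this function allocates the backing LUN array
 *	and sets up virtual LUN 0 for normal (non-discovery) TPGs.
 */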
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * se_tpg is owned by the fabric module; only free
			 * the LUN array allocated above.
			 */
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
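
/*	core_tpg_deregister():
 *
 *	Tear down a portal group registered with core_tpg_register(): wait
 *	for outstanding PR references, release remaining node ACLs, and
 *	free the LUN array.
 */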
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
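
/*	core_tpg_pre_addlun():
 *
 *	Validate unpacked_lun and return the matching inactive struct
 *	se_lun, or an ERR_PTR() on failure.
 */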
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
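
/*	core_tpg_post_addlun():
 *
 *	Export the LUN to the fabric and mark it active.
 */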
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
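
/*	core_tpg_shutdown_lun():
 *
 *	Quiesce a LUN: clear node ACL references to it and clear it from
 *	outstanding commands in active sessions.
 */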
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
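
/*	core_tpg_pre_dellun():
 *
 *	Validate unpacked_lun and return the matching active struct se_lun,
 *	or an ERR_PTR() on failure.
 */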
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
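
/*	core_tpg_post_dellun():
 *
 *	Shutdown and unexport the LUN, then mark it free again.
 */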
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}