/*
 * target_core_alua.c
 *
 * SPC-3 compliant asymmetric logical unit assignment (ALUA) emulation
 * for the generic target core.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_ua.h"

static int core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port, int explict, int offline);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
int target_emulate_report_target_port_groups(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct se_port *port;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *buf;
	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
				    Target port group descriptor */
	/*
	 * Need at least 4 bytes of response data or else we can't
	 * even fit the return data length.
	 */
	if (cmd->data_length < 4) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
			" too small\n", cmd->data_length);
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(cmd);

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= (atomic_read(
			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off] = 0x80;	/* T_SUP */
		buf[off] |= 0x40;	/* O_SUP */
		buf[off] |= 0x8;	/* U_SUP */
		buf[off] |= 0x4;	/* S_SUP */
		buf[off] |= 0x2;	/* AN_SUP */
		buf[off++] |= 0x1;	/* AO_SUP */
		/*
		 * TARGET PORT GROUP
		 */
		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
			port = tg_pt_gp_mem->tg_pt;
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.27
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
			buf[off++] = (port->sep_rtpi & 0xff);
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	buf[0] = ((rd_len >> 24) & 0xff);
	buf[1] = ((rd_len >> 16) & 0xff);
	buf[2] = ((rd_len >> 8) & 0xff);
	buf[3] = (rd_len & 0xff);

	transport_kunmap_data_sg(cmd);

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	return 0;
}

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
int target_emulate_set_target_port_groups(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
	unsigned char *buf;
	unsigned char *ptr;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, rc;
	u16 tg_pt_id, rtpi;

	if (!l_port) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -EINVAL;
	}
	buf = transport_kmap_data_sg(cmd);

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
	if (!l_tg_pt_gp_mem) {
		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		rc = -EINVAL;
		goto out;
	}
	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
	if (!l_tg_pt_gp) {
		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		rc = -EINVAL;
		goto out;
	}
	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

	if (!rc) {
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
				" while TPGS_EXPLICT_ALUA is disabled\n");
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		rc = -EINVAL;
		goto out;
	}

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state, &primary);
		if (rc != 0) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			rc = -EINVAL;
			goto out;
		}
		rc = -1;
		/*
		 * If the ASYMMETRIC ACCESS STATE field specifies a primary
		 * target port asymmetric access state, then the TARGET PORT
		 * GROUP OR TARGET PORT field specifies a primary target port
		 * group for which the primary target port asymmetric access
		 * state shall be changed. If the ASYMMETRIC ACCESS STATE
		 * field specifies a secondary target port asymmetric access
		 * state, then the TARGET PORT GROUP OR TARGET PORT field
		 * specifies the relative target port identifier of the
		 * target port for which the secondary target port asymmetric
		 * access state shall be changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&su_dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_inc();
				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

				rc = core_alua_do_port_transition(tg_pt_gp,
						dev, l_port, nacl,
						alua_access_state, 1);

				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_dec();
				break;
			}
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			/*
			 * If no matching target port group ID can be located,
			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
			 */
			if (rc != 0) {
				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
				rc = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to
			 * identify the Target Port in question for the
			 * incoming SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(port, &dev->dev_sep_list,
					sep_list) {
				if (port->sep_rtpi != rtpi)
					continue;

				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
				spin_unlock(&dev->se_port_lock);

				rc = core_alua_set_tg_pt_secondary_state(
						tg_pt_gp_mem, port, 1, 1);

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
			/*
			 * If no matching relative target port identifier can
			 * be located, throw an exception with ASCQ:
			 * INVALID_PARAMETER_LIST
			 */
			if (rc != 0) {
				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
				rc = -EINVAL;
				goto out;
			}
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return rc;
}

static inline int core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs,
	u8 *alua_ascq)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access
	 * state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
	return 0;
}

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
		return 1;
	}

	return 0;
}

/*
 * Used when ALUA is not emulated for the backend device: always report
 * the LUN as accessible.
 *
 * return 1: Is used to signal LUN not accessible, and check condition/not ready
 * return 0: Used to signal success
 */
static int core_alua_state_check_nop(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	return 0;
}

/*
 * Check the primary (and secondary) ALUA access state for the struct se_lun
 * referenced by the received cmd, and filter the CDB against the set of
 * commands allowed in that state.
 *
 * return 1: Is used to signal LUN not accessible, and check condition/not ready
 * return 0: Used to signal success
 */
static int core_alua_state_check(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = lun->lun_sep;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	int out_alua_state, nonop_delay_msecs;

	if (!port)
		return 0;
	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		return 1;
	}
	/*
	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
	 * ALUA target port group, to obtain the current ALUA access state and
	 * the Active/NonOptimized delay configured for the group.
	 */
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
	 * statement so this case is checked first.  For the Optimized ALUA
	 * access state, we want to process the incoming fabric cmd ASAP..
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return core_alua_state_nonoptimized(cmd, cdb,
					nonop_delay_msecs, alua_ascq);
	case ALUA_ACCESS_STATE_STANDBY:
		return core_alua_state_standby(cmd, cdb, alua_ascq);
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
	case ALUA_ACCESS_STATE_TRANSITION:
		return core_alua_state_transition(cmd, cdb, alua_ascq);
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check implicit and explicit ALUA state change requests.
 */
static int core_alua_check_transition(int state, int *primary)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
	case ALUA_ACCESS_STATE_STANDBY:
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		/*
		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
		 * defined as primary target port asymmetric access states.
		 */
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		*primary = 0;
		break;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return -EINVAL;
	}

	return 0;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}

/*
 * Called from transport processing context to optionally delay commands
 * flagged SCF_ALUA_NON_OPTIMIZED in core_alua_state_nonoptimized().
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	if (in_interrupt())
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held
 */
static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	mm_segment_t old_fs;
	struct file *file;
	struct iovec iov[1];
	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;

	memset(iov, 0, sizeof(struct iovec));

	file = filp_open(path, flags, 0600);
	if (IS_ERR(file) || !file || !file->f_dentry) {
		pr_err("filp_open(%s) for ALUA metadata failed\n",
			path);
		return -ENODEV;
	}

	iov[0].iov_base = &md_buf[0];
	iov[0].iov_len = md_buf_len;

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
	set_fs(old_fs);

	if (ret < 0) {
		pr_err("Error writing ALUA metadata file: %s\n", path);
		filp_close(file, NULL);
		return -EIO;
	}
	filp_close(file, NULL);

	return 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int primary_state,
	unsigned char *md_buf)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_wwn *wwn = &su_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	int len;

	memset(path, 0, ALUA_METADATA_PATH_LEN);

	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id, primary_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN,
		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	return core_alua_write_tpg_metadata(path, md_buf, len);
}
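
/*
 * Perform the primary ALUA access state transition for a single target port
 * group: set the intermediate TRANSITION state, honor the optional
 * transition delay, queue ASYMMETRIC ACCESS STATE CHANGED unit attentions
 * for every mapped LUN behind each member port (skipping the initiating
 * I_T nexus for an explicit STPG), optionally write the updated state to
 * the metadata file, and finally set the requested new state.
 */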
static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	struct se_port *l_port,
	struct se_node_acl *nacl,
	unsigned char *md_buf,
	int new_state,
	int explict)
{
	struct se_dev_entry *se_deve;
	struct se_lun_acl *lacl;
	struct se_port *port;
	struct t10_alua_tg_pt_gp_member *mem;
	int old_state = 0;
	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
			ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
		port = mem->tg_pt;
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_for_each_entry(se_deve, &port->sep_alua_list,
					alua_port_list) {
			lacl = se_deve->se_lun_acl;
			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (!lacl)
				continue;

			if (explict &&
			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
			   (l_port != NULL) && (l_port == port))
				continue;

			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&port->sep_alua_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp,
				new_state, md_buf);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explict) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
		core_alua_dump_state(new_state));

	return 0;
}
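
/*
 * Entry point for an explicit (STPG) or implicit primary ALUA access state
 * change.  For members of the default LU group only the passed l_tg_pt_gp
 * is transitioned; otherwise every storage object in the LU group is walked
 * and each target port group with a matching ID is transitioned.
 */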
int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_port *l_port,
	struct se_node_acl *l_nacl,
	int new_state,
	int explict)
{
	struct se_device *dev;
	struct se_port *port;
	struct se_subsystem_dev *su_dev;
	struct se_node_acl *nacl;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	int primary;

	if (core_alua_check_transition(new_state, &primary) != 0)
		return -EINVAL;

	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port group IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
					md_buf, new_state, explict);
		atomic_dec(&lu_gp->lu_gp_ref_cnt);
		smp_mb__after_atomic_dec();
		kfree(md_buf);
		return 0;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {

		dev = lu_gp_mem->lu_gp_mem_dev;
		su_dev = dev->se_sub_dev;
		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&su_dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {

			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target port asymmetric access state is
			 * changed for any target port group accessible via
			 * a logical unit within a LU group, the asymmetric
			 * access states for the same target port group
			 * accessible via other logical units in that LU
			 * group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			if (l_tg_pt_gp == tg_pt_gp) {
				port = l_port;
				nacl = l_nacl;
			} else {
				port = NULL;
				nacl = NULL;
			}
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_inc();
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			core_alua_do_transition_tg_pt(tg_pt_gp, port,
					nacl, md_buf, new_state, explict);

			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_dec();
		}
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
		" Group IDs: %hu %s transition to primary state: %s\n",
		config_item_name(&lu_gp->lu_gp_group.cg_item),
		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit",
		core_alua_dump_state(new_state));

	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	kfree(md_buf);
	return 0;
}

/*
 * Called with port->sep_tg_pt_md_mutex held
 */
static int core_alua_update_tpg_secondary_metadata(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct se_portal_group *se_tpg = port->sep_tpg;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	int len;

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&port->sep_tg_pt_secondary_offline),
			port->sep_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			port->sep_lun->unpacked_lun);

	return core_alua_write_tpg_metadata(path, md_buf, len);
}
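
/*
 * Set or clear the secondary OFFLINE ALUA access state for a single
 * target port, and optionally persist the new state and status to the
 * per-port metadata file.
 */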
static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	int explict,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	u32 md_buf_len;
	int trans_delay_msecs;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously set secondary state for struct se_port
	 */
	if (offline)
		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
	else
		atomic_set(&port->sep_tg_pt_secondary_offline, 0);

	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
	port->sep_tg_pt_secondary_stat = (explict) ?
			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explict) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Do the optional transition delay after we have set the
	 * secondary ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (port->sep_tg_pt_secondary_write_md) {
		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
		if (!md_buf) {
			pr_err("Unable to allocate md_buf for"
				" secondary ALUA access metadata\n");
			return -ENOMEM;
		}
		mutex_lock(&port->sep_tg_pt_md_mutex);
		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
				md_buf, md_buf_len);
		mutex_unlock(&port->sep_tg_pt_md_mutex);

		kfree(md_buf);
	}

	return 0;
}
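
/*
 * Allocate a new ALUA logical unit group.  With def_group set, the group
 * becomes the default_lu_gp and receives an ID immediately; otherwise an
 * ID is assigned later via core_alua_set_lu_gp_id().
 */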
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here we remove *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
			&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}
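
/*
 * Release the LU group member associated with a struct se_device at
 * device shutdown, dropping any remaining association with its LU group.
 */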
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}
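
/*
 * Allocate a new ALUA target port group for the passed storage object.
 * Both explicit and implicit ALUA are enabled by default, and the group
 * starts in the Active/Optimized primary access state.
 */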
struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
	struct se_subsystem_dev *su_dev,
	const char *name,
	int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
	/*
	 * Set the default Active/NonOptimized and transition delays
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;

	if (def_group) {
		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				su_dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		su_dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
				&su_dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;
	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			su_dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&su_dev->t10_alua.tg_pt_gps_list);
	su_dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

	return 0;
}
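
/*
 * Allocate the target port group member that associates a struct se_port
 * with a target port group.
 */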
struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
	struct se_port *port)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
				GFP_KERNEL);
	if (!tg_pt_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);

	tg_pt_gp_mem->tg_pt = port;
	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;

	return tg_pt_gp_mem;
}
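
/*
 * Release a target port group at configfs group removal, re-associating
 * any remaining member ports with the device's default_tg_pt_gp.
 */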
void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_del(&tg_pt_gp->tg_pt_gp_list);
	su_dev->t10_alua.alua_tg_pt_gps_count--;
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_port.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * tg_pt_gp_mem is associated with a single
		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
		 * core_alua_free_tg_pt_gp_mem().
		 *
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
		} else
			tg_pt_gp_mem->tg_pt_gp = NULL;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
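
/*
 * Release the target port group member associated with a struct se_port
 * at port shutdown, dropping any remaining association with its group.
 */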
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		tg_pt_gp_mem->tg_pt_gp = NULL;
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
}

static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
	struct se_subsystem_dev *su_dev,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}

static void core_alua_put_tg_pt_gp_from_name(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
void __core_alua_attach_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
			&tg_pt_gp->tg_pt_gp_mem_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
static void __core_alua_drop_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	tg_pt_gp_mem->tg_pt_gp = NULL;
	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
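
/*
 * configfs show handler reporting the port's target port group alias, ID,
 * and current primary/secondary ALUA access state and status.
 */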
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct config_item *tg_pt_ci;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return len;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return len;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
					&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
					tg_pt_gp->tg_pt_gp_alua_access_status),
			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
			"Offline" : "None",
			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	return len;
}

ssize_t core_alua_store_tg_pt_gp_info(
	struct se_port *port,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg;
	struct se_lun *lun;
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	tpg = port->sep_tpg;
	lun = port->sep_lun;

	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
		pr_warn("SPC3_ALUA_EMULATED not enabled for"
			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		return -EINVAL;
	}

	/* Reserve one byte so strstrip() below always sees a NUL terminator */
	if (count >= TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" will be
	 * located by core_alua_get_tg_pt_gp_by_name() below.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		if (tg_pt_gp_new)
			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
		return -EINVAL;
	}

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
		 */
		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
		move = 1;
	}
	/*
	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
	 */
	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
	return count;
}
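
/*
 * configfs show/store handlers for the per target port group ALUA
 * attributes: access type, non-optimized and transition delays, the
 * preferred bit, and the per-LUN secondary offline state, status and
 * write_metadata settings.
 */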
ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}

ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}
	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		pr_err("Unable to locate *tg_pt_gp_mem\n");
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
			lun->lun_sep, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return -EINVAL;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;

	return count;
}

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n",
			lun->lun_sep->sep_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;

	return count;
}
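
/*
 * Called during device configuration to select the ALUA operating mode:
 * passthrough for pSCSI backends (or when force_pt is set), full SPC-3
 * emulation for SPC-3 capable devices, or disabled for SPC-2 devices.
 */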
int core_setup_alua(struct se_device *dev, int force_pt)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	/*
	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
	 * cause a problem because libata and some SATA RAID HBAs appear
	 * under Linux/SCSI, but emulate SCSI logic themselves.
	 */
	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
		alua->alua_type = SPC_ALUA_PASSTHROUGH;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
			" emulation\n", dev->transport->name);
		return 0;
	}
	/*
	 * If SPC-3 or above is reported by real or emulated struct se_device,
	 * use emulated ALUA.
	 */
	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
			" device\n", dev->transport->name);
		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		alua->alua_type = SPC3_ALUA_EMULATED;
		alua->alua_state_check = &core_alua_state_check;
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	} else {
		alua->alua_type = SPC2_ALUA_DISABLED;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
			" device\n", dev->transport->name);
	}

	return 0;
}