// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance which we got the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
		len += scnprintf(buf + len, size - len, "%shost_iface=%s",
				(len) ? "," : "", ctrl->opts->host_iface);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
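
/*
 * For illustration: with traddr and trsvcid set in opts->mask,
 * nvmf_get_address() above fills @buf with a newline-terminated line such
 * as (addresses hypothetical):
 *
 *	traddr=192.168.1.10,trsvcid=4420
 *
 * Only the options present in opts->mask are emitted, comma-separated.
 */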

/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);

/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;	/* attrib 1 == 8 byte property */
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
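
/*
 * A typical use of the property accessors above (a sketch, not code from
 * this file): fabrics transports read the 64-bit Controller Capabilities
 * property during controller initialization, e.g.
 *
 *	ret = nvmf_reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
 *
 * NVME_REG_CAP is the standard register offset from <linux/nvme.h>; the
 * property space mirrors the PCIe controller register layout.
 */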

/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd = { };
	int ret;

	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;	/* attrib 0 == 4 byte property */
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);

/*
 * nvmf_log_connect_error() - Decode and log a failed Connect command.
 * @ctrl:	Controller instance the Connect was issued for.
 * @errval:	NVMe status code (or negative errno) returned for the command.
 * @offset:	Value of the Connect result field; for
 *		NVME_SC_CONNECT_INVALID_PARAM it encodes the byte offset of
 *		the offending SQE or data-capsule field.
 * @cmd:	The Connect SQE that was submitted.
 * @data:	The Connect data payload that was submitted.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & ~NVME_SC_DNR;

	switch (err_sctype) {
	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, le16_to_cpu(data->cntlid));
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, le16_to_cpu(cmd->connect.qid));
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;
	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;
	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;
	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d\n",
			cmd->connect.recfmt);
		break;
	case NVME_SC_HOST_PATH_ERROR:
		dev_err(ctrl->device,
			"Connect command failed: host path error\n");
		break;
	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	}
}

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * target system and the host system.
 *
 * Return:
 *	0: successful connect
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * Set keep-alive timeout in seconds granularity (ms * 1000)
	 */
	cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);

/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per the NVMe standard).
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via the NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: successful connect
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
	struct nvme_command cmd = { };
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);

bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of probing registered devices.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);

/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * API function that unregisters the type of specific transport fabric
 * being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_IFACE,		"host_iface=%s"		},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_TOS,			"tos=%d"		},
	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
	{ NVMF_OPT_ERR,			NULL			}
};
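
/*
 * For illustration, a hypothetical controller creation string as parsed by
 * nvmf_parse_options() below (option names must match the table above;
 * all values here are made up):
 *
 *	transport=tcp,traddr=192.168.1.10,trsvcid=4420,
 *	nqn=nqn.2014-08.org.example:subsys1,queue_size=128
 *
 * In practice the string is written as a single comma-separated line to
 * the /dev/nvme-fabrics character device registered at the bottom of
 * this file.
 */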

static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = 0;
	opts->duplicate_connect = false;
	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1; /* < 0 == use transport default */

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
						   num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_FAIL_FAST_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token >= 0)
				pr_warn("I/O fail on reconnect controller after %d sec\n",
					token);
			opts->fast_io_fail_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			nvmf_host_put(opts->host);
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_IFACE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_iface);
			opts->host_iface = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	} else {
		if (!opts->kato)
			opts->kato = NVME_DEFAULT_KATO;
	}
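
	/*
	 * Worked example (values hypothetical): with ctrl_loss_tmo=600 and
	 * reconnect_delay=10, max_reconnects below becomes
	 * DIV_ROUND_UP(600, 10) == 60 reconnect attempts before the
	 * controller is given up on; a negative ctrl_loss_tmo means
	 * reconnect forever.
	 */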
	if (ctrl_loss_tmo < 0) {
		opts->max_reconnects = -1;
	} else {
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
		if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
			pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
				opts->fast_io_fail_tmo, ctrl_loss_tmo);
	}

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address is rough. In most cases, none is
	 * specified and the host port is selected by the stack.
	 *
	 * Assume no match if:
	 * -  local address is specified and address is not the same
	 * -  local address is not specified but remote is, or vice versa
	 *    (admin using specific host_traddr when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts->host_iface);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW |\
				 NVMF_OPT_FAIL_FAST_TMO)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below.  Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}
static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};
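
/*
 * Userspace creates controllers by writing an options string to the
 * /dev/nvme-fabrics node registered above, e.g. (hypothetical values):
 *
 *	echo "transport=loop,nqn=nqn.2014-08.org.example:subsys1" \
 *		> /dev/nvme-fabrics
 *
 * Reading the node back returns "instance=%d,cntlid=%d" for the controller
 * created through this open file (see nvmf_dev_show() above).
 */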

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}

MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);