// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

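/*
 * nvme_wq - hosts nvme related works that are not reset or delete, such as
 *	scan, aen handling, fw activation and keep-alive
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 */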
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

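/*
 * Prepare a namespace queue for teardown: mark the queue dying exactly once,
 * forcibly unquiesce it so pending requests can complete or fail, and only
 * then clear the capacity.
 */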
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_set_queue_dying(ns->queue);
	blk_mq_unquiesce_queue(ns->queue);

	set_capacity_and_notify(ns->disk, 0);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
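	/*
	 * Only queue new scan work when both the admin and I/O queues are
	 * alive.
	 */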
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

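/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */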
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
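	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */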
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

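	/* The mask and shift result must be <= 3 */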
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));

	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

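/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * free the request.
 */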
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

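	/* don't abort one completed request */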
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

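/*
 * Returns true for sink states that can't ever transition back to live.
 */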
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

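/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */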
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else
		req->timeout = NVME_ADMIN_TIMEOUT;

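	/* passthru commands should let the driver set the SGL flags */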
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_HIPRI;
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}

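/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */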
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

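	/*
	 * We cannot safely send passthru commands on the admin queue until
	 * the controller is LIVE, because we cannot make sure that they go
	 * out after the admin connect, controller enable and/or other
	 * commands in the initialization sequence.  Fail them here so that
	 * they are rescheduled.
	 */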
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
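		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */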
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c = { };

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c = { };

	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		goto out_disable_stream;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
			 ctrl->nssa);
		goto out_disable_stream;
	}

	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
}

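/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */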
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

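	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */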
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
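		/*
		 * If we fail allocation of our range, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */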
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	if (nvme_ns_has_pi(ns))
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
	else
		cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
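		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */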
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return BLK_STS_OK;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
		struct page *page = req->special_vec.bv_page;

		if (page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_clear_nvme_request(req);
		memset(cmd, 0, sizeof(*cmd));
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
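		/* these are setup prior to execution in nvme_init_request() */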
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

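/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */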
static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
		bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(disk, rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}

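/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */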
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(NULL, req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

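	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */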
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_ctrl_finish(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
}

int nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;
	int ret;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(disk, rq, false);
	if (effects)
		nvme_passthru_end(ctrl, effects);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

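/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */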
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		nvme_queue_keep_alive_work(ctrl);
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
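		/* allocation failure, reset the controller */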
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

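/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */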
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

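	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */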
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
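		/* Skip unknown types */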
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		goto out_free_id;
	}

	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if ((*id)->ncap == 0)
		goto out_free_id;

	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));

	return 0;

out_free_id:
	kfree(*id);
	return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

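	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be only way to fix them up.
	 */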
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static int nvme_ns_open(struct nvme_ns *ns)
{
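	/* should never be called due to GENHD_FL_HIDDEN */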
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{
	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
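	/* some standard values */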
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity = { };

	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (ctrl->max_discard_sectors == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

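	/* If discard is already enabled, don't reset queue limits */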
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		*phys_bs = ns->sws * (1 << ns->lba_shift);
		if (ns->sgs)
			*io_opt = *phys_bs * ns->sgs;
	}

	return 0;
}

static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

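	/*
	 * The PI implementation requires the metadata size to be equal to the
	 * t10 pi tuple size.
	 */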
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return 0;
	if (ctrl->ops->flags & NVME_F_FABRICS) {
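		/*
		 * The NVMe over Fabrics specification only supports metadata
		 * as part of the extended data LBA.  We rely on HCA/HBA
		 * support to remap the separate metadata buffer from the
		 * block layer.
		 */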
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return -EINVAL;
		if (ctrl->max_integrity_segments)
			ns->features |=
				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	} else {
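		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI
		 * support.  We allow extended LBAs for the passthrough
		 * interface, though.
		 */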
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}

	return 0;
}

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 7);
	blk_queue_write_cache(q, vwc, vwc);
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

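	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */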
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
	if (id->nabo == 0) {
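		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */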
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
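		/* NPWG = Namespace Preferred Write Granularity */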
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
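		/* NOWS = Namespace Optimal Write Size */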
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);

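	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */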
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

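	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */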
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns->ms, ns->pi_type,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);

	set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
		test_bit(NVME_NS_FORCE_RO, &ns->flags));
}

static inline bool nvme_first_scan(struct gendisk *disk)
{
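	/* nvme_alloc_ns() scans the disk prior to adding it */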
	return !(disk->flags & GENHD_FL_UP);
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	int ret;

	blk_mq_freeze_queue(ns->disk->queue);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	ret = nvme_configure_metadata(ns, id);
	if (ret)
		goto out_unfreeze;
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

	if (ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_update_zone_info(ns, lbaf);
		if (ret)
			goto out_unfreeze;
	}

	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
			goto out;
	}

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		blk_queue_update_readahead(ns->head->disk->queue);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}
	return 0;

out_unfreeze:
	blk_mq_unfreeze_queue(ns->disk->queue);
out:
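	/*
	 * If probing fails due to an unsupported feature, hide the block
	 * device, but still allow other access.
	 */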
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
		ret = 0;
	}
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, u8 data[16])
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd = { };

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
			NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static const struct block_device_operations nvme_bdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
			return -ENODEV;
		}
	}

	return ret;
}

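/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... or so they say.
 */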
2105int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2106{
2107 int ret;
2108
2109 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2110 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2111
2112 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2113 if (ret)
2114 return ret;
2115
2116 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2117 msleep(NVME_QUIRK_DELAY_AMOUNT);
2118
2119 return nvme_wait_ready(ctrl, ctrl->cap, false);
2120}
2121EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2122
2123int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2124{
2125 unsigned dev_page_min;
2126 int ret;
2127
2128 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2129 if (ret) {
2130 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2131 return ret;
2132 }
2133 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2134
2135 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2136 dev_err(ctrl->device,
2137 "Minimum device page size %u too large for host (%u)\n",
2138 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2139 return -ENODEV;
2140 }
2141
2142 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2143 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2144 else
2145 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2146 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2147 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2148 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2149 ctrl->ctrl_config |= NVME_CC_ENABLE;
2150
2151 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2152 if (ret)
2153 return ret;
2154 return nvme_wait_ready(ctrl, ctrl->cap, true);
2155}
2156EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2157
2158int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2159{
2160 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2161 u32 csts;
2162 int ret;
2163
2164 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2165 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2166
2167 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2168 if (ret)
2169 return ret;
2170
2171 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2172 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2173 break;
2174
2175 msleep(100);
2176 if (fatal_signal_pending(current))
2177 return -EINTR;
2178 if (time_after(jiffies, timeout)) {
2179 dev_err(ctrl->device,
2180 "Device shutdown incomplete; abort shutdown\n");
2181 return -ENODEV;
2182 }
2183 }
2184
2185 return ret;
2186}
2187EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2188
2189static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2190{
2191 __le64 ts;
2192 int ret;
2193
2194 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2195 return 0;
2196
2197 ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2198 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2199 NULL);
2200 if (ret)
2201 dev_warn_once(ctrl->device,
2202 "could not set timestamp (%d)\n", ret);
2203 return ret;
2204}
2205
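/*
 * Enable Advanced Command Retry (ACRE) via the Host Behavior Support
 * feature when the controller reports any Command Retry Delay Times.
 */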
2206static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2207{
2208 struct nvme_feat_host_behavior *host;
2209 int ret;
2210
2211	/* Don't bother enabling the feature if retry delay is not reported */
2212 if (!ctrl->crdt[0])
2213 return 0;
2214
2215 host = kzalloc(sizeof(*host), GFP_KERNEL);
2216 if (!host)
2217 return 0;
2218
2219 host->acre = NVME_ENABLE_ACRE;
2220 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2221 host, sizeof(*host), NULL);
2222 kfree(host);
2223 return ret;
2224}
2225
2226/*
2227 * Check whether the given total (entry + exit) latency for a power state
2228 * allows it to be used as an APST transition target.  A state within the
2229 * primary latency tolerance is assigned the primary timeout, one within
2230 * the secondary tolerance the secondary timeout; *last_index ensures each
2231 * timeout class is used at most once while the caller walks the states
2232 * from deepest to shallowest.
2233 */
2234static bool nvme_apst_get_transition_time(u64 total_latency,
2235 u64 *transition_time, unsigned *last_index)
2236{
2237 if (total_latency <= apst_primary_latency_tol_us) {
2238 if (*last_index == 1)
2239 return false;
2240 *last_index = 1;
2241 *transition_time = apst_primary_timeout_ms;
2242 return true;
2243 }
2244 if (apst_secondary_timeout_ms &&
2245 total_latency <= apst_secondary_latency_tol_us) {
2246 if (*last_index <= 2)
2247 return false;
2248 *last_index = 2;
2249 *transition_time = apst_secondary_timeout_ms;
2250 return true;
2251 }
2252 return false;
2253}
2254
2255/*
2256 * APST (Autonomous Power State Transition) lets us program a table of power
2257 * state transitions that the controller will perform automatically.
2258 *
2259 * Depending on module params, one of the two supported techniques will be used:
2260 *
2261 * - If the parameters provide explicit timeouts and tolerances, they will be
2262 *   used to build a table with up to 2 non-operational states to transition to.
2263 *   The default parameter values were selected based on the values used by
2264 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
2265 *   regeneration of the APST table, this approach may miss some opportunities
2266 *   to save power.
2267 *
2268 * - If not, we'll configure the table with a simple heuristic: we are willing
2269 *   to spend at most 2% of the time transitioning between power states.
2270 *   Therefore, when running in any given state, we will enter the next
2271 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
2272 *   microseconds, as long as that state's exit latency is under the requested
2273 *   maximum latency.
2274 *
2275 * We will not autonomously enter any non-operational state for which the total
2276 * latency exceeds ps_max_latency_us.
2277 *
2278 * Users can set ps_max_latency_us to zero to turn off APST.
2279 */
2280static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2281{
2282 struct nvme_feat_auto_pst *table;
2283 unsigned apste = 0;
2284 u64 max_lat_us = 0;
2285 __le64 target = 0;
2286 int max_ps = -1;
2287 int state;
2288 int ret;
2289 unsigned last_lt_index = UINT_MAX;
2290
2291	/*
2292	 * If APST isn't supported or if we haven't been initialized yet,
2293	 * then don't do anything.
2294	 */
2295 if (!ctrl->apsta)
2296 return 0;
2297
2298 if (ctrl->npss > 31) {
2299 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2300 return 0;
2301 }
2302
2303 table = kzalloc(sizeof(*table), GFP_KERNEL);
2304 if (!table)
2305 return 0;
2306
2307 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2308		/* Turn off APST. */
2309 dev_dbg(ctrl->device, "APST disabled\n");
2310 goto done;
2311 }
2312
2313	/*
2314	 * Walk through all states from lowest-power to highest-power.
2315	 * According to the spec, lower-numbered states use more power.  NPSS,
2316	 * despite the name, is the index of the lowest-power state, not the
2317	 * number of states.
2318	 */
2319 for (state = (int)ctrl->npss; state >= 0; state--) {
2320 u64 total_latency_us, exit_latency_us, transition_ms;
2321
2322 if (target)
2323 table->entries[state] = target;
2324
2325		/*
2326		 * Don't allow transitions to the deepest state if it's quirked
2327		 * off.
2328		 */
2329 if (state == ctrl->npss &&
2330 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2331 continue;
2332
2333		/*
2334		 * Is this state a useful non-operational state?  If it's not,
2335		 * then don't use it.
2336		 */
2337 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2338 continue;
2339
2340 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2341 if (exit_latency_us > ctrl->ps_max_latency_us)
2342 continue;
2343
2344 total_latency_us = exit_latency_us +
2345 le32_to_cpu(ctrl->psd[state].entry_lat);
2346
2347		/*
2348		 * This state is good.  It can be used as the APST idle target
2349		 * for higher power states.
2350		 */
2351 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2352 if (!nvme_apst_get_transition_time(total_latency_us,
2353 &transition_ms, &last_lt_index))
2354 continue;
2355 } else {
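			/* 50 * (enlat + exlat) us == ceil(total / 20) ms */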
2356 transition_ms = total_latency_us + 19;
2357 do_div(transition_ms, 20);
2358 if (transition_ms > (1 << 24) - 1)
2359 transition_ms = (1 << 24) - 1;
2360 }
2361
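		/*
		 * Table entry format: idle transition power state in bits
		 * 07:03, idle time prior to transition (in ms) in bits 31:08.
		 */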
2362 target = cpu_to_le64((state << 3) | (transition_ms << 8));
2363 if (max_ps == -1)
2364 max_ps = state;
2365 if (total_latency_us > max_lat_us)
2366 max_lat_us = total_latency_us;
2367 }
2368
2369 if (max_ps == -1)
2370 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2371 else
2372 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2373 max_ps, max_lat_us, (int)sizeof(*table), table);
2374 apste = 1;
2375
2376done:
2377 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2378 table, sizeof(*table), NULL);
2379 if (ret)
2380 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2381 kfree(table);
2382 return ret;
2383}
2384
2385static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2386{
2387 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2388 u64 latency;
2389
2390 switch (val) {
2391 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2392 case PM_QOS_LATENCY_ANY:
2393 latency = U64_MAX;
2394 break;
2395
2396 default:
2397 latency = val;
2398 }
2399
2400 if (ctrl->ps_max_latency_us != latency) {
2401 ctrl->ps_max_latency_us = latency;
2402 if (ctrl->state == NVME_CTRL_LIVE)
2403 nvme_configure_apst(ctrl);
2404 }
2405}
2406
2407struct nvme_core_quirk_entry {
2408	/*
2409	 * NVMe model and firmware strings are padded with spaces.  For
2410	 * simplicity, strings in the quirk table are padded with NULLs
2411	 * instead.
2412	 */
2413 u16 vid;
2414 const char *mn;
2415 const char *fr;
2416 unsigned long quirks;
2417};
2418
2419static const struct nvme_core_quirk_entry core_quirks[] = {
2420 {
2421		/*
2422		 * This Toshiba device seems to die when APST is used, so
2423		 * disable APST on it outright.
2424		 */
2425 .vid = 0x1179,
2426 .mn = "THNSF5256GPUK TOSHIBA",
2427 .quirks = NVME_QUIRK_NO_APST,
2428 },
2429 {
2430		/*
2431		 * This LiteON firmware version has a race condition
2432		 * associated with actions related to suspend to idle;
2433		 * LiteON resolved the problem in later firmware.
2434		 */
2435 .vid = 0x14a4,
2436 .fr = "22301111",
2437 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2438 }
2439};
2440
2441/* match is null-terminated but idstr is space-padded. */
2442static bool string_matches(const char *idstr, const char *match, size_t len)
2443{
2444 size_t matchlen;
2445
2446 if (!match)
2447 return true;
2448
2449 matchlen = strlen(match);
2450 WARN_ON_ONCE(matchlen > len);
2451
2452 if (memcmp(idstr, match, matchlen))
2453 return false;
2454
2455 for (; matchlen < len; matchlen++)
2456 if (idstr[matchlen] != ' ')
2457 return false;
2458
2459 return true;
2460}
2461
2462static bool quirk_matches(const struct nvme_id_ctrl *id,
2463 const struct nvme_core_quirk_entry *q)
2464{
2465 return q->vid == le16_to_cpu(id->vid) &&
2466 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2467 string_matches(id->fr, q->fr, sizeof(id->fr));
2468}
2469
2470static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2471 struct nvme_id_ctrl *id)
2472{
2473 size_t nqnlen;
2474 int off;
2475
2476	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2477 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2478 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2479 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2480 return;
2481 }
2482
2483 if (ctrl->vs >= NVME_VS(1, 2, 1))
2484 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2485 }
2486
2487	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2488 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2489 "nqn.2014.08.org.nvmexpress:%04x%04x",
2490 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2491 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2492 off += sizeof(id->sn);
2493 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2494 off += sizeof(id->mn);
2495 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2496}
2497
2498static void nvme_release_subsystem(struct device *dev)
2499{
2500 struct nvme_subsystem *subsys =
2501 container_of(dev, struct nvme_subsystem, dev);
2502
2503 if (subsys->instance >= 0)
2504 ida_simple_remove(&nvme_instance_ida, subsys->instance);
2505 kfree(subsys);
2506}
2507
2508static void nvme_destroy_subsystem(struct kref *ref)
2509{
2510 struct nvme_subsystem *subsys =
2511 container_of(ref, struct nvme_subsystem, ref);
2512
2513 mutex_lock(&nvme_subsystems_lock);
2514 list_del(&subsys->entry);
2515 mutex_unlock(&nvme_subsystems_lock);
2516
2517 ida_destroy(&subsys->ns_ida);
2518 device_del(&subsys->dev);
2519 put_device(&subsys->dev);
2520}
2521
2522static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2523{
2524 kref_put(&subsys->ref, nvme_destroy_subsystem);
2525}
2526
2527static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2528{
2529 struct nvme_subsystem *subsys;
2530
2531 lockdep_assert_held(&nvme_subsystems_lock);
2532
2533	/*
2534	 * Fail matches for discovery subsystems. This results
2535	 * in each discovery controller bound to a unique subsystem.
2536	 * This avoids issues with validating controller values
2537	 * that can only be true when there is a single unique subsystem.
2538	 * There may be multiple and completely independent entities
2539	 * that provide discovery controllers.
2540	 */
2541 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2542 return NULL;
2543
2544 list_for_each_entry(subsys, &nvme_subsystems, entry) {
2545 if (strcmp(subsys->subnqn, subsysnqn))
2546 continue;
2547 if (!kref_get_unless_zero(&subsys->ref))
2548 continue;
2549 return subsys;
2550 }
2551
2552 return NULL;
2553}
2554
2555#define SUBSYS_ATTR_RO(_name, _mode, _show) \
2556 struct device_attribute subsys_attr_##_name = \
2557 __ATTR(_name, _mode, _show, NULL)
2558
2559static ssize_t nvme_subsys_show_nqn(struct device *dev,
2560 struct device_attribute *attr,
2561 char *buf)
2562{
2563 struct nvme_subsystem *subsys =
2564 container_of(dev, struct nvme_subsystem, dev);
2565
2566 return sysfs_emit(buf, "%s\n", subsys->subnqn);
2567}
2568static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2569
2570#define nvme_subsys_show_str_function(field) \
2571static ssize_t subsys_##field##_show(struct device *dev, \
2572 struct device_attribute *attr, char *buf) \
2573{ \
2574 struct nvme_subsystem *subsys = \
2575 container_of(dev, struct nvme_subsystem, dev); \
2576 return sysfs_emit(buf, "%.*s\n", \
2577 (int)sizeof(subsys->field), subsys->field); \
2578} \
2579static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2580
2581nvme_subsys_show_str_function(model);
2582nvme_subsys_show_str_function(serial);
2583nvme_subsys_show_str_function(firmware_rev);
2584
2585static struct attribute *nvme_subsys_attrs[] = {
2586 &subsys_attr_model.attr,
2587 &subsys_attr_serial.attr,
2588 &subsys_attr_firmware_rev.attr,
2589 &subsys_attr_subsysnqn.attr,
2590#ifdef CONFIG_NVME_MULTIPATH
2591 &subsys_attr_iopolicy.attr,
2592#endif
2593 NULL,
2594};
2595
2596static const struct attribute_group nvme_subsys_attrs_group = {
2597 .attrs = nvme_subsys_attrs,
2598};
2599
2600static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2601 &nvme_subsys_attrs_group,
2602 NULL,
2603};
2604
2605static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2606{
2607 return ctrl->opts && ctrl->opts->discovery_nqn;
2608}
2609
2610static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2611 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2612{
2613 struct nvme_ctrl *tmp;
2614
2615 lockdep_assert_held(&nvme_subsystems_lock);
2616
2617 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2618 if (nvme_state_terminal(tmp))
2619 continue;
2620
2621 if (tmp->cntlid == ctrl->cntlid) {
2622 dev_err(ctrl->device,
2623 "Duplicate cntlid %u with %s, rejecting\n",
2624 ctrl->cntlid, dev_name(tmp->device));
2625 return false;
2626 }
2627
2628 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2629 nvme_discovery_ctrl(ctrl))
2630 continue;
2631
2632 dev_err(ctrl->device,
2633 "Subsystem does not support multiple controllers\n");
2634 return false;
2635 }
2636
2637 return true;
2638}
2639
2640static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2641{
2642 struct nvme_subsystem *subsys, *found;
2643 int ret;
2644
2645 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2646 if (!subsys)
2647 return -ENOMEM;
2648
2649 subsys->instance = -1;
2650 mutex_init(&subsys->lock);
2651 kref_init(&subsys->ref);
2652 INIT_LIST_HEAD(&subsys->ctrls);
2653 INIT_LIST_HEAD(&subsys->nsheads);
2654 nvme_init_subnqn(subsys, ctrl, id);
2655 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2656 memcpy(subsys->model, id->mn, sizeof(subsys->model));
2657 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2658 subsys->vendor_id = le16_to_cpu(id->vid);
2659 subsys->cmic = id->cmic;
2660 subsys->awupf = le16_to_cpu(id->awupf);
2661#ifdef CONFIG_NVME_MULTIPATH
2662 subsys->iopolicy = NVME_IOPOLICY_NUMA;
2663#endif
2664
2665 subsys->dev.class = nvme_subsys_class;
2666 subsys->dev.release = nvme_release_subsystem;
2667 subsys->dev.groups = nvme_subsys_attrs_groups;
2668 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2669 device_initialize(&subsys->dev);
2670
2671 mutex_lock(&nvme_subsystems_lock);
2672 found = __nvme_find_get_subsystem(subsys->subnqn);
2673 if (found) {
2674 put_device(&subsys->dev);
2675 subsys = found;
2676
2677 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2678 ret = -EINVAL;
2679 goto out_put_subsystem;
2680 }
2681 } else {
2682 ret = device_add(&subsys->dev);
2683 if (ret) {
2684 dev_err(ctrl->device,
2685 "failed to register subsystem device.\n");
2686 put_device(&subsys->dev);
2687 goto out_unlock;
2688 }
2689 ida_init(&subsys->ns_ida);
2690 list_add_tail(&subsys->entry, &nvme_subsystems);
2691 }
2692
2693 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2694 dev_name(ctrl->device));
2695 if (ret) {
2696 dev_err(ctrl->device,
2697 "failed to create sysfs link from subsystem.\n");
2698 goto out_put_subsystem;
2699 }
2700
2701 if (!found)
2702 subsys->instance = ctrl->instance;
2703 ctrl->subsys = subsys;
2704 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2705 mutex_unlock(&nvme_subsystems_lock);
2706 return 0;
2707
2708out_put_subsystem:
2709 nvme_put_subsystem(subsys);
2710out_unlock:
2711 mutex_unlock(&nvme_subsystems_lock);
2712 return ret;
2713}
2714
2715int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2716 void *log, size_t size, u64 offset)
2717{
2718 struct nvme_command c = { };
2719 u32 dwlen = nvme_bytes_to_numd(size);
2720
2721 c.get_log_page.opcode = nvme_admin_get_log_page;
2722 c.get_log_page.nsid = cpu_to_le32(nsid);
2723 c.get_log_page.lid = log_page;
2724 c.get_log_page.lsp = lsp;
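	/* NUMD is a 0's based dword count split into two 16-bit fields */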
2725 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2726 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2727 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2728 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2729 c.get_log_page.csi = csi;
2730
2731 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2732}
2733
2734static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2735 struct nvme_effects_log **log)
2736{
2737 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
2738 int ret;
2739
2740 if (cel)
2741 goto out;
2742
2743 cel = kzalloc(sizeof(*cel), GFP_KERNEL);
2744 if (!cel)
2745 return -ENOMEM;
2746
2747 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2748 cel, sizeof(*cel), 0);
2749 if (ret) {
2750 kfree(cel);
2751 return ret;
2752 }
2753
2754 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2755out:
2756 *log = cel;
2757 return 0;
2758}
2759
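/*
 * Convert a power-of-two value in CAP.MPSMIN page-size units (e.g. MDTS
 * or WZSL) to a 512-byte sector count, saturating at UINT_MAX on overflow.
 */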
2760static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2761{
2762 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2763
2764 if (check_shl_overflow(1U, units + page_shift - 9, &val))
2765 return UINT_MAX;
2766 return val;
2767}
2768
2769static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
2770{
2771 struct nvme_command c = { };
2772 struct nvme_id_ctrl_nvm *id;
2773 int ret;
2774
2775 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
2776 ctrl->max_discard_sectors = UINT_MAX;
2777 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
2778 } else {
2779 ctrl->max_discard_sectors = 0;
2780 ctrl->max_discard_segments = 0;
2781 }
2782
2783
2784
2785
2786
2787
2788
2789 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
2790 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
2791 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
2792 else
2793 ctrl->max_zeroes_sectors = 0;
2794
2795 if (nvme_ctrl_limited_cns(ctrl))
2796 return 0;
2797
2798 id = kzalloc(sizeof(*id), GFP_KERNEL);
2799 if (!id)
2800 return 0;
2801
2802 c.identify.opcode = nvme_admin_identify;
2803 c.identify.cns = NVME_ID_CNS_CS_CTRL;
2804 c.identify.csi = NVME_CSI_NVM;
2805
2806 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
2807 if (ret)
2808 goto free_data;
2809
2810 if (id->dmrl)
2811 ctrl->max_discard_segments = id->dmrl;
2812 if (id->dmrsl)
2813 ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
2814 if (id->wzsl)
2815 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
2816
2817free_data:
2818 kfree(id);
2819 return ret;
2820}
2821
2822static int nvme_init_identify(struct nvme_ctrl *ctrl)
2823{
2824 struct nvme_id_ctrl *id;
2825 u32 max_hw_sectors;
2826 bool prev_apst_enabled;
2827 int ret;
2828
2829 ret = nvme_identify_ctrl(ctrl, &id);
2830 if (ret) {
2831 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2832 return -EIO;
2833 }
2834
2835 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2836 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
2837 if (ret < 0)
2838 goto out_free;
2839 }
2840
2841 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2842 ctrl->cntlid = le16_to_cpu(id->cntlid);
2843
2844 if (!ctrl->identified) {
2845 unsigned int i;
2846
2847 ret = nvme_init_subsystem(ctrl, id);
2848 if (ret)
2849 goto out_free;
2850
2851		/*
2852		 * Check for quirks.  Quirk can depend on firmware version,
2853		 * so, in principle, the set of quirks present can change
2854		 * across a reset.  As a possible future enhancement, we
2855		 * could re-scan for quirks every time we reinitialize the
2856		 * device, but we'd have to make sure that the driver
2857		 * behaves intelligently if the quirks change.
2858		 */
2859 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2860 if (quirk_matches(id, &core_quirks[i]))
2861 ctrl->quirks |= core_quirks[i].quirks;
2862 }
2863 }
2864
2865 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2866 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2867 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2868 }
2869
2870 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2871 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2872 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2873
2874 ctrl->oacs = le16_to_cpu(id->oacs);
2875 ctrl->oncs = le16_to_cpu(id->oncs);
2876 ctrl->mtfa = le16_to_cpu(id->mtfa);
2877 ctrl->oaes = le32_to_cpu(id->oaes);
2878 ctrl->wctemp = le16_to_cpu(id->wctemp);
2879 ctrl->cctemp = le16_to_cpu(id->cctemp);
2880
2881 atomic_set(&ctrl->abort_limit, id->acl + 1);
2882 ctrl->vwc = id->vwc;
2883 if (id->mdts)
2884 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
2885 else
2886 max_hw_sectors = UINT_MAX;
2887 ctrl->max_hw_sectors =
2888 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2889
2890 nvme_set_queue_limits(ctrl, ctrl->admin_q);
2891 ctrl->sgls = le32_to_cpu(id->sgls);
2892 ctrl->kas = le16_to_cpu(id->kas);
2893 ctrl->max_namespaces = le32_to_cpu(id->mnan);
2894 ctrl->ctratt = le32_to_cpu(id->ctratt);
2895
2896 if (id->rtd3e) {
2897		/* us -> s */
2898 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
2899
2900 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2901 shutdown_timeout, 60);
2902
2903 if (ctrl->shutdown_timeout != shutdown_timeout)
2904 dev_info(ctrl->device,
2905 "Shutdown timeout set to %u seconds\n",
2906 ctrl->shutdown_timeout);
2907 } else
2908 ctrl->shutdown_timeout = shutdown_timeout;
2909
2910 ctrl->npss = id->npss;
2911 ctrl->apsta = id->apsta;
2912 prev_apst_enabled = ctrl->apst_enabled;
2913 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2914 if (force_apst && id->apsta) {
2915 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2916 ctrl->apst_enabled = true;
2917 } else {
2918 ctrl->apst_enabled = false;
2919 }
2920 } else {
2921 ctrl->apst_enabled = id->apsta;
2922 }
2923 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2924
2925 if (ctrl->ops->flags & NVME_F_FABRICS) {
2926 ctrl->icdoff = le16_to_cpu(id->icdoff);
2927 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2928 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2929 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2930
2931		/*
2932		 * In fabrics we need to verify the cntlid matches the
2933		 * admin connect
2934		 */
2935 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2936 dev_err(ctrl->device,
2937 "Mismatching cntlid: Connect %u vs Identify "
2938 "%u, rejecting\n",
2939 ctrl->cntlid, le16_to_cpu(id->cntlid));
2940 ret = -EINVAL;
2941 goto out_free;
2942 }
2943
2944 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
2945 dev_err(ctrl->device,
2946 "keep-alive support is mandatory for fabrics\n");
2947 ret = -EINVAL;
2948 goto out_free;
2949 }
2950 } else {
2951 ctrl->hmpre = le32_to_cpu(id->hmpre);
2952 ctrl->hmmin = le32_to_cpu(id->hmmin);
2953 ctrl->hmminds = le32_to_cpu(id->hmminds);
2954 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2955 }
2956
2957 ret = nvme_mpath_init_identify(ctrl, id);
2958 if (ret < 0)
2959 goto out_free;
2960
2961 if (ctrl->apst_enabled && !prev_apst_enabled)
2962 dev_pm_qos_expose_latency_tolerance(ctrl->device);
2963 else if (!ctrl->apst_enabled && prev_apst_enabled)
2964 dev_pm_qos_hide_latency_tolerance(ctrl->device);
2965
2966out_free:
2967 kfree(id);
2968 return ret;
2969}
2970
2971/*
2972 * Initialize the cached copies of the Identify data and various controller
2973 * registers in our nvme_ctrl structure.  This should be called as soon as
2974 * the admin queue is fully up and running.
2975 */
2976int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
2977{
2978 int ret;
2979
2980 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2981 if (ret) {
2982 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2983 return ret;
2984 }
2985
2986 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
2987
2988 if (ctrl->vs >= NVME_VS(1, 1, 0))
2989 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
2990
2991 ret = nvme_init_identify(ctrl);
2992 if (ret)
2993 return ret;
2994
2995 ret = nvme_init_non_mdts_limits(ctrl);
2996 if (ret < 0)
2997 return ret;
2998
2999 ret = nvme_configure_apst(ctrl);
3000 if (ret < 0)
3001 return ret;
3002
3003 ret = nvme_configure_timestamp(ctrl);
3004 if (ret < 0)
3005 return ret;
3006
3007 ret = nvme_configure_directives(ctrl);
3008 if (ret < 0)
3009 return ret;
3010
3011 ret = nvme_configure_acre(ctrl);
3012 if (ret < 0)
3013 return ret;
3014
3015 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3016 ret = nvme_hwmon_init(ctrl);
3017 if (ret < 0)
3018 return ret;
3019 }
3020
3021 ctrl->identified = true;
3022
3023 return 0;
3024}
3025EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3026
3027static int nvme_dev_open(struct inode *inode, struct file *file)
3028{
3029 struct nvme_ctrl *ctrl =
3030 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3031
3032 switch (ctrl->state) {
3033 case NVME_CTRL_LIVE:
3034 break;
3035 default:
3036 return -EWOULDBLOCK;
3037 }
3038
3039 nvme_get_ctrl(ctrl);
3040 if (!try_module_get(ctrl->ops->module)) {
3041 nvme_put_ctrl(ctrl);
3042 return -EINVAL;
3043 }
3044
3045 file->private_data = ctrl;
3046 return 0;
3047}
3048
3049static int nvme_dev_release(struct inode *inode, struct file *file)
3050{
3051 struct nvme_ctrl *ctrl =
3052 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3053
3054 module_put(ctrl->ops->module);
3055 nvme_put_ctrl(ctrl);
3056 return 0;
3057}
3058
3059static const struct file_operations nvme_dev_fops = {
3060 .owner = THIS_MODULE,
3061 .open = nvme_dev_open,
3062 .release = nvme_dev_release,
3063 .unlocked_ioctl = nvme_dev_ioctl,
3064 .compat_ioctl = compat_ptr_ioctl,
3065};
3066
3067static ssize_t nvme_sysfs_reset(struct device *dev,
3068 struct device_attribute *attr, const char *buf,
3069 size_t count)
3070{
3071 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3072 int ret;
3073
3074 ret = nvme_reset_ctrl_sync(ctrl);
3075 if (ret < 0)
3076 return ret;
3077 return count;
3078}
3079static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3080
3081static ssize_t nvme_sysfs_rescan(struct device *dev,
3082 struct device_attribute *attr, const char *buf,
3083 size_t count)
3084{
3085 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3086
3087 nvme_queue_scan(ctrl);
3088 return count;
3089}
3090static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3091
3092static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3093{
3094 struct gendisk *disk = dev_to_disk(dev);
3095
3096 if (disk->fops == &nvme_bdev_ops)
3097 return nvme_get_ns_from_dev(dev)->head;
3098 else
3099 return disk->private_data;
3100}
3101
3102static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3103 char *buf)
3104{
3105 struct nvme_ns_head *head = dev_to_ns_head(dev);
3106 struct nvme_ns_ids *ids = &head->ids;
3107 struct nvme_subsystem *subsys = head->subsys;
3108 int serial_len = sizeof(subsys->serial);
3109 int model_len = sizeof(subsys->model);
3110
3111 if (!uuid_is_null(&ids->uuid))
3112 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
3113
3114 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3115 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
3116
3117 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3118 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
3119
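	/* strip the trailing space / NUL padding from serial and model */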
3120 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3121 subsys->serial[serial_len - 1] == '\0'))
3122 serial_len--;
3123 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3124 subsys->model[model_len - 1] == '\0'))
3125 model_len--;
3126
3127 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3128 serial_len, subsys->serial, model_len, subsys->model,
3129 head->ns_id);
3130}
3131static DEVICE_ATTR_RO(wwid);
3132
3133static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3134 char *buf)
3135{
3136 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3137}
3138static DEVICE_ATTR_RO(nguid);
3139
3140static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3141 char *buf)
3142{
3143 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3144
3145	/* For backward compatibility expose the NGUID to userspace if
3146	 * we have no UUID set
3147	 */
3148 if (uuid_is_null(&ids->uuid)) {
3149 printk_ratelimited(KERN_WARNING
3150 "No UUID available providing old NGUID\n");
3151 return sysfs_emit(buf, "%pU\n", ids->nguid);
3152 }
3153 return sysfs_emit(buf, "%pU\n", &ids->uuid);
3154}
3155static DEVICE_ATTR_RO(uuid);
3156
3157static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3158 char *buf)
3159{
3160 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3161}
3162static DEVICE_ATTR_RO(eui);
3163
3164static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3165 char *buf)
3166{
3167 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3168}
3169static DEVICE_ATTR_RO(nsid);
3170
3171static struct attribute *nvme_ns_id_attrs[] = {
3172 &dev_attr_wwid.attr,
3173 &dev_attr_uuid.attr,
3174 &dev_attr_nguid.attr,
3175 &dev_attr_eui.attr,
3176 &dev_attr_nsid.attr,
3177#ifdef CONFIG_NVME_MULTIPATH
3178 &dev_attr_ana_grpid.attr,
3179 &dev_attr_ana_state.attr,
3180#endif
3181 NULL,
3182};
3183
3184static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3185 struct attribute *a, int n)
3186{
3187 struct device *dev = container_of(kobj, struct device, kobj);
3188 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3189
3190 if (a == &dev_attr_uuid.attr) {
3191 if (uuid_is_null(&ids->uuid) &&
3192 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3193 return 0;
3194 }
3195 if (a == &dev_attr_nguid.attr) {
3196 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3197 return 0;
3198 }
3199 if (a == &dev_attr_eui.attr) {
3200 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3201 return 0;
3202 }
3203#ifdef CONFIG_NVME_MULTIPATH
3204 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3205 if (dev_to_disk(dev)->fops != &nvme_bdev_ops)
3206 return 0;
3207 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3208 return 0;
3209 }
3210#endif
3211 return a->mode;
3212}
3213
3214static const struct attribute_group nvme_ns_id_attr_group = {
3215 .attrs = nvme_ns_id_attrs,
3216 .is_visible = nvme_ns_id_attrs_are_visible,
3217};
3218
3219const struct attribute_group *nvme_ns_id_attr_groups[] = {
3220 &nvme_ns_id_attr_group,
3221#ifdef CONFIG_NVM
3222 &nvme_nvm_attr_group,
3223#endif
3224 NULL,
3225};
3226
3227#define nvme_show_str_function(field) \
3228static ssize_t field##_show(struct device *dev, \
3229 struct device_attribute *attr, char *buf) \
3230{ \
3231 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3232 return sysfs_emit(buf, "%.*s\n", \
3233 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3234} \
3235static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3236
3237nvme_show_str_function(model);
3238nvme_show_str_function(serial);
3239nvme_show_str_function(firmware_rev);
3240
3241#define nvme_show_int_function(field) \
3242static ssize_t field##_show(struct device *dev, \
3243 struct device_attribute *attr, char *buf) \
3244{ \
3245 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3246 return sysfs_emit(buf, "%d\n", ctrl->field); \
3247} \
3248static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3249
3250nvme_show_int_function(cntlid);
3251nvme_show_int_function(numa_node);
3252nvme_show_int_function(queue_count);
3253nvme_show_int_function(sqsize);
3254nvme_show_int_function(kato);
3255
3256static ssize_t nvme_sysfs_delete(struct device *dev,
3257 struct device_attribute *attr, const char *buf,
3258 size_t count)
3259{
3260 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3261
3262 if (device_remove_file_self(dev, attr))
3263 nvme_delete_ctrl_sync(ctrl);
3264 return count;
3265}
3266static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3267
3268static ssize_t nvme_sysfs_show_transport(struct device *dev,
3269 struct device_attribute *attr,
3270 char *buf)
3271{
3272 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3273
3274 return sysfs_emit(buf, "%s\n", ctrl->ops->name);
3275}
3276static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3277
3278static ssize_t nvme_sysfs_show_state(struct device *dev,
3279 struct device_attribute *attr,
3280 char *buf)
3281{
3282 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3283 static const char *const state_name[] = {
3284 [NVME_CTRL_NEW] = "new",
3285 [NVME_CTRL_LIVE] = "live",
3286 [NVME_CTRL_RESETTING] = "resetting",
3287 [NVME_CTRL_CONNECTING] = "connecting",
3288 [NVME_CTRL_DELETING] = "deleting",
3289		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
3290 [NVME_CTRL_DEAD] = "dead",
3291 };
3292
3293 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3294 state_name[ctrl->state])
3295 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
3296
3297 return sysfs_emit(buf, "unknown state\n");
3298}
3299
3300static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3301
3302static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3303 struct device_attribute *attr,
3304 char *buf)
3305{
3306 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3307
3308 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
3309}
3310static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3311
3312static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3313 struct device_attribute *attr,
3314 char *buf)
3315{
3316 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3317
3318 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
3319}
3320static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3321
3322static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3323 struct device_attribute *attr,
3324 char *buf)
3325{
3326 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3327
3328 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
3329}
3330static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3331
3332static ssize_t nvme_sysfs_show_address(struct device *dev,
3333 struct device_attribute *attr,
3334 char *buf)
3335{
3336 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3337
3338 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3339}
3340static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3341
3342static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
3343 struct device_attribute *attr, char *buf)
3344{
3345 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3346 struct nvmf_ctrl_options *opts = ctrl->opts;
3347
3348 if (ctrl->opts->max_reconnects == -1)
3349 return sysfs_emit(buf, "off\n");
3350 return sysfs_emit(buf, "%d\n",
3351 opts->max_reconnects * opts->reconnect_delay);
3352}
3353
3354static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
3355 struct device_attribute *attr, const char *buf, size_t count)
3356{
3357 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3358 struct nvmf_ctrl_options *opts = ctrl->opts;
3359 int ctrl_loss_tmo, err;
3360
3361 err = kstrtoint(buf, 10, &ctrl_loss_tmo);
3362 if (err)
3363 return -EINVAL;
3364
3365 if (ctrl_loss_tmo < 0)
3366 opts->max_reconnects = -1;
3367 else
3368 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3369 opts->reconnect_delay);
3370 return count;
3371}
3372static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
3373 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
3374
3375static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
3376 struct device_attribute *attr, char *buf)
3377{
3378 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3379
3380 if (ctrl->opts->reconnect_delay == -1)
3381 return sysfs_emit(buf, "off\n");
3382 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
3383}
3384
3385static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
3386 struct device_attribute *attr, const char *buf, size_t count)
3387{
3388 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3389 unsigned int v;
3390 int err;
3391
3392 err = kstrtou32(buf, 10, &v);
3393 if (err)
3394 return err;
3395
3396 ctrl->opts->reconnect_delay = v;
3397 return count;
3398}
3399static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
3400 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
3401
3402static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
3403 struct device_attribute *attr, char *buf)
3404{
3405 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3406
3407 if (ctrl->opts->fast_io_fail_tmo == -1)
3408 return sysfs_emit(buf, "off\n");
3409 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
3410}
3411
3412static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
3413 struct device_attribute *attr, const char *buf, size_t count)
3414{
3415 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3416 struct nvmf_ctrl_options *opts = ctrl->opts;
3417 int fast_io_fail_tmo, err;
3418
3419 err = kstrtoint(buf, 10, &fast_io_fail_tmo);
3420 if (err)
3421 return -EINVAL;
3422
3423 if (fast_io_fail_tmo < 0)
3424 opts->fast_io_fail_tmo = -1;
3425 else
3426 opts->fast_io_fail_tmo = fast_io_fail_tmo;
3427 return count;
3428}
3429static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
3430 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
3431
3432static struct attribute *nvme_dev_attrs[] = {
3433 &dev_attr_reset_controller.attr,
3434 &dev_attr_rescan_controller.attr,
3435 &dev_attr_model.attr,
3436 &dev_attr_serial.attr,
3437 &dev_attr_firmware_rev.attr,
3438 &dev_attr_cntlid.attr,
3439 &dev_attr_delete_controller.attr,
3440 &dev_attr_transport.attr,
3441 &dev_attr_subsysnqn.attr,
3442 &dev_attr_address.attr,
3443 &dev_attr_state.attr,
3444 &dev_attr_numa_node.attr,
3445 &dev_attr_queue_count.attr,
3446 &dev_attr_sqsize.attr,
3447 &dev_attr_hostnqn.attr,
3448 &dev_attr_hostid.attr,
3449 &dev_attr_ctrl_loss_tmo.attr,
3450 &dev_attr_reconnect_delay.attr,
3451 &dev_attr_fast_io_fail_tmo.attr,
3452 &dev_attr_kato.attr,
3453 NULL
3454};
3455
3456static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3457 struct attribute *a, int n)
3458{
3459 struct device *dev = container_of(kobj, struct device, kobj);
3460 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3461
3462 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3463 return 0;
3464 if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3465 return 0;
3466 if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3467 return 0;
3468 if (a == &dev_attr_hostid.attr && !ctrl->opts)
3469 return 0;
3470 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
3471 return 0;
3472 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
3473 return 0;
3474 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
3475 return 0;
3476
3477 return a->mode;
3478}
3479
3480static const struct attribute_group nvme_dev_attrs_group = {
3481 .attrs = nvme_dev_attrs,
3482 .is_visible = nvme_dev_attrs_are_visible,
3483};
3484
3485static const struct attribute_group *nvme_dev_attr_groups[] = {
3486 &nvme_dev_attrs_group,
3487 NULL,
3488};
3489
3490static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3491 unsigned nsid)
3492{
3493 struct nvme_ns_head *h;
3494
3495 lockdep_assert_held(&subsys->lock);
3496
3497 list_for_each_entry(h, &subsys->nsheads, entry) {
3498 if (h->ns_id == nsid && nvme_tryget_ns_head(h))
3499 return h;
3500 }
3501
3502 return NULL;
3503}
3504
3505static int __nvme_check_ids(struct nvme_subsystem *subsys,
3506 struct nvme_ns_head *new)
3507{
3508 struct nvme_ns_head *h;
3509
3510 lockdep_assert_held(&subsys->lock);
3511
3512 list_for_each_entry(h, &subsys->nsheads, entry) {
3513 if (nvme_ns_ids_valid(&new->ids) &&
3514 nvme_ns_ids_equal(&new->ids, &h->ids))
3515 return -EINVAL;
3516 }
3517
3518 return 0;
3519}
3520
3521void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3522{
3523 cdev_device_del(cdev, cdev_device);
3524 ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
3525}
3526
3527int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3528 const struct file_operations *fops, struct module *owner)
3529{
3530 int minor, ret;
3531
3532 minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
3533 if (minor < 0)
3534 return minor;
3535 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3536 cdev_device->class = nvme_ns_chr_class;
3537 device_initialize(cdev_device);
3538 cdev_init(cdev, fops);
3539 cdev->owner = owner;
3540 ret = cdev_device_add(cdev, cdev_device);
3541 if (ret) {
3542 put_device(cdev_device);
3543 ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
3544 }
3545 return ret;
3546}
3547
3548static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3549{
3550 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3551}
3552
3553static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3554{
3555 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3556 return 0;
3557}
3558
3559static const struct file_operations nvme_ns_chr_fops = {
3560 .owner = THIS_MODULE,
3561 .open = nvme_ns_chr_open,
3562 .release = nvme_ns_chr_release,
3563 .unlocked_ioctl = nvme_ns_chr_ioctl,
3564 .compat_ioctl = compat_ptr_ioctl,
3565};
3566
3567static int nvme_add_ns_cdev(struct nvme_ns *ns)
3568{
3569 int ret;
3570
3571 ns->cdev_device.parent = ns->ctrl->device;
3572 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3573 ns->ctrl->instance, ns->head->instance);
3574 if (ret)
3575 return ret;
3576 ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3577 ns->ctrl->ops->module);
3578 if (ret)
3579 kfree_const(ns->cdev_device.kobj.name);
3580 return ret;
3581}
3582
3583static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3584 unsigned nsid, struct nvme_ns_ids *ids)
3585{
3586 struct nvme_ns_head *head;
3587 size_t size = sizeof(*head);
3588 int ret = -ENOMEM;
3589
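	/* leave room for the per-node current-path array used by multipath */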
3590#ifdef CONFIG_NVME_MULTIPATH
3591 size += num_possible_nodes() * sizeof(struct nvme_ns *);
3592#endif
3593
3594 head = kzalloc(size, GFP_KERNEL);
3595 if (!head)
3596 goto out;
3597 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3598 if (ret < 0)
3599 goto out_free_head;
3600 head->instance = ret;
3601 INIT_LIST_HEAD(&head->list);
3602 ret = init_srcu_struct(&head->srcu);
3603 if (ret)
3604 goto out_ida_remove;
3605 head->subsys = ctrl->subsys;
3606 head->ns_id = nsid;
3607 head->ids = *ids;
3608 kref_init(&head->ref);
3609
3610 ret = __nvme_check_ids(ctrl->subsys, head);
3611 if (ret) {
3612 dev_err(ctrl->device,
3613 "duplicate IDs for nsid %d\n", nsid);
3614 goto out_cleanup_srcu;
3615 }
3616
3617 if (head->ids.csi) {
3618 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3619 if (ret)
3620 goto out_cleanup_srcu;
3621 } else
3622 head->effects = ctrl->effects;
3623
3624 ret = nvme_mpath_alloc_disk(ctrl, head);
3625 if (ret)
3626 goto out_cleanup_srcu;
3627
3628 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3629
3630 kref_get(&ctrl->subsys->ref);
3631
3632 return head;
3633out_cleanup_srcu:
3634 cleanup_srcu_struct(&head->srcu);
3635out_ida_remove:
3636 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3637out_free_head:
3638 kfree(head);
3639out:
3640 if (ret > 0)
3641 ret = blk_status_to_errno(nvme_error_status(ret));
3642 return ERR_PTR(ret);
3643}
3644
3645static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3646 struct nvme_ns_ids *ids, bool is_shared)
3647{
3648 struct nvme_ctrl *ctrl = ns->ctrl;
3649 struct nvme_ns_head *head = NULL;
3650 int ret = 0;
3651
3652 mutex_lock(&ctrl->subsys->lock);
3653 head = nvme_find_ns_head(ctrl->subsys, nsid);
3654 if (!head) {
3655 head = nvme_alloc_ns_head(ctrl, nsid, ids);
3656 if (IS_ERR(head)) {
3657 ret = PTR_ERR(head);
3658 goto out_unlock;
3659 }
3660 head->shared = is_shared;
3661 } else {
3662 ret = -EINVAL;
3663 if (!is_shared || !head->shared) {
3664 dev_err(ctrl->device,
3665 "Duplicate unshared namespace %d\n", nsid);
3666 goto out_put_ns_head;
3667 }
3668 if (!nvme_ns_ids_equal(&head->ids, ids)) {
3669 dev_err(ctrl->device,
3670 "IDs don't match for shared namespace %d\n",
3671 nsid);
3672 goto out_put_ns_head;
3673 }
3674 }
3675
3676 list_add_tail_rcu(&ns->siblings, &head->list);
3677 ns->head = head;
3678 mutex_unlock(&ctrl->subsys->lock);
3679 return 0;
3680
3681out_put_ns_head:
3682 nvme_put_ns_head(head);
3683out_unlock:
3684 mutex_unlock(&ctrl->subsys->lock);
3685 return ret;
3686}
3687
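/* keep ctrl->namespaces sorted by namespace ID for nvme_find_get_ns() */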
3688static int ns_cmp(void *priv, const struct list_head *a,
3689 const struct list_head *b)
3690{
3691 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3692 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3693
3694 return nsa->head->ns_id - nsb->head->ns_id;
3695}
3696
3697struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3698{
3699 struct nvme_ns *ns, *ret = NULL;
3700
3701 down_read(&ctrl->namespaces_rwsem);
3702 list_for_each_entry(ns, &ctrl->namespaces, list) {
3703 if (ns->head->ns_id == nsid) {
3704 if (!nvme_get_ns(ns))
3705 continue;
3706 ret = ns;
3707 break;
3708 }
3709 if (ns->head->ns_id > nsid)
3710 break;
3711 }
3712 up_read(&ctrl->namespaces_rwsem);
3713 return ret;
3714}
3715EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3716
3717static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
3718 struct nvme_ns_ids *ids)
3719{
3720 struct nvme_ns *ns;
3721 struct gendisk *disk;
3722 struct nvme_id_ns *id;
3723 int node = ctrl->numa_node;
3724
3725 if (nvme_identify_ns(ctrl, nsid, ids, &id))
3726 return;
3727
3728 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3729 if (!ns)
3730 goto out_free_id;
3731
3732 ns->queue = blk_mq_init_queue(ctrl->tagset);
3733 if (IS_ERR(ns->queue))
3734 goto out_free_ns;
3735
3736 if (ctrl->opts && ctrl->opts->data_digest)
3737 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3738
3739 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3740 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3741 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3742
3743 ns->queue->queuedata = ns;
3744 ns->ctrl = ctrl;
3745 kref_init(&ns->kref);
3746
3747 if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
3748 goto out_free_queue;
3749
3750 disk = alloc_disk_node(0, node);
3751 if (!disk)
3752 goto out_unlink_ns;
3753
3754 disk->fops = &nvme_bdev_ops;
3755 disk->private_data = ns;
3756 disk->queue = ns->queue;
3757	/*
3758	 * Without the multipath code enabled, multiple controllers per
3759	 * subsystem are visible as separate devices and thus we cannot
3760	 * use the subsystem instance in the device name.
3761	 */
3762 if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
3763 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3764 ns->head->instance);
3765 ns->disk = disk;
3766
3767 if (nvme_update_ns_info(ns, id))
3768 goto out_put_disk;
3769
3770 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3771 if (nvme_nvm_register(ns, disk->disk_name, node)) {
3772 dev_warn(ctrl->device, "LightNVM init failure\n");
3773 goto out_put_disk;
3774 }
3775 }
3776
3777 down_write(&ctrl->namespaces_rwsem);
3778 list_add_tail(&ns->list, &ctrl->namespaces);
3779 up_write(&ctrl->namespaces_rwsem);
3780
3781 nvme_get_ctrl(ctrl);
3782
3783 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3784 if (!nvme_ns_head_multipath(ns->head))
3785 nvme_add_ns_cdev(ns);
3786
3787 nvme_mpath_add_disk(ns, id);
3788 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3789 kfree(id);
3790
3791 return;
3792 out_put_disk:
3793	/* prevent double queue cleanup */
3794 ns->disk->queue = NULL;
3795 put_disk(ns->disk);
3796 out_unlink_ns:
3797 mutex_lock(&ctrl->subsys->lock);
3798 list_del_rcu(&ns->siblings);
3799 if (list_empty(&ns->head->list))
3800 list_del_init(&ns->head->entry);
3801 mutex_unlock(&ctrl->subsys->lock);
3802 nvme_put_ns_head(ns->head);
3803 out_free_queue:
3804 blk_cleanup_queue(ns->queue);
3805 out_free_ns:
3806 kfree(ns);
3807 out_free_id:
3808 kfree(id);
3809}
3810
3811static void nvme_ns_remove(struct nvme_ns *ns)
3812{
3813 bool last_path = false;
3814
3815 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3816 return;
3817
3818 set_capacity(ns->disk, 0);
3819 nvme_fault_inject_fini(&ns->fault_inject);
3820
3821 mutex_lock(&ns->ctrl->subsys->lock);
3822 list_del_rcu(&ns->siblings);
3823 mutex_unlock(&ns->ctrl->subsys->lock);
3824
3825	synchronize_rcu(); /* guarantee not available in head->list */
3826	nvme_mpath_clear_current_path(ns);
3827	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3828
3829 if (ns->disk->flags & GENHD_FL_UP) {
3830 if (!nvme_ns_head_multipath(ns->head))
3831 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3832 del_gendisk(ns->disk);
3833 blk_cleanup_queue(ns->queue);
3834 if (blk_get_integrity(ns->disk))
3835 blk_integrity_unregister(ns->disk);
3836 }
3837
3838 down_write(&ns->ctrl->namespaces_rwsem);
3839 list_del_init(&ns->list);
3840 up_write(&ns->ctrl->namespaces_rwsem);
3841
3842	/* Synchronize with nvme_init_ns_head() */
3843 mutex_lock(&ns->head->subsys->lock);
3844 if (list_empty(&ns->head->list)) {
3845 list_del_init(&ns->head->entry);
3846 last_path = true;
3847 }
3848 mutex_unlock(&ns->head->subsys->lock);
3849 if (last_path)
3850 nvme_mpath_shutdown_disk(ns->head);
3851 nvme_put_ns(ns);
3852}
3853
3854static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3855{
3856 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3857
3858 if (ns) {
3859 nvme_ns_remove(ns);
3860 nvme_put_ns(ns);
3861 }
3862}
3863
3864static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
3865{
3866 struct nvme_id_ns *id;
3867 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3868
3869 if (test_bit(NVME_NS_DEAD, &ns->flags))
3870 goto out;
3871
3872 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
3873 if (ret)
3874 goto out;
3875
3876 ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3877 if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
3878 dev_err(ns->ctrl->device,
3879 "identifiers changed for nsid %d\n", ns->head->ns_id);
3880 goto out_free_id;
3881 }
3882
3883 ret = nvme_update_ns_info(ns, id);
3884
3885out_free_id:
3886 kfree(id);
3887out:
3888	/*
3889	 * Only remove the namespace if we got a fatal error back from the
3890	 * device, otherwise ignore the error and just move on.
3891	 *
3892	 * TODO: we should probably schedule a delayed retry here.
3893	 */
3894 if (ret > 0 && (ret & NVME_SC_DNR))
3895 nvme_ns_remove(ns);
3896}
3897
3898static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3899{
3900 struct nvme_ns_ids ids = { };
3901 struct nvme_ns *ns;
3902
3903 if (nvme_identify_ns_descs(ctrl, nsid, &ids))
3904 return;
3905
3906 ns = nvme_find_get_ns(ctrl, nsid);
3907 if (ns) {
3908 nvme_validate_ns(ns, &ids);
3909 nvme_put_ns(ns);
3910 return;
3911 }
3912
3913 switch (ids.csi) {
3914 case NVME_CSI_NVM:
3915 nvme_alloc_ns(ctrl, nsid, &ids);
3916 break;
3917 case NVME_CSI_ZNS:
3918 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
3919 dev_warn(ctrl->device,
3920 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
3921 nsid);
3922 break;
3923 }
3924 if (!nvme_multi_css(ctrl)) {
3925 dev_warn(ctrl->device,
3926 "command set not reported for nsid: %d\n",
3927 nsid);
3928 break;
3929 }
3930 nvme_alloc_ns(ctrl, nsid, &ids);
3931 break;
3932 default:
3933 dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
3934 ids.csi, nsid);
3935 break;
3936 }
3937}
3938
3939static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3940 unsigned nsid)
3941{
3942 struct nvme_ns *ns, *next;
3943 LIST_HEAD(rm_list);
3944
3945 down_write(&ctrl->namespaces_rwsem);
3946 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3947 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3948 list_move_tail(&ns->list, &rm_list);
3949 }
3950 up_write(&ctrl->namespaces_rwsem);
3951
3952 list_for_each_entry_safe(ns, next, &rm_list, list)
3953 nvme_ns_remove(ns);
3954
3955}
3956
3957static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3958{
3959 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
3960 __le32 *ns_list;
3961 u32 prev = 0;
3962 int ret = 0, i;
3963
3964 if (nvme_ctrl_limited_cns(ctrl))
3965 return -EOPNOTSUPP;
3966
3967 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3968 if (!ns_list)
3969 return -ENOMEM;
3970
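	/*
	 * Page through the Active Namespace ID list: each Identify call
	 * returns up to 1024 NSIDs greater than "prev", and a zero entry
	 * terminates the scan.  Gaps in the list are stale namespaces
	 * that get removed as we go.
	 */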
3971 for (;;) {
3972 struct nvme_command cmd = {
3973 .identify.opcode = nvme_admin_identify,
3974 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
3975 .identify.nsid = cpu_to_le32(prev),
3976 };
3977
3978 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
3979 NVME_IDENTIFY_DATA_SIZE);
3980 if (ret) {
3981 dev_warn(ctrl->device,
3982 "Identify NS List failed (status=0x%x)\n", ret);
3983 goto free;
3984 }
3985
3986 for (i = 0; i < nr_entries; i++) {
3987 u32 nsid = le32_to_cpu(ns_list[i]);
3988
3989 if (!nsid)
3990 goto out;
3991 nvme_validate_or_alloc_ns(ctrl, nsid);
3992 while (++prev < nsid)
3993 nvme_ns_remove_by_nsid(ctrl, prev);
3994 }
3995 }
3996 out:
3997 nvme_remove_invalid_namespaces(ctrl, prev);
3998 free:
3999 kfree(ns_list);
4000 return ret;
4001}
4002
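/* brute-force fallback for controllers without Identify NS list support */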
4003static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4004{
4005 struct nvme_id_ctrl *id;
4006 u32 nn, i;
4007
4008 if (nvme_identify_ctrl(ctrl, &id))
4009 return;
4010 nn = le32_to_cpu(id->nn);
4011 kfree(id);
4012
4013 for (i = 1; i <= nn; i++)
4014 nvme_validate_or_alloc_ns(ctrl, i);
4015
4016 nvme_remove_invalid_namespaces(ctrl, nn);
4017}
4018
4019static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4020{
4021 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
4022 __le32 *log;
4023 int error;
4024
4025 log = kzalloc(log_size, GFP_KERNEL);
4026 if (!log)
4027 return;
4028
4029	/*
4030	 * We need to read the log to clear the AEN, but we don't want to rely
4031	 * on it for the changed namespace information as userspace could have
4032	 * raced with us in reading the log page, which could cause us to miss
4033	 * updates.
4034	 */
4035 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
4036 NVME_CSI_NVM, log, log_size, 0);
4037 if (error)
4038 dev_warn(ctrl->device,
4039 "reading changed ns log failed: %d\n", error);
4040
4041 kfree(log);
4042}
4043
4044static void nvme_scan_work(struct work_struct *work)
4045{
4046 struct nvme_ctrl *ctrl =
4047 container_of(work, struct nvme_ctrl, scan_work);
4048
4049	/* No tagset on a live ctrl means IO queues could not be created */
4050 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
4051 return;
4052
4053 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4054 dev_info(ctrl->device, "rescanning namespaces.\n");
4055 nvme_clear_changed_ns_log(ctrl);
4056 }
4057
4058 mutex_lock(&ctrl->scan_lock);
4059 if (nvme_scan_ns_list(ctrl) != 0)
4060 nvme_scan_ns_sequential(ctrl);
4061 mutex_unlock(&ctrl->scan_lock);
4062
4063 down_write(&ctrl->namespaces_rwsem);
4064 list_sort(NULL, &ctrl->namespaces, ns_cmp);
4065 up_write(&ctrl->namespaces_rwsem);
4066}
4067
4068/*
4069 * Remove all namespaces from the controller.  Called on controller removal
4070 * and on a dead controller; must not race with the scan work, which is
4071 * flushed below before the namespace list is torn down.
4072 */
4073void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
4074{
4075 struct nvme_ns *ns, *next;
4076 LIST_HEAD(ns_list);
4077
4078	/*
4079	 * make sure to requeue I/O to all namespaces as these
4080	 * might result from the scan itself and must complete
4081	 * for the scan_work to make progress
4082	 */
4083 nvme_mpath_clear_ctrl_paths(ctrl);
4084
4085	/* prevent racing with ns scanning */
4086 flush_work(&ctrl->scan_work);
4087
4088	/*
4089	 * A dead state indicates the controller was not gracefully
4090	 * disconnected. In that case, we won't be able to flush any data while
4091	 * removing the namespaces' disks; fail all the queues now to avoid
4092	 * serializing on a queue wait through the usual delete/reset path.
4093	 */
4094 if (ctrl->state == NVME_CTRL_DEAD)
4095 nvme_kill_queues(ctrl);
4096
4097	/* this is a no-op when called from the controller reset handler */
4098 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4099
4100 down_write(&ctrl->namespaces_rwsem);
4101 list_splice_init(&ctrl->namespaces, &ns_list);
4102 up_write(&ctrl->namespaces_rwsem);
4103
4104 list_for_each_entry_safe(ns, next, &ns_list, list)
4105 nvme_ns_remove(ns);
4106}
4107EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4108
4109static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
4110{
4111 struct nvme_ctrl *ctrl =
4112 container_of(dev, struct nvme_ctrl, ctrl_device);
4113 struct nvmf_ctrl_options *opts = ctrl->opts;
4114 int ret;
4115
4116 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4117 if (ret)
4118 return ret;
4119
4120 if (opts) {
4121 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4122 if (ret)
4123 return ret;
4124
4125 ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4126 opts->trsvcid ?: "none");
4127 if (ret)
4128 return ret;
4129
4130 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4131 opts->host_traddr ?: "none");
4132 if (ret)
4133 return ret;
4134
4135 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4136 opts->host_iface ?: "none");
4137 }
4138 return ret;
4139}
4140
4141static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4142{
4143 char *envp[2] = { NULL, NULL };
4144 u32 aen_result = ctrl->aen_result;
4145
4146 ctrl->aen_result = 0;
4147 if (!aen_result)
4148 return;
4149
4150 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4151 if (!envp[0])
4152 return;
4153 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4154 kfree(envp[0]);
4155}
4156
4157static void nvme_async_event_work(struct work_struct *work)
4158{
4159 struct nvme_ctrl *ctrl =
4160 container_of(work, struct nvme_ctrl,