/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

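/* An MR key and its MPT table index are related by inverse 32-bit rotations:
 * hw_index_to_key() rotates the index left by 8 bits, so the index lands in
 * the upper 24 bits of the key and the low byte is left as the key's variant
 * part; key_to_hw_index() rotates back.
 */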
static u32 hw_index_to_key(int ind)
{
	return ((u32)ind >> 24) | ((u32)ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_CREATE_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

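/* @mailbox may be NULL: the output address is then 0 and the op modifier is
 * set to !mailbox, which presumably tells the firmware that no write-back
 * buffer for the destroyed MPT context is attached.
 */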
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int err;
	int id;

	/* Allocate a key for mr from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
		return -ENOMEM;
	}

	mr->key = hw_index_to_key(id);

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
				 (unsigned long)id);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	ida_free(&mtpt_ida->ida, id);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
}

static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata, u64 start)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = mr->size;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.user_access = mr->access;
	/* fast MR's buffer is allocated before mapping, not at creation */
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
	else
		mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

	return err;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

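/* Full MR teardown: destroy the hardware MPT context if it was enabled, then
 * release the PBL and finally the key's MTPT table slot and IDA id.
 */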
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
					     mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

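/* Set up the MTPT index allocator: the first caps.reserved_mrws indices are
 * reserved and never handed out; everything up to num_mtpts - 1 is available
 * for MR and MW keys.
 */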
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;

	ida_init(&mtpt_ida->ida);
	mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
	mtpt_ida->min = hr_dev->caps.reserved_mrws;
}

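/* A DMA MR maps the whole DMA address space through a single MPT entry and
 * needs no PBL; the hem_list is presumably initialized only so that the
 * common teardown path in free_mr_pbl() stays safe to call.
 */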
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = acc;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;
err_mr:
	free_mr_key(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

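/* Register a userspace memory region: the PBL allocation pins the user pages
 * via ib_umem inside alloc_mr_pbl(), after which the MPT entry is written
 * and enabled for them.
 */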
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->iova = virt_addr;
	mr->size = length;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = access_flags;
	mr->type = MR_TYPE_MR;

	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, udata, start);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
	return ERR_PTR(ret);
}

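/* Re-registration updates the MR in place: query and destroy the current MPT
 * entry, apply the fields selected by @flags, then rewrite and re-create the
 * entry. On success ret is 0, so ERR_PTR(ret) is NULL, which the IB core
 * takes to mean the existing ib_mr was reused.
 */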
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	int ret;

	if (!mr->enabled)
		return ERR_PTR(-EINVAL);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return ERR_CAST(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;
	mr->iova = virt_addr;
	mr->size = length;

	if (flags & IB_MR_REREG_PD)
		mr->pd = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		free_mr_pbl(hr_dev, mr);
		ret = alloc_mr_pbl(hr_dev, mr, udata, start);
		if (ret) {
			ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
				  ret);
			goto free_cmd_mbox;
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
	if (ret) {
		ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ERR_PTR(ret);
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
	} else {
		hns_roce_mr_free(hr_dev, mr);
		kfree(mr);
	}

	return ret;
}

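/* Allocate a fast-register MR (FRMR): only IB_MR_TYPE_MEM_REG is supported,
 * the PBL is sized for at most HNS_ROCE_FRMR_MAX_PA pages, and the actual
 * page addresses are filled in later through hns_roce_map_mr_sg().
 */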
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->size = max_num_sg * (1 << PAGE_SHIFT);

	/* Allocate a key for mr from mr_table */
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = mr->size;

	return &mr->ibmr;

err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

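/* Callback for ib_sg_to_pages(): store one page address per call in
 * mr->page_list until the PBL capacity (buf_pg_count) is exhausted.
 */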
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return ret;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		ret = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
		ret = mr->npages;
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return ret;
}

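/* Memory window teardown mirrors hns_roce_mr_free(): destroy the MPT context
 * if it was enabled and release the MTPT table slot, then return the index
 * to the IDA.
 */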
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	ida_free(&hr_dev->mr_table.mtpt_ida.ida,
		 (int)key_to_hw_index(mw->rkey));
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "failed to write mtpt for MW, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

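/* Memory windows share the MPT table with MRs: an MW rkey is allocated from
 * the same mtpt IDA and the window is enabled through the same mailbox
 * CREATE_MPT path.
 */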
int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	int ret;
	int id;

	/* Allocate a key for mw from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
		return -ENOMEM;
	}

	mw->rkey = hw_index_to_key(id);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

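/* Write the DMA addresses in @pages into the MTT entries covering @region.
 * Returns the number of pages written, which may be less than region->count
 * when @max_count is reached, or -ENOBUFS if no MTT space exists for an
 * offset.
 */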
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_region *region, dma_addr_t *pages,
			  int max_count)
{
	int count, npage;
	int offset, end;
	__le64 *mtts;
	u64 addr;
	int i;

	offset = region->offset;
	end = offset + region->count;
	npage = 0;
	while (offset < end && npage < max_count) {
		count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count && npage < max_count; i++) {
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				addr = to_hr_hw_page_addr(pages[npage]);
			else
				addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return npage;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

	/* Because the mtr has only one root base address, hopnum 0 means the
	 * root base address equals the first buffer address, so all allocated
	 * memory must lie in a continuous space accessed in direct mode.
	 */
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

/*
 * Check that the given pages lie in a continuous address space.
 * Returns 0 on success, or the index of the first page that breaks
 * the contiguity.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release user buffers */
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

	/* release kernel buffers */
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		mtr->kmem = NULL;
	}
}

static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	size_t total_size;

	total_size = mtr_bufs_size(buf_attr);

	if (udata) {
		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR_OR_NULL(mtr->umem)) {
			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}
	} else {
		mtr->umem = NULL;
		mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
					       buf_attr->page_shift,
					       mtr->hem_cfg.is_direct ?
					       HNS_ROCE_BUF_DIRECT : 0);
		if (IS_ERR(mtr->kmem)) {
			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
				  PTR_ERR(mtr->kmem));
			return PTR_ERR(mtr->kmem);
		}
	}

	return 0;
}

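/* Gather the DMA addresses of all buffer pages (from the umem or the kernel
 * buffer) into a temporary array and write them to the MTT through
 * hns_roce_mtr_map(); direct-mode buffers are additionally checked for
 * physical contiguity first.
 */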
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			int page_count, unsigned int page_shift)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t *pages;
	int npage;
	int ret;

	/* alloc a tmp array to store buffer's dma address */
	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
					       mtr->kmem, page_shift);

	if (npage != page_count) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
			  page_count);
		ret = -ENOBUFS;
		goto err_alloc_list;
	}

	if (mtr->hem_cfg.is_direct && npage > 1) {
		ret = mtr_check_direct_pages(pages, npage, page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
				  mtr->umem ? "umtr" : "kmtr", ret, npage);
			ret = -ENOBUFS;
			goto err_alloc_list;
		}
	}

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);

err_alloc_list:
	kvfree(pages);

	return ret;
}

int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	unsigned int i, mapped_cnt;
	int ret = 0;

	/* Only use the first page address as root ba when hopnum is 0, this
	 * is because the addresses of all pages are consecutive in this case.
	 */
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
	     mapped_cnt < page_cnt; i++) {
		r = &mtr->hem_cfg.region[i];
		/* if hopnum is 0, no need to map pages in this region */
		if (!r->hopnum) {
			mapped_cnt += r->count;
			continue;
		}

		if (r->offset + r->count > page_cnt) {
			ret = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u count %u + %u > %u.\n",
				  i, r->offset, r->count, page_cnt);
			return ret;
		}

		ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
				     page_cnt - mapped_cnt);
		if (ret < 0) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, ret);
			return ret;
		}
		mapped_cnt += ret;
		ret = 0;
	}

	if (mapped_cnt < page_cnt) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
			  mapped_cnt, page_cnt);
	}

	return ret;
}

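/* Collect the hardware page addresses backing @mtr from @offset onwards,
 * copying at most @mtt_max entries into @mtt_buf; returns the number found
 * and stores the root base address in @base_addr if it is non-NULL. Direct
 * mode has no MTT, so addresses are computed from root_ba instead.
 */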
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int mtt_count, left;
	int start_index;
	int total = 0;
	__le64 *mtts;
	u32 npage;
	u64 addr;

	if (!mtt_buf || mtt_max < 1)
		goto done;

	/* no mtt memory in direct mode, so just return the buffer address */
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		for (mtt_count = 0; mtt_count < cfg->region_count &&
		     total < mtt_max; mtt_count++) {
			npage = cfg->region[mtt_count].offset;
			if (npage < start_index)
				continue;

			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				mtt_buf[total] = to_hr_hw_page_addr(addr);
			else
				mtt_buf[total] = addr;

			total++;
		}

		goto done;
	}

	start_index = offset >> cfg->buf_pg_shift;
	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count, NULL);
		if (!mtts || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

done:
	if (base_addr)
		*base_addr = cfg->root_ba;

	return total;
}

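/* Translate the caller's buffer attributes into a HEM config: choose between
 * direct (0-hop) and MTT-backed addressing, then split each region into
 * page-sized units, folding the buffer's offset inside its first page into
 * the first region. Returns the total page count.
 */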
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_buf_attr *attr,
			    struct hns_roce_hem_cfg *cfg,
			    unsigned int *buf_page_shift, int unaligned_size)
{
	struct hns_roce_buf_region *r;
	int first_region_padding;
	int page_cnt, region_cnt;
	unsigned int page_shift;
	size_t buf_size;

	/* If mtt is disabled, all pages must be within a continuous range */
	cfg->is_direct = !mtr_has_mtt(attr);
	buf_size = mtr_bufs_size(attr);
	if (cfg->is_direct) {
		/* When HEM buffer uses 0-level addressing, the page size is
		 * equal to the whole buffer size. The buffer is split into
		 * small pages only to check whether adjacent units occupy a
		 * continuous space; the unit size is fixed to 4K per the hns
		 * ROCEE's requirement.
		 */
		page_shift = HNS_HW_PAGE_SHIFT;

		/* The ROCEE requires the page size to be 4K * 2 ^ N. */
		cfg->buf_pg_count = 1;
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
		first_region_padding = 0;
	} else {
		page_shift = attr->page_shift;
		cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unaligned_size,
						 1 << page_shift);
		cfg->buf_pg_shift = page_shift;
		first_region_padding = unaligned_size;
	}

	/* Convert buffer size to page index and page count per size type, and
	 * the buffer's offset needs to be appended to the first region.
	 */
	for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
		r = &cfg->region[region_cnt];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[region_cnt].size +
					    first_region_padding);
		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
		first_region_padding = 0;
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
					     r->count);
	}

	cfg->region_count = region_cnt;
	*buf_page_shift = page_shift;

	return page_cnt;
}

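/* In direct mode no base-address table is needed and the BA page shift just
 * mirrors the buffer page shift; otherwise a multi-hop BA table is requested
 * from the HEM list and its root becomes cfg->root_ba.
 */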
static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 unsigned int ba_page_shift)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int ret;

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret)
			return ret;
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

	return 0;
}

static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}

/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int buf_page_shift = 0;
	int buf_page_cnt;
	int ret;

	buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
					&buf_page_shift,
					udata ? user_addr & ~PAGE_MASK : 0);
	if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
		ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
			  buf_page_cnt, buf_page_shift);
		return -EINVAL;
	}

	ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
		return ret;
	}

	/* The caller has its own buffer list and invokes hns_roce_mtr_map()
	 * later to finish the MTT configuration.
	 */
	if (buf_attr->mtt_only) {
		mtr->umem = NULL;
		mtr->kmem = NULL;
		return 0;
	}

	ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
		goto err_alloc_mtt;
	}

	/* Write buffer's dma address to MTT */
	ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
	else
		return 0;

	mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
	mtr_free_mtt(hr_dev, mtr);
	return ret;
}


void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release multi-hop addressing resource */
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
}