#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd,
			    struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
	.invalidate = tid_rb_invalidate,
};
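
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry count.
 */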
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}
		fd->use_mn = true;
	}

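	/*
	 * PSM does not have a good way to separate, count, and effectively
	 * enforce a limit on RcvArray entries used by subctxts (when context
	 * sharing is used) when TID caching is enabled. To help with that,
	 * we calculate a per-process RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its own;
	 * in that case, we allow any subctxt to take all of the entries.
	 */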
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->use_mn) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return 0;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	mutex_lock(&uctxt->exp_mutex);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
		unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
		unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	mutex_unlock(&uctxt->exp_mutex);

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}
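
/*
 * Release pinned receive buffer pages.
 *
 * @mapped: true if the pages have been DMA mapped. false otherwise.
 * @idx: Index of the first page to unpin.
 * @npages: Number of pages to unpin.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter),
 * their info is taken from the struct tid_rb_node. If they haven't been
 * mapped, their info is taken from the struct tid_user_buf.
 */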
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;
	struct mm_struct *mm;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
		mm = mm_from_tid_node(node);
	} else {
		pages = &tidbuf->pages[idx];
		mm = current->mm;
	}
	hfi1_release_user_pages(mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}
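
/*
 * Pin receive buffer pages.
 */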
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;
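
	/* Get the number of pages the user buffer spans */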
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}
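
	/* Allocate the array of struct page pointers needed for pinning */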
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

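	/*
	 * Make sure the process is allowed to pin this many more pages
	 * before attempting the pin; pinned pages are accounted for in
	 * fd->tid_n_pinned.
	 */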
	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true,
					 pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}
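
/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each full group's worth of pagesets, a complete
 *      group from tid_group_list is taken, programmed, and
 *      moved to the tid_full_list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          the tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */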
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		     tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		/* a pin count of zero means nothing could be programmed */
		return pinned < 0 ? pinned : -ENOSPC;
	}
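
	/* Find sets of physically contiguous pages */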
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
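
	/*
	 * Limit the number of pagesets to what this process may still
	 * program, given its share of the RcvArray entries (tid_limit).
	 */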
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	/*
	 * No RcvArray entries are available; take the common exit path
	 * so the pinned pages are released rather than leaked.
	 */
	if (!pageset_count) {
		ret = -ENOSPC;
		goto nomem;
	}

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

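	/*
	 * From this point on, we are going to be using shared (between
	 * master and subcontexts) context resources. We need to take the
	 * lock.
	 */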
	mutex_lock(&uctxt->exp_mutex);
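	/*
	 * The first step is to program the RcvArray entries which are
	 * complete groups.
	 */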
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
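		/*
		 * If programming the group's RcvArray entries failed,
		 * put the group back on the free list and bail.
		 */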
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
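		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put it in the partially used list.
		 */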
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
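		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * that will fit. This would require a search on the used
		 * list.
		 */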
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned int use = min_t(unsigned int,
						 pageset_count - pageidx,
						 grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;

				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
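				/*
				 * ret was 0: nothing was programmed into
				 * this group even though entries appeared
				 * to be available. Warn and try to find
				 * another group.
				 */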
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_mutex);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 tidlist, sizeof(tidlist[0]) * tididx)) {
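			/*
			 * On failure to copy to the user level, undo
			 * everything done so far so we don't leak
			 * resources. Undo directly from the kernel copy
			 * of the list; tinfo->tidlist is the user pointer
			 * we just failed to write through, so it cannot
			 * be trusted (and handing a kernel address to
			 * hfi1_user_exp_rcv_clear() would not work).
			 */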
			unsigned int i;

			mutex_lock(&uctxt->exp_mutex);
			for (i = 0; i < tididx; i++)
				unprogram_rcvarray(fd, tidlist[i], NULL);
			mutex_unlock(&uctxt->exp_mutex);
			spin_lock(&fd->tid_lock);
			fd->tid_used -= tididx;
			spin_unlock(&fd->tid_lock);
			tinfo->tidcnt = 0;
			ret = -EFAULT;
		}
	}
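
	/*
	 * If not everything was mapped (e.g. we ran out of RcvArray
	 * entries), unpin the pages that were pinned but never mapped
	 * so they are not leaked.
	 */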
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned int tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

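	/*
	 * copy_to_user() can sleep, which would leave the invalid_lock
	 * held for too long and block the MMU notifier on it. Copy the
	 * data to a local buffer first so the lock can be dropped before
	 * copying out to user space.
	 */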
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
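
		/*
		 * Reset the TID MMU-notify event now that the
		 * invalidated TIDs have been reported to the process.
		 */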
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned int pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;
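
	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */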
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;
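
		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */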
		if (this_pfn != ++pfn) {
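			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *        If the total set size is bigger than that
			 *        program only a MAX_EXPECTED_BUFFER chunk.
			 *     2. The buffer size has to be a power of two.
			 *        If it is not, round down to the closest
			 *        power of 2 and program that size.
			 */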
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}
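
/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *	  virtual address, buffer length, page pointers, and pagesets (array
 *	  of struct tid_pageset holding information on physically contiguous
 *	  chunks from the user buffer).
 * @grp: RcvArray group
 * @start: starting index into the pagesets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from
 * the group 'grp'. To make best use of write-combining writes, the function
 * will perform writes to the unused RcvArray entries which will be ignored
 * by the HW. Each RcvArray entry will be programmed with a physically
 * contiguous buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * the number of RcvArray entries programmed.
 */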
static int program_rcvarray(struct hfi1_filedata *fd,
			    struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;
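
	/* Count should never be larger than the group size */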
	if (count > grp->size)
		return -EINVAL;
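
	/* Find the first unused rcvarray entry in the group */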
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;
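
		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */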
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}
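
	/* Fill the rest of the group with "blank" writes */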
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;
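
	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */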
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->fdata = fd;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&node->notifier, current->mm,
			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
			&tid_mn_ops);
		if (ret)
			goto out_unmap;
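		/*
		 * FIXME: this is in the wrong order; the notifier should
		 * be established before the pages are pinned by
		 * pin_rcv_pages.
		 */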
		mmu_interval_read_begin(&node->notifier);
	}
	fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;

	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->notifier.interval_tree.start, node->phys,
			       phys);
	return 0;

out_unmap:
	hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
		  node->rcventry, node->notifier.interval_tree.start,
		  node->phys, ret);
	pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);
	kfree(node);
	return -EFAULT;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	/* tidctrl values of 0 and 3 are never programmed; reject them */
	if (!tidctrl || tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (fd->use_mn)
		mmu_interval_notifier_remove(&node->notifier);
	cacheless_tid_rb_remove(fd, node);

	return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages,
				 node->notifier.interval_tree.start, node->phys,
				 node->dma_addr);

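	/*
	 * Make sure the device has seen the write before the pages are
	 * unpinned.
	 */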
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}
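
/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function walks the
 * given set, removing the notifier for and clearing every in-use RcvArray
 * entry it finds.
 */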
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				if (fd->use_mn)
					mmu_interval_notifier_remove(
						&node->notifier);
				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct tid_rb_node *node =
		container_of(mni, struct tid_rb_node, notifier);
	struct hfi1_filedata *fdata = node->fdata;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;

	if (node->freed)
		return true;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
				 node->notifier.interval_tree.start,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;
914
915
916
917
918
919
920
921
922
923
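
			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */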
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return true;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}