1
2
3
4
5
6
7
8
9
10
11#include <linux/module.h>
12#include <linux/interrupt.h>
13#include <linux/delay.h>
14#include <linux/irq.h>
15#include <linux/pci.h>
16#include <linux/sysfs.h>
17
18#include "cgx.h"
19#include "rvu.h"
20#include "rvu_reg.h"
21#include "ptp.h"
22
23#include "rvu_trace.h"
24
25#define DRV_NAME "rvu_af"
26#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
27
28static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
29
30static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31 struct rvu_block *block, int lf);
32static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
33 struct rvu_block *block, int lf);
34static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
35
36static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
37 int type, int num,
38 void (mbox_handler)(struct work_struct *),
39 void (mbox_up_handler)(struct work_struct *));
40enum {
41 TYPE_AFVF,
42 TYPE_AFPF,
43};
44
45
46static const struct pci_device_id rvu_id_table[] = {
47 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
48 { 0, }
49};
50
51MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
52MODULE_DESCRIPTION(DRV_STRING);
53MODULE_LICENSE("GPL v2");
54MODULE_DEVICE_TABLE(pci, rvu_id_table);
55
56static char *mkex_profile;
57module_param(mkex_profile, charp, 0000);
58MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
59
60static char *kpu_profile;
61module_param(kpu_profile, charp, 0000);
62MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
63
64static void rvu_setup_hw_capabilities(struct rvu *rvu)
65{
66 struct rvu_hwinfo *hw = rvu->hw;
67
68 hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
69 hw->cap.nix_fixed_txschq_mapping = false;
70 hw->cap.nix_shaping = true;
71 hw->cap.nix_tx_link_bp = true;
72 hw->cap.nix_rx_multicast = true;
73 hw->rvu = rvu;
74
75 if (is_rvu_96xx_B0(rvu)) {
76 hw->cap.nix_fixed_txschq_mapping = true;
77 hw->cap.nix_txsch_per_cgx_lmac = 4;
78 hw->cap.nix_txsch_per_lbk_lmac = 132;
79 hw->cap.nix_txsch_per_sdp_lmac = 76;
80 hw->cap.nix_shaping = false;
81 hw->cap.nix_tx_link_bp = false;
82 if (is_rvu_96xx_A0(rvu))
83 hw->cap.nix_rx_multicast = false;
84 }
85
86 if (!is_rvu_otx2(rvu))
87 hw->cap.per_pf_mbox_regs = true;
88}
89
90
91
92
93int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
94{
95 unsigned long timeout = jiffies + usecs_to_jiffies(10000);
96 void __iomem *reg;
97 u64 reg_val;
98
99 reg = rvu->afreg_base + ((block << 28) | offset);
100again:
101 reg_val = readq(reg);
102 if (zero && !(reg_val & mask))
103 return 0;
104 if (!zero && (reg_val & mask))
105 return 0;
106 if (time_before(jiffies, timeout)) {
107 usleep_range(1, 5);
108 goto again;
109 }
110 return -EBUSY;
111}
112
113int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
114{
115 int id;
116
117 if (!rsrc->bmap)
118 return -EINVAL;
119
120 id = find_first_zero_bit(rsrc->bmap, rsrc->max);
121 if (id >= rsrc->max)
122 return -ENOSPC;
123
124 __set_bit(id, rsrc->bmap);
125
126 return id;
127}
128
129int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
130{
131 int start;
132
133 if (!rsrc->bmap)
134 return -EINVAL;
135
136 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
137 if (start >= rsrc->max)
138 return -ENOSPC;
139
140 bitmap_set(rsrc->bmap, start, nrsrc);
141 return start;
142}
143
144static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
145{
146 if (!rsrc->bmap)
147 return;
148 if (start >= rsrc->max)
149 return;
150
151 bitmap_clear(rsrc->bmap, start, nrsrc);
152}
153
154bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
155{
156 int start;
157
158 if (!rsrc->bmap)
159 return false;
160
161 start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
162 if (start >= rsrc->max)
163 return false;
164
165 return true;
166}
167
168void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
169{
170 if (!rsrc->bmap)
171 return;
172
173 __clear_bit(id, rsrc->bmap);
174}
175
176int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
177{
178 int used;
179
180 if (!rsrc->bmap)
181 return 0;
182
183 used = bitmap_weight(rsrc->bmap, rsrc->max);
184 return (rsrc->max - used);
185}
186
187bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
188{
189 if (!rsrc->bmap)
190 return false;
191
192 return !test_bit(id, rsrc->bmap);
193}
194
195int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
196{
197 rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
198 sizeof(long), GFP_KERNEL);
199 if (!rsrc->bmap)
200 return -ENOMEM;
201 return 0;
202}
203
204
205int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
206{
207 u16 match = 0;
208 int lf;
209
210 mutex_lock(&rvu->rsrc_lock);
211 for (lf = 0; lf < block->lf.max; lf++) {
212 if (block->fn_map[lf] == pcifunc) {
213 if (slot == match) {
214 mutex_unlock(&rvu->rsrc_lock);
215 return lf;
216 }
217 match++;
218 }
219 }
220 mutex_unlock(&rvu->rsrc_lock);
221 return -ENODEV;
222}
223
224
225
226
227
228
229
230
231
232
/* Resolve a block type to the block address (BLKADDR_*) visible to
 * @pcifunc. Single-instance blocks (NPC/NPA/SSO/SSOW/TIM) map directly.
 * NIX and CPT exist in up to two instances, so for a non-zero pcifunc the
 * PF/VF provisioning registers are read to find which instance the
 * function is actually mapped to; pcifunc == 0 defaults to instance 0.
 * Returns the block address, or -ENODEV if unknown/unimplemented.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For pcifunc zero, assume the first NIX instance */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For pcifunc zero, assume the first CPT instance */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Determine whether pcifunc is a PF or a VF to pick the right
	 * set of provisioning registers.
	 */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check which NIX instance this PF/VF is provisioned to:
	 * a non-zero CFG value means it is mapped to that instance.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	/* Same lookup for the two CPT instances */
	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	/* Only hand back addresses for blocks that actually exist */
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
320
/* Book-keep an LF attach or detach: record the owning pcifunc in the
 * block's fn_map, update the per-function LF counters in @pfvf, and
 * mirror the new LF count into the RVU provisioning register so HW
 * knows how many LFs this PF/VF owns.
 * @attach: true to attach LF @lf to @pcifunc, false to detach it.
 */
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* fn_map of 0 means "free" */
	block->fn_map[lf] = attach ? pcifunc : 0;

	/* NPA/NIX allow at most one LF per function (bool flag);
	 * the other blocks keep a running count.
	 */
	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	/* Tell HW the updated LF count for this PF or HWVF */
	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
382
/* Extract the PF number from a pcifunc (PF<<RVU_PFVF_PF_SHIFT | func). */
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
387
388void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
389{
390 u64 cfg;
391
392
393 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
394 if (numvfs)
395 *numvfs = (cfg >> 12) & 0xFF;
396 if (hwvf)
397 *hwvf = cfg & 0xFFF;
398}
399
400static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
401{
402 int pf, func;
403 u64 cfg;
404
405 pf = rvu_get_pf(pcifunc);
406 func = pcifunc & RVU_PFVF_FUNC_MASK;
407
408
409 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
410
411 return ((cfg & 0xFFF) + func - 1);
412}
413
414struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
415{
416
417 if (pcifunc & RVU_PFVF_FUNC_MASK)
418 return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
419 else
420 return &rvu->pf[rvu_get_pf(pcifunc)];
421}
422
423static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
424{
425 int pf, vf, nvfs;
426 u64 cfg;
427
428 pf = rvu_get_pf(pcifunc);
429 if (pf >= rvu->hw->total_pfs)
430 return false;
431
432 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
433 return true;
434
435
436 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
437 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
438 nvfs = (cfg >> 12) & 0xFF;
439 if (vf >= nvfs)
440 return false;
441
442 return true;
443}
444
445bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
446{
447 struct rvu_block *block;
448
449 if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
450 return false;
451
452 block = &hw->block[blkaddr];
453 return block->implemented;
454}
455
456static void rvu_check_block_implemented(struct rvu *rvu)
457{
458 struct rvu_hwinfo *hw = rvu->hw;
459 struct rvu_block *block;
460 int blkid;
461 u64 cfg;
462
463
464 for (blkid = 0; blkid < BLK_COUNT; blkid++) {
465 block = &hw->block[blkid];
466 cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
467 if (cfg & BIT_ULL(11))
468 block->implemented = true;
469 }
470}
471
/* Advertise the RVUM block revision ID so other entities (e.g. firmware
 * or PF drivers) can detect that the AF driver is loaded.
 */
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}
478
/* Clear the advertised RVUM revision ID (counterpart of
 * rvu_setup_rvum_blk_revid(), used on driver teardown).
 */
static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
484
485int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
486{
487 int err;
488
489 if (!block->implemented)
490 return 0;
491
492 rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
493 err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
494 true);
495 return err;
496}
497
498static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
499{
500 struct rvu_block *block = &rvu->hw->block[blkaddr];
501
502 if (!block->implemented)
503 return;
504
505 rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
506 rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
507}
508
/* Bring every implemented HW block to a known state by resetting each
 * one in turn (unimplemented blocks are skipped inside rvu_block_reset).
 */
static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
526
527static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
528{
529 struct rvu_pfvf *pfvf;
530 u64 cfg;
531 int lf;
532
533 for (lf = 0; lf < block->lf.max; lf++) {
534 cfg = rvu_read64(rvu, block->addr,
535 block->lfcfg_reg | (lf << block->lfshift));
536 if (!(cfg & BIT_ULL(63)))
537 continue;
538
539
540 __set_bit(lf, block->lf.bmap);
541
542
543 pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
544 rvu_update_rsrc_map(rvu, pfvf, block,
545 (cfg >> 8) & 0xFFFF, lf, true);
546
547
548 rvu_set_msix_offset(rvu, pfvf, block, lf);
549 }
550}
551
552static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
553{
554 int min_vecs;
555
556 if (!vf)
557 goto check_pf;
558
559 if (!nvecs) {
560 dev_warn(rvu->dev,
561 "PF%d:VF%d is configured with zero msix vectors, %d\n",
562 pf, vf - 1, nvecs);
563 }
564 return;
565
566check_pf:
567 if (pf == 0)
568 min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
569 else
570 min_vecs = RVU_PF_INT_VEC_CNT;
571
572 if (!(nvecs < min_vecs))
573 return;
574 dev_warn(rvu->dev,
575 "PF%d is configured with too few vectors, %d, min is %d\n",
576 pf, nvecs, min_vecs);
577}
578
/* Set up MSIX vector management for every enabled PF and its VFs:
 * allocate a per-function bitmap of its MSIX vectors and an LF<->vector
 * map, reserve the vectors each function uses itself, and finally map
 * the MSIX table region so the AF can service it via DMA.
 * Returns 0 or a negative errno.
 * NOTE(review): on mid-loop allocation failure the bitmaps allocated so
 * far are not freed here; confirm callers clean up via
 * rvu_free_hw_resources().
 */
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* Skip PFs that are not enabled (bit 20) */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Total MSIX vectors available to this PF (bits 43:32 + 1) */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Bitmap tracking which of the PF's vectors are in use */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Per-vector map back to the LF that owns it */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* PF0 (the AF) keeps its own vector accounting elsewhere */
		if (!pf)
			goto setup_vfmsix;

		/* Reserve the vectors the PF itself uses: count comes from
		 * INT_CFG bits 19:12, and the allocated offset is written
		 * back into INT_CFG bits 10:0.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Same setup for each of this PF's VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* VF vector budget is in bits 11:0 of the PF's
			 * MSIX_CFG register.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Reserve the VF's own vectors and record their
			 * offset in the HWVF INT_CFG register.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* Map the MSIX table region: prefer the firmware-provided base,
	 * fall back to the current HW register value, then program the
	 * IOMMU-translated address back so HW DMA hits the right place.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	/* Remember the original base so it can be restored on teardown */
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
689
/* Restore the MSIX table base address HW had before the AF remapped it
 * through the IOMMU (see rvu_setup_msix_resources()).
 */
static void rvu_reset_msix(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}
696
/* Tear down everything rvu_setup_hw_resources() built: subsystem memory,
 * per-block LF bitmaps, per-PF/VF MSIX bitmaps, and the DMA mapping of
 * the MSIX table (restoring the original base address).
 */
static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free LF bitmaps of every block */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX vector bitmaps of all PFs and HWVFs */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap the MSIX table only if it was actually mapped */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}
738
/* Assign a MAC address to every CGX-mapped PF and every VF: prefer the
 * address supplied by firmware (fwdata tables), otherwise generate a
 * random one. The chosen address is also saved as the default MAC.
 * PF0 (the AF) gets no PF MAC of its own but its VFs (LBK VFs) do.
 */
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0 (AF) skip the PF MAC and go set up its VFs */
		if (!pf)
			goto lbkvf;

		/* Only CGX-mapped PFs carry a port MAC */
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MACs to all VFs of this PF */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
784
785static int rvu_fwdata_init(struct rvu *rvu)
786{
787 u64 fwdbase;
788 int err;
789
790
791 err = cgx_get_fwdata_base(&fwdbase);
792 if (err)
793 goto fail;
794 rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
795 if (!rvu->fwdata)
796 goto fail;
797 if (!is_rvu_fwdata_valid(rvu)) {
798 dev_err(rvu->dev,
799 "Mismatch in 'fwdata' struct btw kernel and firmware\n");
800 iounmap(rvu->fwdata);
801 rvu->fwdata = NULL;
802 return -EINVAL;
803 }
804 return 0;
805fail:
806 dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
807 return -EIO;
808}
809
/* Unmap the firmware 'fwdata' region if it was successfully mapped. */
static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
815
/* Initialize the driver-side descriptor for one NIX block instance
 * (NIX0 or NIX1): max LF count from NIX_AF_CONST2, the register offsets
 * used for LF provisioning/reset, and the LF allocation bitmap.
 * Returns 0 (also when the block is unimplemented) or -ENOMEM.
 */
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}
843
/* Initialize the driver-side descriptor for one CPT block instance
 * (CPT0 or CPT1): max LF count from CPT_AF_CONSTANTS0, provisioning and
 * reset register offsets, and the LF allocation bitmap.
 * Returns 0 (also when the block is unimplemented) or -ENOMEM.
 */
static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	/* CPT LFs can be attached at multiple slots per function */
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}
871
872static void rvu_get_lbk_bufsize(struct rvu *rvu)
873{
874 struct pci_dev *pdev = NULL;
875 void __iomem *base;
876 u64 lbk_const;
877
878 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
879 PCI_DEVID_OCTEONTX2_LBK, pdev);
880 if (!pdev)
881 return;
882
883 base = pci_ioremap_bar(pdev, 0);
884 if (!base)
885 goto err_put;
886
887 lbk_const = readq(base + LBK_CONST);
888
889
890 rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
891
892 iounmap(base);
893err_put:
894 pci_dev_put(pdev);
895}
896
/* Top-level HW provisioning: discover PF/VF counts, describe each block
 * (max LFs, register offsets, LF bitmap), allocate PF/VF state, then
 * bring up MSIX, channels, NPC, CGX, NPA and NIX in order. On failure
 * the goto chain below unwinds whatever was already initialized.
 * Returns 0 or a negative errno.
 */
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

nix:
	/* NIX may have two instances; each sets up its own bitmap */
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err)
		return err;
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err)
		return err;

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

ssow:
	/* Init SSO workslot's bitmap.
	 * NOTE(review): the LF max below reuses 'cfg' read from
	 * SSO_AF_CONST above (bits 63:56 = SSOW LF count); if SSO is
	 * unimplemented, cfg still holds RVU_PRIV_CONST — confirm this
	 * matches the intended upstream behavior.
	 */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

cpt:
	/* CPT may have two instances; each sets up its own bitmap */
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err)
		return err;
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err)
		return err;

	/* Allocate per-PF and per-HWVF state (devm-managed) */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf)
		return -ENOMEM;

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf)
		return -ENOMEM;

	mutex_init(&rvu->rsrc_lock);

	/* fwdata is optional; failure here is logged, not fatal */
	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err)
		return err;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* LF -> owning pcifunc map for this block */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Pick up any LFs already provisioned (e.g. by firmware) */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err)
		goto npc_err;

	err = rvu_cgx_init(rvu);
	if (err)
		goto cgx_err;

	/* Assign MACs, needs CGX to be initialized first */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err)
		goto npa_err;

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err)
		goto nix_err;

	rvu_program_channels(rvu);

	return 0;

	/* Error unwind: each label tears down everything initialized
	 * after the point its matching goto is taken from.
	 */
nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
1090
1091
/* Free an admin queue allocated by rvu_aq_alloc(): its instruction and
 * result queue memory, then the queue struct itself. NULL-safe.
 */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}
1101
1102int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1103 int qsize, int inst_size, int res_size)
1104{
1105 struct admin_queue *aq;
1106 int err;
1107
1108 *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1109 if (!*ad_queue)
1110 return -ENOMEM;
1111 aq = *ad_queue;
1112
1113
1114 err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1115 if (err) {
1116 devm_kfree(rvu->dev, aq);
1117 return err;
1118 }
1119
1120
1121 err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1122 if (err) {
1123 rvu_aq_free(rvu, aq);
1124 return err;
1125 }
1126
1127 spin_lock_init(&aq->lock);
1128 return 0;
1129}
1130
1131int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1132 struct ready_msg_rsp *rsp)
1133{
1134 if (rvu->fwdata) {
1135 rsp->rclk_freq = rvu->fwdata->rclk;
1136 rsp->sclk_freq = rvu->fwdata->sclk;
1137 }
1138 return 0;
1139}
1140
1141
1142
1143
/* Number of LFs of the block at @blkaddr currently attached to the
 * given PF/VF (0 for unknown block addresses).
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}
1165
1166
1167static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1168{
1169 switch (blktype) {
1170 case BLKTYPE_NPA:
1171 return pfvf->npalf ? 1 : 0;
1172 case BLKTYPE_NIX:
1173 return pfvf->nixlf ? 1 : 0;
1174 case BLKTYPE_SSO:
1175 return !!pfvf->sso;
1176 case BLKTYPE_SSOW:
1177 return !!pfvf->ssow;
1178 case BLKTYPE_TIM:
1179 return !!pfvf->timlfs;
1180 case BLKTYPE_CPT:
1181 return pfvf->cptlfs || pfvf->cpt1_lfs;
1182 }
1183
1184 return false;
1185}
1186
1187bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1188{
1189 struct rvu_pfvf *pfvf;
1190
1191 if (!is_pf_func_valid(rvu, pcifunc))
1192 return false;
1193
1194 pfvf = rvu_get_pfvf(rvu, pcifunc);
1195
1196
1197 if (!is_blktype_attached(pfvf, blktype))
1198 return false;
1199
1200 return true;
1201}
1202
/* Ask HW which LF of @block is attached to (@pcifunc, @slot) via the
 * block's lookup/debug register. Bit 13 triggers the lookup and is
 * polled until HW clears it; bit 12 in the result indicates a valid
 * match, bits 11:0 carry the LF number.
 * Returns the LF number or -1 if no LF is mapped at that slot.
 * NOTE(review): the poll below busy-waits with no timeout — relies on
 * HW always completing the lookup.
 */
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);

	/* Wait for the lookup to complete (HW clears bit 13) */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Bit 12 clear means no LF is attached at this slot */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
1223
/* Detach all LFs of block type @blktype from @pcifunc: disable each LF
 * in HW, clear the driver's ownership bookkeeping, return the LF to the
 * free pool and release its MSIX vectors.
 * Caller is expected to hold rvu->rsrc_lock.
 */
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Ask HW which LF sits at this slot */
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Disable the LF in HW */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
1265
/* Detach resources from @pcifunc. With @detach NULL or detach->partial
 * unset, everything is detached; otherwise only the block types flagged
 * in the request are. Serialized via rvu->rsrc_lock. Always returns 0.
 */
static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			/* Partial detach: skip blocks not requested */
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
1311
1312int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1313 struct rsrc_detach *detach,
1314 struct msg_rsp *rsp)
1315{
1316 return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1317}
1318
/* Decide which NIX instance (NIX0/NIX1) @pcifunc should use, record the
 * choice plus the matching RX/TX interface IDs in its pfvf state, and
 * return the chosen block address.
 * CGX-mapped functions inherit their parent PF's NIX; AF's loopback VFs
 * are spread across both instances (odd VF -> NIX1 when implemented);
 * everything else defaults to NIX0.
 */
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	/* All CGX mapped PFs/VFs use the NIX their parent PF is on */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Spread loopback VFs across the two NIX instances:
		 * odd-numbered VFs go to NIX1 when it exists.
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;

		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* Cache the instance and its NPC RX/TX interface IDs */
	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}
1356
1357static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1358 u16 pcifunc, struct rsrc_attach *attach)
1359{
1360 int blkaddr;
1361
1362 switch (blktype) {
1363 case BLKTYPE_NIX:
1364 blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1365 break;
1366 case BLKTYPE_CPT:
1367 if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1368 return rvu_get_blkaddr(rvu, blktype, 0);
1369 blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1370 BLKADDR_CPT0;
1371 if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1372 return -ENODEV;
1373 break;
1374 default:
1375 return rvu_get_blkaddr(rvu, blktype, 0);
1376 }
1377
1378 if (is_block_implemented(rvu->hw, blkaddr))
1379 return blkaddr;
1380
1381 return -ENODEV;
1382}
1383
/* Attach 'num_lfs' LFs of the given block type to a PF/VF.
 *
 * For each LF: allocate a free one from the block's bitmap, program the
 * LF config register to bind it to 'pcifunc' at the next slot, record
 * the mapping in software state and reserve MSIX vectors for it.
 * Returns silently (partial attach) if the block runs out of free LFs.
 * Caller holds rvu->rsrc_lock (see rvu_mbox_handler_attach_resources).
 */
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		/* Bind LF to this pcifunc/slot; bit 63 presumably marks the
		 * mapping valid — layout per hardware spec.
		 */
		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this partition */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
1421
1422static int rvu_check_rsrc_availability(struct rvu *rvu,
1423 struct rsrc_attach *req, u16 pcifunc)
1424{
1425 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1426 int free_lfs, mappedlfs, blkaddr;
1427 struct rvu_hwinfo *hw = rvu->hw;
1428 struct rvu_block *block;
1429
1430
1431 if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1432 block = &hw->block[BLKADDR_NPA];
1433 free_lfs = rvu_rsrc_free_count(&block->lf);
1434 if (!free_lfs)
1435 goto fail;
1436 } else if (req->npalf) {
1437 dev_err(&rvu->pdev->dev,
1438 "Func 0x%x: Invalid req, already has NPA\n",
1439 pcifunc);
1440 return -EINVAL;
1441 }
1442
1443
1444 if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1445 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1446 pcifunc, req);
1447 if (blkaddr < 0)
1448 return blkaddr;
1449 block = &hw->block[blkaddr];
1450 free_lfs = rvu_rsrc_free_count(&block->lf);
1451 if (!free_lfs)
1452 goto fail;
1453 } else if (req->nixlf) {
1454 dev_err(&rvu->pdev->dev,
1455 "Func 0x%x: Invalid req, already has NIX\n",
1456 pcifunc);
1457 return -EINVAL;
1458 }
1459
1460 if (req->sso) {
1461 block = &hw->block[BLKADDR_SSO];
1462
1463 if (req->sso > block->lf.max) {
1464 dev_err(&rvu->pdev->dev,
1465 "Func 0x%x: Invalid SSO req, %d > max %d\n",
1466 pcifunc, req->sso, block->lf.max);
1467 return -EINVAL;
1468 }
1469 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1470 free_lfs = rvu_rsrc_free_count(&block->lf);
1471
1472 if (req->sso > mappedlfs &&
1473 ((req->sso - mappedlfs) > free_lfs))
1474 goto fail;
1475 }
1476
1477 if (req->ssow) {
1478 block = &hw->block[BLKADDR_SSOW];
1479 if (req->ssow > block->lf.max) {
1480 dev_err(&rvu->pdev->dev,
1481 "Func 0x%x: Invalid SSOW req, %d > max %d\n",
1482 pcifunc, req->sso, block->lf.max);
1483 return -EINVAL;
1484 }
1485 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1486 free_lfs = rvu_rsrc_free_count(&block->lf);
1487 if (req->ssow > mappedlfs &&
1488 ((req->ssow - mappedlfs) > free_lfs))
1489 goto fail;
1490 }
1491
1492 if (req->timlfs) {
1493 block = &hw->block[BLKADDR_TIM];
1494 if (req->timlfs > block->lf.max) {
1495 dev_err(&rvu->pdev->dev,
1496 "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1497 pcifunc, req->timlfs, block->lf.max);
1498 return -EINVAL;
1499 }
1500 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1501 free_lfs = rvu_rsrc_free_count(&block->lf);
1502 if (req->timlfs > mappedlfs &&
1503 ((req->timlfs - mappedlfs) > free_lfs))
1504 goto fail;
1505 }
1506
1507 if (req->cptlfs) {
1508 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1509 pcifunc, req);
1510 if (blkaddr < 0)
1511 return blkaddr;
1512 block = &hw->block[blkaddr];
1513 if (req->cptlfs > block->lf.max) {
1514 dev_err(&rvu->pdev->dev,
1515 "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1516 pcifunc, req->cptlfs, block->lf.max);
1517 return -EINVAL;
1518 }
1519 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1520 free_lfs = rvu_rsrc_free_count(&block->lf);
1521 if (req->cptlfs > mappedlfs &&
1522 ((req->cptlfs - mappedlfs) > free_lfs))
1523 goto fail;
1524 }
1525
1526 return 0;
1527
1528fail:
1529 dev_info(rvu->dev, "Request for %s failed\n", block->name);
1530 return -ENOSPC;
1531}
1532
1533static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1534 struct rsrc_attach *attach)
1535{
1536 int blkaddr, num_lfs;
1537
1538 blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1539 attach->hdr.pcifunc, attach);
1540 if (blkaddr < 0)
1541 return false;
1542
1543 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1544 blkaddr);
1545
1546 return !!num_lfs;
1547}
1548
/* Mbox handler for RSRC_ATTACH: attach the requested LFs to the sender.
 *
 * Unless 'modify' is set, all currently held resources are detached
 * first (fresh attach). Availability of everything requested is then
 * checked up front under rsrc_lock so the attach is all-or-nothing.
 * For the multi-LF blocks, a 'modify' request detaches the existing
 * LFs of that type before attaching the newly requested count; for CPT
 * this only happens when the new request resolves to a block the
 * function already has LFs on (CPT0 vs CPT1).
 */
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* A 'modify' request re-attaches from scratch: the
		 * existing SSO LFs are detached before attaching the
		 * requested count.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		/* Detach existing CPT LFs only when the request targets
		 * the same CPT block they currently live on.
		 */
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
1612
1613static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1614 int blkaddr, int lf)
1615{
1616 u16 vec;
1617
1618 if (lf < 0)
1619 return MSIX_VECTOR_INVALID;
1620
1621 for (vec = 0; vec < pfvf->msix.max; vec++) {
1622 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1623 return vec;
1624 }
1625 return MSIX_VECTOR_INVALID;
1626}
1627
/* Reserve a contiguous range of MSIX vectors for an LF and program its
 * starting vector offset into the block's MSIX config register.
 * Bits [19:12] of that register hold the LF's vector count; the low
 * 11 bits are rewritten here with the allocated offset. Silently does
 * nothing if no contiguous range is available.
 */
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Record which block/LF owns each vector of the range */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}
1652
/* Undo rvu_set_msix_offset(): clear the LF's MSIX offset in hardware,
 * drop the per-vector ownership records and free the contiguous vector
 * range back to the PF/VF's MSIX bitmap.
 * NOTE(review): assumes the LF has a valid recorded mapping; if
 * rvu_get_msix_offset() returned MSIX_VECTOR_INVALID the msix_lfmap
 * update below would index out of range — confirm callers guarantee
 * the LF was attached via rvu_set_msix_offset().
 */
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear the offset field (low 11 bits) in hardware first */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Drop ownership records for the whole vector range */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the vector range back to the bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
1676
/* Mbox handler for MSIX_OFFSET: report the starting MSIX vector of
 * every LF attached to the requesting PF/VF, per block type.
 * Slots without an LF report MSIX_VECTOR_INVALID (rvu_get_msix_offset
 * returns it when rvu_get_lf() fails). Returns 0 even when the
 * function has no MSIX bitmap at all.
 */
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* NPA: at most one LF, slot 0 */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* NIX: resolve which NIX block this function is attached to */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	/* Multi-LF blocks: report one offset per attached slot */
	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
1739
1740int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1741 struct msg_rsp *rsp)
1742{
1743 u16 pcifunc = req->hdr.pcifunc;
1744 u16 vf, numvfs;
1745 u64 cfg;
1746
1747 vf = pcifunc & RVU_PFVF_FUNC_MASK;
1748 cfg = rvu_read64(rvu, BLKADDR_RVUM,
1749 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1750 numvfs = (cfg >> 12) & 0xFF;
1751
1752 if (vf && vf <= numvfs)
1753 __rvu_flr_handler(rvu, pcifunc);
1754 else
1755 return RVU_INVALID_VF_ID;
1756
1757 return 0;
1758}
1759
1760int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1761 struct get_hw_cap_rsp *rsp)
1762{
1763 struct rvu_hwinfo *hw = rvu->hw;
1764
1765 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1766 rsp->nix_shaping = hw->cap.nix_shaping;
1767
1768 return 0;
1769}
1770
/* Mbox handler for SET_VF_PERM: a PF sets permission flags on one of
 * its VFs. Rejected with -EOPNOTSUPP when the sender is itself a VF
 * or one of AF's VFs.
 *
 * RESET_VF_PERM clears the VF's permission bits. Otherwise, if the
 * request toggles the trusted state, PF_SET_VF_TRUSTED is flipped;
 * when trust is being revoked, the VF's ALLMULTI and PROMISC NPC MCE
 * entries are disabled.
 */
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only a PF may change its VFs' permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	/* VF 'req->vf' of this PF is pcifunc (PF | vf+1) */
	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* Trust was just revoked: drop the VF's multicast and
		 * promiscuous NPC entries (no-op if it has no NIX LF).
		 */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
1812
/* Dispatch one mbox request to its rvu_mbox_handler_xxx() routine.
 *
 * The M() expansion of MBOX_MESSAGES generates one switch case per
 * message ID: allocate the response, fill its header, invoke the
 * handler and store its return code in the response. For a few message
 * types (DETACH_RESOURCES, NIX_TXSCH_FREE, VF_FLR) a failed response
 * allocation is tolerated and the handler still runs. Requests with a
 * bad signature or unknown ID get an 'invalid message' reply.
 */
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers must run even without a response buf */\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);		\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
1863
/* Common bottom half for AF<->PF and AF<->VF mailbox request work.
 *
 * Resolves the raising device id from the work item's position in the
 * work array, walks all queued request messages, stamps the true PF/VF
 * id into each message's pcifunc (so a function cannot spoof another),
 * dispatches each to rvu_process_mbox_msg() and finally signals the
 * accumulated responses back to the requester.
 */
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	/* Work item index within the array == mbox device id */
	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}
1934
1935static inline void rvu_afpf_mbox_handler(struct work_struct *work)
1936{
1937 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1938
1939 __rvu_mbox_handler(mwork, TYPE_AFPF);
1940}
1941
1942static inline void rvu_afvf_mbox_handler(struct work_struct *work)
1943{
1944 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1945
1946 __rvu_mbox_handler(mwork, TYPE_AFVF);
1947}
1948
/* Common bottom half for AF<->PF / AF<->VF 'up' mailbox responses
 * (replies to notifications the AF sent, e.g. CGX link events).
 *
 * Walks the queued response messages, validates ID and signature, logs
 * any error return codes (link-event replies excepted) and finally
 * resets the mailbox region for the device.
 */
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	/* Work item index within the array == mbox device id */
	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		/* Advance past this message even when it was malformed */
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}
2016
2017static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2018{
2019 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2020
2021 __rvu_mbox_up_handler(mwork, TYPE_AFPF);
2022}
2023
2024static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2025{
2026 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2027
2028 __rvu_mbox_up_handler(mwork, TYPE_AFVF);
2029}
2030
/* ioremap the shared mailbox memory for 'num' devices into 'mbox_addr'.
 *
 * TYPE_AFVF: regions for AF's own VFs. With per-PF mbox registers the
 * VF regions start one MBOX_SIZE after PF0's BAR4 address; on older
 * silicon they are located via RVU_PF_VF_BAR4_ADDR. Either way,
 * regions are laid out at MBOX_SIZE strides.
 * TYPE_AFPF: one region per PF, either from that PF's own BAR4
 * register or at MBOX_SIZE strides from the common RVU_AF_PF_BAR4_ADDR
 * base.
 * On any ioremap failure, previously mapped regions are unmapped and
 * -ENOMEM is returned.
 */
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For AF<->VF mailboxes */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
						  MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For AF<->PF mailboxes */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	/* Unwind whatever was mapped so far */
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
2084
2085static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2086 int type, int num,
2087 void (mbox_handler)(struct work_struct *),
2088 void (mbox_up_handler)(struct work_struct *))
2089{
2090 int err = -EINVAL, i, dir, dir_up;
2091 void __iomem *reg_base;
2092 struct rvu_work *mwork;
2093 void **mbox_regions;
2094 const char *name;
2095
2096 mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2097 if (!mbox_regions)
2098 return -ENOMEM;
2099
2100 switch (type) {
2101 case TYPE_AFPF:
2102 name = "rvu_afpf_mailbox";
2103 dir = MBOX_DIR_AFPF;
2104 dir_up = MBOX_DIR_AFPF_UP;
2105 reg_base = rvu->afreg_base;
2106 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2107 if (err)
2108 goto free_regions;
2109 break;
2110 case TYPE_AFVF:
2111 name = "rvu_afvf_mailbox";
2112 dir = MBOX_DIR_PFVF;
2113 dir_up = MBOX_DIR_PFVF_UP;
2114 reg_base = rvu->pfreg_base;
2115 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2116 if (err)
2117 goto free_regions;
2118 break;
2119 default:
2120 return err;
2121 }
2122
2123 mw->mbox_wq = alloc_workqueue(name,
2124 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2125 num);
2126 if (!mw->mbox_wq) {
2127 err = -ENOMEM;
2128 goto unmap_regions;
2129 }
2130
2131 mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2132 sizeof(struct rvu_work), GFP_KERNEL);
2133 if (!mw->mbox_wrk) {
2134 err = -ENOMEM;
2135 goto exit;
2136 }
2137
2138 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2139 sizeof(struct rvu_work), GFP_KERNEL);
2140 if (!mw->mbox_wrk_up) {
2141 err = -ENOMEM;
2142 goto exit;
2143 }
2144
2145 err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2146 reg_base, dir, num);
2147 if (err)
2148 goto exit;
2149
2150 err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2151 reg_base, dir_up, num);
2152 if (err)
2153 goto exit;
2154
2155 for (i = 0; i < num; i++) {
2156 mwork = &mw->mbox_wrk[i];
2157 mwork->rvu = rvu;
2158 INIT_WORK(&mwork->work, mbox_handler);
2159
2160 mwork = &mw->mbox_wrk_up[i];
2161 mwork->rvu = rvu;
2162 INIT_WORK(&mwork->work, mbox_up_handler);
2163 }
2164 kfree(mbox_regions);
2165 return 0;
2166
2167exit:
2168 destroy_workqueue(mw->mbox_wq);
2169unmap_regions:
2170 while (num--)
2171 iounmap((void __iomem *)mbox_regions[num]);
2172free_regions:
2173 kfree(mbox_regions);
2174 return err;
2175}
2176
2177static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2178{
2179 struct otx2_mbox *mbox = &mw->mbox;
2180 struct otx2_mbox_dev *mdev;
2181 int devid;
2182
2183 if (mw->mbox_wq) {
2184 flush_workqueue(mw->mbox_wq);
2185 destroy_workqueue(mw->mbox_wq);
2186 mw->mbox_wq = NULL;
2187 }
2188
2189 for (devid = 0; devid < mbox->ndevs; devid++) {
2190 mdev = &mbox->dev[devid];
2191 if (mdev->hwbase)
2192 iounmap((void __iomem *)mdev->hwbase);
2193 }
2194
2195 otx2_mbox_destroy(&mw->mbox);
2196 otx2_mbox_destroy(&mw->mbox_up);
2197}
2198
/* Queue mbox bottom-half work for every device whose interrupt bit is
 * set in 'intr'. Devices 'first'..'mdevs-1' map to interrupt bits
 * 0..(mdevs-first-1).
 *
 * For each signalled device, the message count in the shared mbox
 * header is latched into the work struct and cleared in the header
 * before queueing, for both the request and 'up' mailboxes — so each
 * burst of messages is handed to the worker exactly once.
 */
static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* Snapshot and clear num_msgs so the worker sees a
		 * stable count even if more messages arrive meanwhile.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}
2239
/* AF mailbox interrupt: one or more PFs (and/or AF's own VFs)
 * signalled new mbox messages. Ack the interrupt registers and queue
 * bottom-half work for each raising device.
 */
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Ensure mbox memory reads are not reordered before the
	 * interrupt read above.
	 */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts; VFs 64..127 use interrupt register
	 * set 1 and start at index 64 in the work array.
	 */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}
2275
/* Enable mailbox interrupts from all PFs to the AF.
 * Any stale pending bits are cleared first; the enable mask excludes
 * bit 0 (~1ULL), i.e. PF0 — the AF itself.
 */
static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}
2288
2289static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2290{
2291 struct rvu_block *block;
2292 int slot, lf, num_lfs;
2293 int err;
2294
2295 block = &rvu->hw->block[blkaddr];
2296 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2297 block->addr);
2298 if (!num_lfs)
2299 return;
2300 for (slot = 0; slot < num_lfs; slot++) {
2301 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2302 if (lf < 0)
2303 continue;
2304
2305
2306 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2307 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2308 else if (block->addr == BLKADDR_NPA)
2309 rvu_npa_lf_teardown(rvu, pcifunc, lf);
2310 else if ((block->addr == BLKADDR_CPT0) ||
2311 (block->addr == BLKADDR_CPT1))
2312 rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
2313
2314 err = rvu_lf_reset(rvu, block, lf);
2315 if (err) {
2316 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2317 block->addr, lf);
2318 }
2319 }
2320}
2321
/* Tear down all resources held by 'pcifunc' on Function Level Reset.
 *
 * Blocks are cleaned in a fixed order with NPA last — the other blocks
 * presumably reference NPA-managed memory, so preserve this ordering.
 * Afterwards the LMT map entry is reset and all LFs are detached.
 * Serialized by rvu->flr_lock.
 */
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);

	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_reset_lmt_map_tbl(rvu, pcifunc);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}
2342
/* FLR for one of AF's own VFs: run the common teardown for its pcifunc
 * (AF VF index 'vf' maps to pcifunc vf+1), then clear the hardware
 * transaction-pending bit and re-enable the FLR interrupt for that VF.
 * VFs 64..127 live in register set 1.
 */
static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR done and re-arm the per-VF FLR interrupt */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
2359
/* Workqueue handler for FLR. Work items at index >= total_pfs belong
 * to AF's own VFs and are delegated to rvu_afvf_flr_handler().
 * For a PF, all of its VFs are torn down first, then the PF itself;
 * finally the transaction-pending bit is cleared and the PF's FLR
 * interrupt re-enabled. Bits [19:12] of RVU_PRIV_PFX_CFG hold the
 * PF's enabled VF count.
 */
static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	/* Tear down the PF's VFs before the PF itself */
	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}
2389
/* Queue FLR work for AF's VFs whose FLR interrupt bits are set.
 * 'start_vf' is 0 or 64 and selects interrupt register set 0 or 1.
 * For each raising VF the interrupt is cleared and then disabled so it
 * cannot refire while queued; it is re-enabled in
 * rvu_afvf_flr_handler() once teardown completes.
 */
static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		/* VF work items follow the PF items in flr_wrk */
		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
	}
}
2412
/* FLR interrupt: queue teardown work for every PF whose FLR bit is
 * set, clearing and disabling each bit so it cannot refire until the
 * handler re-enables it; then scan AF's VFs for pending FLRs too.
 */
static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear interrupt */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);

			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable until the queued work re-enables it */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}
2443
2444static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2445{
2446 int vf;
2447
2448
2449
2450
2451 for (vf = 0; vf < 64; vf++) {
2452 if (intr & (1ULL << vf)) {
2453
2454 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2455
2456 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2457 }
2458 }
2459}
2460
2461
/* Master-Enable interrupt for AF's own VFs: handle per-VF ME bits for
 * both 64-bit VF register sets.
 * NOTE(review): the initial RVU_AF_PFME_INT read is overwritten before
 * use and looks redundant — confirm whether the read itself has a
 * required hardware side effect before removing it.
 */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}
2478
2479
/* Master-Enable interrupt for PFs: for every PF whose ME bit is set,
 * ack any pending transaction and clear its ME interrupt bit.
 */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* Ack the pending transaction for this PF */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* Clear the ME interrupt bit */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}
2504
/* Disable all AF interrupt sources (mbox, FLR, ME — bit 0 / PF0, the
 * AF itself, excluded via ~1ULL), free every requested IRQ and release
 * the MSIX vectors.
 */
static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq]) {
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
			rvu->irq_allocated[irq] = false;
		}
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}
2531
2532static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2533{
2534 struct rvu_pfvf *pfvf = &rvu->pf[0];
2535 int offset;
2536
2537 pfvf = &rvu->pf[0];
2538 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2539
2540
2541
2542
2543
2544 return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2545 offset;
2546}
2547
2548static int rvu_register_interrupts(struct rvu *rvu)
2549{
2550 int ret, offset, pf_vec_start;
2551
2552 rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2553
2554 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2555 NAME_SIZE, GFP_KERNEL);
2556 if (!rvu->irq_name)
2557 return -ENOMEM;
2558
2559 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2560 sizeof(bool), GFP_KERNEL);
2561 if (!rvu->irq_allocated)
2562 return -ENOMEM;
2563
2564
2565 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2566 rvu->num_vec, PCI_IRQ_MSIX);
2567 if (ret < 0) {
2568 dev_err(rvu->dev,
2569 "RVUAF: Request for %d msix vectors failed, ret %d\n",
2570 rvu->num_vec, ret);
2571 return ret;
2572 }
2573
2574
2575 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2576 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2577 rvu_mbox_intr_handler, 0,
2578 &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2579 if (ret) {
2580 dev_err(rvu->dev,
2581 "RVUAF: IRQ registration failed for mbox irq\n");
2582 goto fail;
2583 }
2584
2585 rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2586
2587
2588 rvu_enable_mbox_intr(rvu);
2589
2590
2591 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2592 "RVUAF FLR");
2593 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2594 rvu_flr_intr_handler, 0,
2595 &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2596 rvu);
2597 if (ret) {
2598 dev_err(rvu->dev,
2599 "RVUAF: IRQ registration failed for FLR\n");
2600 goto fail;
2601 }
2602 rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2603
2604
2605 rvu_write64(rvu, BLKADDR_RVUM,
2606 RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2607
2608 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2609 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2610
2611
2612 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2613 "RVUAF ME");
2614 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2615 rvu_me_pf_intr_handler, 0,
2616 &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2617 rvu);
2618 if (ret) {
2619 dev_err(rvu->dev,
2620 "RVUAF: IRQ registration failed for ME\n");
2621 }
2622 rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2623
2624
2625 rvu_write64(rvu, BLKADDR_RVUM,
2626 RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2627
2628 rvu_write64(rvu, BLKADDR_RVUM,
2629 RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2630
2631 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2632 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2633
2634 if (!rvu_afvf_msix_vectors_num_ok(rvu))
2635 return 0;
2636
2637
2638 pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2639 RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2640
2641
2642 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2643 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2644 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2645 rvu_mbox_intr_handler, 0,
2646 &rvu->irq_name[offset * NAME_SIZE],
2647 rvu);
2648 if (ret)
2649 dev_err(rvu->dev,
2650 "RVUAF: IRQ registration failed for Mbox0\n");
2651
2652 rvu->irq_allocated[offset] = true;
2653
2654
2655
2656
2657 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2658 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2659 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2660 rvu_mbox_intr_handler, 0,
2661 &rvu->irq_name[offset * NAME_SIZE],
2662 rvu);
2663 if (ret)
2664 dev_err(rvu->dev,
2665 "RVUAF: IRQ registration failed for Mbox1\n");
2666
2667 rvu->irq_allocated[offset] = true;
2668
2669
2670 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2671 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2672 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2673 rvu_flr_intr_handler, 0,
2674 &rvu->irq_name[offset * NAME_SIZE], rvu);
2675 if (ret) {
2676 dev_err(rvu->dev,
2677 "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2678 goto fail;
2679 }
2680 rvu->irq_allocated[offset] = true;
2681
2682 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2683 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2684 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2685 rvu_flr_intr_handler, 0,
2686 &rvu->irq_name[offset * NAME_SIZE], rvu);
2687 if (ret) {
2688 dev_err(rvu->dev,
2689 "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2690 goto fail;
2691 }
2692 rvu->irq_allocated[offset] = true;
2693
2694
2695 offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2696 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2697 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2698 rvu_me_vf_intr_handler, 0,
2699 &rvu->irq_name[offset * NAME_SIZE], rvu);
2700 if (ret) {
2701 dev_err(rvu->dev,
2702 "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2703 goto fail;
2704 }
2705 rvu->irq_allocated[offset] = true;
2706
2707 offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2708 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2709 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2710 rvu_me_vf_intr_handler, 0,
2711 &rvu->irq_name[offset * NAME_SIZE], rvu);
2712 if (ret) {
2713 dev_err(rvu->dev,
2714 "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2715 goto fail;
2716 }
2717 rvu->irq_allocated[offset] = true;
2718 return 0;
2719
2720fail:
2721 rvu_unregister_interrupts(rvu);
2722 return ret;
2723}
2724
2725static void rvu_flr_wq_destroy(struct rvu *rvu)
2726{
2727 if (rvu->flr_wq) {
2728 flush_workqueue(rvu->flr_wq);
2729 destroy_workqueue(rvu->flr_wq);
2730 rvu->flr_wq = NULL;
2731 }
2732}
2733
2734static int rvu_flr_init(struct rvu *rvu)
2735{
2736 int dev, num_devs;
2737 u64 cfg;
2738 int pf;
2739
2740
2741 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2742 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2743 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2744 cfg | BIT_ULL(22));
2745 }
2746
2747 rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2748 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2749 1);
2750 if (!rvu->flr_wq)
2751 return -ENOMEM;
2752
2753 num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2754 rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2755 sizeof(struct rvu_work), GFP_KERNEL);
2756 if (!rvu->flr_wrk) {
2757 destroy_workqueue(rvu->flr_wq);
2758 return -ENOMEM;
2759 }
2760
2761 for (dev = 0; dev < num_devs; dev++) {
2762 rvu->flr_wrk[dev].rvu = rvu;
2763 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2764 }
2765
2766 mutex_init(&rvu->flr_lock);
2767
2768 return 0;
2769}
2770
2771static void rvu_disable_afvf_intr(struct rvu *rvu)
2772{
2773 int vfs = rvu->vfs;
2774
2775 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
2776 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
2777 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
2778 if (vfs <= 64)
2779 return;
2780
2781 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
2782 INTR_MASK(vfs - 64));
2783 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2784 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2785}
2786
2787static void rvu_enable_afvf_intr(struct rvu *rvu)
2788{
2789 int vfs = rvu->vfs;
2790
2791
2792
2793
2794
2795 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
2796 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
2797
2798
2799 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
2800 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
2801 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
2802
2803
2804 if (vfs <= 64)
2805 return;
2806
2807 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
2808 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
2809 INTR_MASK(vfs - 64));
2810
2811 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
2812 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2813 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2814}
2815
/* Read the number of loopback (LBK) channels supported by the silicon.
 *
 * Probes the LBK PCI device, maps its BAR0 and reads the CSR at offset
 * 0x10 (presumably an LBK constants/capability register — confirm
 * against the hardware reference manual); bits [47:32] hold the channel
 * count.
 *
 * Returns the channel count on success, or -EIO when the LBK device is
 * absent or its BAR cannot be mapped.
 */
int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Channel count is in bits [47:32] of the register at 0x10 */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
2839
/* Enable SRIOV and bring up the AF<->VF mailbox path.
 *
 * Returns 0 on success — including when SRIOV is skipped because not
 * enough MSIX vectors or no VFs/LBK channels are available — or a
 * negative errno on failure.
 */
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Cap the VF count at the number of available LBK channels */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* NOTE(review): the cap of 62 looks tied to the 64-bit-wide VF
	 * interrupt/mailbox registers — confirm the intended limit.
	 */
	if (vfs > 62)
		vfs = 62;

	/* Save VF count for later interrupt and mailbox handling */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Ensure interrupt enables land before VFs can kick the mailbox */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}
2894
/* Disable SRIOV: quiesce AF<->VF interrupts and destroy the mailbox
 * before removing the VFs themselves.
 */
static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}
2901
2902static void rvu_update_module_params(struct rvu *rvu)
2903{
2904 const char *default_pfl_name = "default";
2905
2906 strscpy(rvu->mkex_pfl_name,
2907 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
2908 strscpy(rvu->kpu_pfl_name,
2909 kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
2910}
2911
/* PCI probe: bring up the RVU Admin Function.
 *
 * Maps the AF and PF CSR BARs, discovers and resets all RVU blocks,
 * sets up HW resources, AF<->PF mailboxes, FLR handling, interrupts,
 * devlink and (optionally) SRIOV.  On failure, teardown runs in the
 * reverse order of setup via the error-label ladder.
 */
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* PTP support is optional: only defer probing on -EPROBE_DEFER,
	 * otherwise continue without PTP (rvu->ptp = NULL).
	 */
	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map the AF and PF CSR regions */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params (mkex/kpu profile names) in rvu state */
	rvu_update_module_params(rvu);

	/* Discover which RVU blocks the hardware implements */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox between AF and the PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err)
		goto err_hwsetup;

	err = rvu_flr_init(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_flr;

	err = rvu_register_dl(rvu);
	if (err)
		goto err_irq;

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err)
		goto err_dl;

	/* Initialize debugfs entries */
	rvu_dbg_init(rvu);

	mutex_init(&rvu->rswitch.switch_lock);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
3041
/* PCI remove: tear down the AF in (roughly) the reverse order of
 * rvu_probe() — debugfs, devlink, interrupts, FLR workqueue, CGX,
 * firmware data, AF<->PF mailbox, SRIOV, blocks/resources, PTP and
 * finally the PCI device itself.
 */
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}
3065
/* PCI driver glue for the RVU admin function device */
static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};
3072
3073static int __init rvu_init_module(void)
3074{
3075 int err;
3076
3077 pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3078
3079 err = pci_register_driver(&cgx_driver);
3080 if (err < 0)
3081 return err;
3082
3083 err = pci_register_driver(&ptp_driver);
3084 if (err < 0)
3085 goto ptp_err;
3086
3087 err = pci_register_driver(&rvu_driver);
3088 if (err < 0)
3089 goto rvu_err;
3090
3091 return 0;
3092rvu_err:
3093 pci_unregister_driver(&ptp_driver);
3094ptp_err:
3095 pci_unregister_driver(&cgx_driver);
3096
3097 return err;
3098}
3099
/* Module exit: unregister drivers in reverse order of registration */
static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}
3106
/* rvu_init_module() also registers the cgx and ptp drivers */
module_init(rvu_init_module);
module_exit(rvu_cleanup_module);
3109