1
2#include <linux/aer.h>
3#include <linux/delay.h>
4#include <linux/firmware.h>
5#include <linux/list.h>
6#include <linux/module.h>
7#include <linux/mutex.h>
8#include <linux/pci.h>
9#include <linux/pci_ids.h>
10
11#include "nitrox_dev.h"
12#include "nitrox_common.h"
13#include "nitrox_csr.h"
14#include "nitrox_hal.h"
15#include "nitrox_isr.h"
16#include "nitrox_debugfs.h"
17
18#define CNN55XX_DEV_ID 0x12
19#define UCODE_HLEN 48
20#define DEFAULT_SE_GROUP 0
21#define DEFAULT_AE_GROUP 0
22
23#define DRIVER_VERSION "1.2"
24#define CNN55XX_UCD_BLOCK_SIZE 32768
25#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
26#define FW_DIR "cavium/"
27
28#define SE_FW FW_DIR "cnn55xx_se.fw"
29
30#define AE_FW FW_DIR "cnn55xx_ae.fw"
31
32static const char nitrox_driver_name[] = "CNN55XX";
33
34static LIST_HEAD(ndevlist);
35static DEFINE_MUTEX(devlist_lock);
36static unsigned int num_devices;
37
38
39
40
/* Supported devices: Cavium CNN55XX PF (device id 0x12) */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);
47
/* Command queue length; tunable at load time and via sysfs (mode 0644) */
static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
51
52
53
54
55
56
57
58
59
/**
 * struct ucode - on-disk microcode file layout
 * @id: microcode ID
 * @version: firmware version string; only VERSION_LEN - 2 bytes are
 *	copied out by nitrox_load_fw(), which NUL-terminates separately
 * @code_size: big-endian code length; nitrox_load_fw() multiplies this
 *	by 2 to get the byte size (i.e. the field appears to count
 *	2-byte units — confirm against the firmware format spec)
 * @raz: reserved, zero
 * @code: microcode words, streamed to the UCD unit 8 bytes at a time
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};
67
68
69
70
71static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
72 u64 *ucode_data, int block_num)
73{
74 u32 code_size;
75 u64 offset, data;
76 int i = 0;
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94 offset = UCD_UCODE_LOAD_BLOCK_NUM;
95 nitrox_write_csr(ndev, offset, block_num);
96
97 code_size = roundup(ucode_size, 16);
98 while (code_size) {
99 data = ucode_data[i];
100
101 offset = UCD_UCODE_LOAD_IDX_DATAX(i);
102 nitrox_write_csr(ndev, offset, data);
103 code_size -= 8;
104 i++;
105 }
106
107 usleep_range(300, 400);
108}
109
/**
 * nitrox_load_fw - load SE and AE firmware into the UCD units
 * @ndev: NITROX device
 *
 * Loads the SE microcode into UCD block 0 and the AE microcode into
 * UCD block 2, programs the group execution masks and the per-core
 * ucode block registers for all SE/AE cores, and records the firmware
 * version strings in ndev->hw.fw_name[0] (SE) and [1] (AE).
 *
 * Return: 0 on success, negative errno on request/validation failure.
 */
static int nitrox_load_fw(struct nitrox_device *ndev)
{
	const struct firmware *fw;
	const char *fw_name;
	struct ucode *ucode;
	u64 *ucode_data;
	u64 offset;
	union ucd_core_eid_ucode_block_num core_2_eid_val;
	union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
	union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
	u32 ucode_size;
	int ret, i = 0;

	/* ---- SE firmware ---- */
	fw_name = SE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	/* header code_size is doubled to get bytes; the image must fit
	 * in the two 32 KB UCD blocks
	 */
	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the (truncated) version string; slot is NUL-terminated */
	memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';

	/* SE microcode goes into UCD block 0 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);

	release_firmware(fw);

	/* enable all SE cores in the default group */
	offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* point every SE core at ucode block 0; ucode_len is set to 1
	 * when the image fits a single UCD block — confirm the exact
	 * field semantics against the CSR spec
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 0;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.se_cores; i++) {
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	/* ---- AE firmware ---- */
	fw_name = AE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the (truncated) version string; slot is NUL-terminated */
	memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';

	/* AE microcode goes into UCD block 2 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);

	release_firmware(fw);

	/* enable AE cores 0..79 in the default group.
	 * NOTE(review): only one bitfield of each union is assigned and
	 * .value is then written whole — remaining union bits are not
	 * explicitly zeroed; verify the register layout tolerates this.
	 */
	offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
	offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);

	/* point every AE core at ucode block 2 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 2;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.ae_cores; i++) {
		offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	return 0;
}
230
231
232
233
234
235static int nitrox_add_to_devlist(struct nitrox_device *ndev)
236{
237 struct nitrox_device *dev;
238 int ret = 0;
239
240 INIT_LIST_HEAD(&ndev->list);
241 refcount_set(&ndev->refcnt, 1);
242
243 mutex_lock(&devlist_lock);
244 list_for_each_entry(dev, &ndevlist, list) {
245 if (dev == ndev) {
246 ret = -EEXIST;
247 goto unlock;
248 }
249 }
250 ndev->idx = num_devices++;
251 list_add_tail(&ndev->list, &ndevlist);
252unlock:
253 mutex_unlock(&devlist_lock);
254 return ret;
255}
256
257
258
259
260
261
/**
 * nitrox_remove_from_devlist - remove a NITROX device from the global list
 * @ndev: NITROX device to remove
 *
 * Unlinks @ndev from @ndevlist and decrements the device count, both
 * under @devlist_lock.
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}
269
270struct nitrox_device *nitrox_get_first_device(void)
271{
272 struct nitrox_device *ndev;
273
274 mutex_lock(&devlist_lock);
275 list_for_each_entry(ndev, &ndevlist, list) {
276 if (nitrox_ready(ndev))
277 break;
278 }
279 mutex_unlock(&devlist_lock);
280 if (&ndev->list == &ndevlist)
281 return NULL;
282
283 refcount_inc(&ndev->refcnt);
284
285 smp_mb__after_atomic();
286 return ndev;
287}
288
289void nitrox_put_device(struct nitrox_device *ndev)
290{
291 if (!ndev)
292 return;
293
294 refcount_dec(&ndev->refcnt);
295
296 smp_mb__after_atomic();
297}
298
299static int nitrox_device_flr(struct pci_dev *pdev)
300{
301 int pos = 0;
302
303 pos = pci_save_state(pdev);
304 if (pos) {
305 dev_err(&pdev->dev, "Failed to save pci state\n");
306 return -ENOMEM;
307 }
308
309
310 if (pcie_has_flr(pdev))
311 pcie_flr(pdev);
312
313 pci_restore_state(pdev);
314
315 return 0;
316}
317
/**
 * nitrox_pf_sw_init - software-side initialization for the PF
 * @ndev: NITROX device
 *
 * Sets up the common software state, then registers interrupts;
 * rolls the common state back if interrupt registration fails.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int ret;

	ret = nitrox_common_sw_init(ndev);
	if (ret)
		return ret;

	ret = nitrox_register_interrupts(ndev);
	if (ret)
		nitrox_common_sw_cleanup(ndev);

	return ret;
}
332
/**
 * nitrox_pf_sw_cleanup - undo nitrox_pf_sw_init()
 * @ndev: NITROX device
 *
 * Unregisters interrupts first, then tears down the common software
 * state (reverse of the init order).
 */
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}
338
339
340
341
342
343static int nitrox_bist_check(struct nitrox_device *ndev)
344{
345 u64 value = 0;
346 int i;
347
348 for (i = 0; i < NR_CLUSTERS; i++) {
349 value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
350 value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
351 }
352 value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
353 value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
354 value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
355 value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
356 value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
357 value += nitrox_read_csr(ndev, POM_BIST_REG);
358 value += nitrox_read_csr(ndev, BMI_BIST_REG);
359 value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
360 value += nitrox_read_csr(ndev, BMO_BIST_REG);
361 value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
362 value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
363 if (value)
364 return -EIO;
365 return 0;
366}
367
/**
 * nitrox_pf_hw_init - hardware bring-up for the PF
 * @ndev: NITROX device
 *
 * Runs the BIST check, reads hardware capabilities (which populate the
 * se/ae core counts used by firmware load), configures the hardware
 * units, loads the firmware, and finally configures the EMU unit.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}

	/* populate ndev->hw (core counts, ids) before unit config */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* buffer in/out units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);

	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on SE and AE cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	/* EMU unit is configured after the firmware load */
	nitrox_config_emu_unit(ndev);

	return 0;
}
401
402
403
404
405
406
407
408
409
/**
 * nitrox_probe - PCI probe for a CNN55XX physical function
 * @pdev: PCI device
 * @id: matched entry from nitrox_pci_tbl
 *
 * Enables the device, issues an FLR, configures DMA masks (64-bit
 * preferred, 32-bit fallback), maps BAR 0, initializes software and
 * hardware state, sets up debugfs and stats, marks the device ready,
 * and registers the crypto algorithms.  Errors unwind in reverse
 * order via the goto labels at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR to put the function in a clean state */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		goto flr_fail;
	}

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		/* fall back to 32-bit DMA addressing */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			goto flr_fail;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err)
		goto flr_fail;
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list; also sets refcnt to 1 and assigns idx.
	 * NOTE(review): the -EEXIST return is ignored here — a freshly
	 * allocated ndev cannot already be listed, but consider checking.
	 */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on cpus, max queues are pre-defined */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto pf_sw_fail;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
pf_sw_fail:
	iounmap(ndev->bar_addr);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
flr_fail:
	pci_disable_device(pdev);
	return err;
}
520
521
522
523
524
/**
 * nitrox_remove - PCI remove callback
 * @pdev: PCI device being removed
 *
 * Drops the probe-time reference; if other users (via
 * nitrox_get_first_device()) still hold references the teardown is
 * refused.  Otherwise marks the device not ready, removes it from the
 * global list, disables SR-IOV, unregisters crypto algorithms and
 * releases all software, MMIO and PCI resources.
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	/* drop the probe-time reference; bail if users remain */
	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

	/* disable SR-IOV (0 VFs) before tearing down the PF */
	nitrox_sriov_configure(pdev, 0);
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
560
/**
 * nitrox_shutdown - PCI shutdown callback
 * @pdev: PCI device
 *
 * Minimal teardown on system shutdown: clears drvdata and releases
 * PCI resources without the full software/hardware cleanup done in
 * nitrox_remove().
 */
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
567
/* PCI driver glue for the CNN55XX physical function */
static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove = nitrox_remove,
	.shutdown = nitrox_shutdown,
	.sriov_configure = nitrox_sriov_configure,
};

module_pci_driver(nitrox_driver);
578
579MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
580MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " ");
581MODULE_LICENSE("GPL");
582MODULE_VERSION(DRIVER_VERSION);
583MODULE_FIRMWARE(SE_FW);
584