/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

/* The request/response structures below are exchanged with the hardware.
 * All of them are naturally aligned and hence don't need __packed.
 */
enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
};

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap of the used entries */
	unsigned long *map;
};
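
/* Illustrative sketch (not part of the driver API): reserving one entry
 * from a gdma_resource bitmap under its lock.  The helper name is
 * hypothetical; the real allocator lives in the GDMA core.
 */
static inline int example_gdma_resource_alloc(struct gdma_resource *r)
{
	unsigned long flags;
	unsigned long bit;

	spin_lock_irqsave(&r->lock, flags);
	bit = find_first_zero_bit(r->map, r->size);
	if (bit < r->size)
		__set_bit(bit, r->map);
	spin_unlock_irqrestore(&r->lock, flags);

	return bit < r->size ? (int)bit : -ENOSPC;
}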

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
};
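
/* Illustrative sketch: composing the 64-bit value written to the doorbell
 * page to arm a CQ.  Mapping the doorbell page and issuing the MMIO write
 * (e.g. via writeq()) happen elsewhere in the driver; the helper name is
 * hypothetical.
 */
static inline u64 example_make_cq_arm_doorbell(u32 cq_id, u32 tail)
{
	union gdma_doorbell_entry e = { .as_uint64 = 0 };

	e.cq.id = cq_id;
	e.cq.tail_ptr = tail;	/* only the low 31 bits are used */
	e.cq.arm = 1;

	return e.as_uint64;
}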

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
};

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
};

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
};

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
};

struct gdma_general_req {
	struct gdma_req_hdr hdr;
};

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
};

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
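
/* Usage sketch: initializing the header of a GDMA_QUERY_MAX_RESOURCES
 * request.  The helper name is hypothetical; 'resp_size' would be
 * sizeof(struct gdma_query_max_resources_resp), defined later in this file.
 */
static inline void example_init_query_max_res_hdr(struct gdma_general_req *req,
						  u32 resp_size)
{
	mana_gd_init_req_hdr(&req->hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(*req), resp_size);
}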

struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
};

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};
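
/* Illustrative sketch: describing a single-buffer work request.  The
 * helper name is hypothetical; 'dma_addr', 'len' and 'mkey' are assumed
 * to come from an already DMA-mapped buffer and a registered memory key.
 */
static inline void example_fill_single_sge_wqe(struct gdma_wqe_request *wqe_req,
					       struct gdma_sge *sge,
					       u64 dma_addr, u32 len, u32 mkey)
{
	sge->address = dma_addr;
	sge->mem_key = mkey;
	sge->size = len;

	wqe_req->sgl = sge;
	wqe_req->num_sge = 1;
	wqe_req->inline_oob_size = 0;
	wqe_req->inline_oob_data = NULL;
	wqe_req->flags = GDMA_WR_NONE;
	wqe_req->client_data_unit = 0;
}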

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 gdma_region;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* Driver-private pointer: the HWC and MANA drivers use it to
	 * store their per-device context.
	 */
	void *driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

#define CQE_POLLING_BUFFER 512
struct mana_eq {
	struct gdma_queue *eq;
	struct gdma_comp cqe_poll[CQE_POLLING_BUFFER];
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' and 'tail' fields are in units of 32-byte basic units (BUs)
 * for SQs and RQs, and in units of processed entries for CQs and EQs:
 *
 * - For an SQ/RQ, the client posts WQEs (advancing 'head') and rings the
 *   doorbell; completions advance 'tail'.
 *
 * - For a CQ/EQ, the hardware produces entries and the driver consumes
 *   them, using the owner bits in each entry to detect new ones; 'head'
 *   counts the entries processed so far.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;

			/* NAPI data */
			struct napi_struct napi;
			int work_done;
			int budget;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;

			/* Only used by the MANA device. */
			struct net_device *ndev;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};
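
/* Usage sketch: a spec for creating an EQ.  The helper name and the
 * queue size / throttle values are illustrative; real callers derive
 * them from device limits.
 */
static inline void example_fill_eq_spec(struct gdma_queue_spec *spec,
					gdma_eq_callback *cb, void *ctx,
					struct net_device *ndev)
{
	memset(spec, 0, sizeof(*spec));
	spec->type = GDMA_EQ;
	spec->monitor_avl_buf = false;
	spec->queue_size = GDMA_EQE_SIZE * 64;	/* 64 entries (illustrative) */
	spec->eq.callback = cb;
	spec->eq.context = ctx;
	spec->eq.log2_throttle_limit = 4;	/* illustrative */
	spec->eq.ndev = ndev;
}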

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	u32 db_page_size;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_arm_cq(struct gdma_queue *cq);
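
/* Usage sketch: draining up to 'max_comps' completions from a CQ and
 * re-arming it.  The helper name is hypothetical; real callers dispatch
 * each gdma_comp to the owning SQ/RQ before re-arming.
 */
static inline int example_drain_and_arm_cq(struct gdma_queue *cq,
					   struct gdma_comp *comps,
					   int max_comps)
{
	int n = mana_gd_poll_cq(cq, comps, max_comps);

	if (n >= 0)
		mana_gd_arm_cq(cq);

	return n;	/* number of completions for the caller to process */
}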

struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4 :3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
};

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
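
/* Illustrative sketch: the on-queue footprint of a WQE is the 8-byte
 * gdma_wqe header, the inline OOB area, and the SGL, rounded up to whole
 * 32-byte basic units.  This mirrors, in simplified form and under that
 * assumption, the sizing done when posting work requests.
 */
static inline u32 example_wqe_size_in_bytes(u32 num_sge, u32 inline_oob_size)
{
	u32 oob = inline_oob_size > INLINE_OOB_SMALL_SIZE ?
		  INLINE_OOB_LARGE_SIZE : INLINE_OOB_SMALL_SIZE;
	u32 size = sizeof(struct gdma_wqe) + oob +
		   num_sge * sizeof(struct gdma_sge);

	return ALIGN(size, GDMA_WQE_BU_SIZE);
}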

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
};

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
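
/* Illustrative sketch of the owner-bit check used when polling a CQ: the
 * expected owner value advances by one (mod 2^GDMA_CQE_OWNER_BITS) on each
 * full wrap of the queue, so a stale entry can be told apart from a
 * freshly written one.  'cq_head' is the monotonic count of processed CQEs.
 */
static inline bool example_cqe_is_new(const struct gdma_cqe *cqe,
				      u32 cq_head, u32 num_cqe)
{
	u32 expected = (cq_head / num_cqe) & GDMA_CQE_OWNER_MASK;

	return cqe->cqe_info.owner_bits == expected;
}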

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
};

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
};
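
/* Illustrative sketch: decoding the type of an event queue entry.  The
 * helper name is hypothetical; the owner bits of an EQE are checked the
 * same way as for CQEs before the entry is trusted.
 */
static inline u8 example_eqe_type(const struct gdma_eqe *eqe)
{
	union gdma_eqe_info info = { .as_uint32 = eqe->eqe_info };

	return info.type;
}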

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
};

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;
	u64 drv_cap_flags1;
	u64 drv_cap_flags2;
	u64 drv_cap_flags3;
	u64 drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type;
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
};

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
};
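
/* Usage sketch: advertising the supported protocol range.  The helper
 * name is hypothetical; only the version fields are shown, and the
 * capability and OS fields are left zeroed.
 */
static inline void example_fill_verify_ver(struct gdma_verify_ver_req *req)
{
	memset(req, 0, sizeof(*req));
	mana_gd_init_req_hdr(&req->hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
			     sizeof(*req),
			     sizeof(struct gdma_verify_ver_resp));
	req->protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req->protocol_ver_max = GDMA_PROTOCOL_LAST;
}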

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
};

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
};

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
};

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
};

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
};

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_region;
};
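
/* Illustrative sketch: the create-DMA-region request carries a flexible
 * page-address array, so the message length depends on the page count.
 * The helper name is hypothetical.
 */
static inline u32 example_create_dma_region_req_size(u32 num_pages)
{
	return sizeof(struct gdma_create_dma_region_req) +
	       num_pages * sizeof(u64);
}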

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 gdma_region;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
};

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 gdma_region;
};

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
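
/* Usage sketch: issuing a request over the HWC and checking both the
 * transport return code and the response status.  The helper name is
 * hypothetical and error handling is abbreviated.
 */
static inline int example_query_max_resources(struct gdma_context *gc,
					      struct gdma_query_max_resources_resp *resp)
{
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(*resp));

	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(*resp), resp);
	if (err || resp->hdr.status)
		return err ? err : -EPROTO;

	return 0;
}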
#endif