#ifndef _QED_SP_H
#define _QED_SP_H

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"

enum spq_mode {
	QED_SPQ_MODE_BLOCK,	/* Client will poll a designated mem. address */
	QED_SPQ_MODE_CB,	/* Client supplies a callback */
	QED_SPQ_MODE_EBLOCK,	/* QED should block until completion */
};

struct qed_spq_comp_cb {
	void (*function)(struct qed_hwfn *p_hwfn,
			 void *cookie,
			 union event_ring_data *data,
			 u8 fw_return_code);
	void *cookie;
};

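/**
 * qed_eth_cqe_completion(): Handle the completion of a ramrod on the
 *                           ETH slowpath CQE ring.
 *
 * @p_hwfn: HW device data.
 * @cqe: Completed slowpath Rx CQE.
 *
 * Return: 0 on success, negative value otherwise.
 */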
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);

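/* Union of the per-protocol payloads that can accompany a slowpath
 * (ramrod) element; each SPQ entry embeds one of these.
 */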
union ramrod_data {
	struct pf_start_ramrod_data pf_start;
	struct pf_update_ramrod_data pf_update;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct rx_update_gft_filter_data rx_update_gft;
	struct vport_update_ramrod_data vport_update;
	struct core_rx_start_ramrod_data core_rx_queue_start;
	struct core_rx_stop_ramrod_data core_rx_queue_stop;
	struct core_tx_start_ramrod_data core_tx_queue_start;
	struct core_tx_stop_ramrod_data core_tx_queue_stop;
	struct vport_filter_update_ramrod_data vport_filter_update;

	struct rdma_init_func_ramrod_data rdma_init_func;
	struct rdma_close_func_ramrod_data rdma_close_func;
	struct rdma_register_tid_ramrod_data rdma_register_tid;
	struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
	struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
	struct roce_create_qp_req_ramrod_data roce_create_qp_req;
	struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
	struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
	struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
	struct roce_query_qp_req_ramrod_data roce_query_qp_req;
	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
	struct roce_init_func_ramrod_data roce_init_func;
	struct rdma_create_cq_ramrod_data rdma_create_cq;
	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
	struct rdma_srq_create_ramrod_data rdma_create_srq;
	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
	struct iwarp_create_qp_ramrod_data iwarp_create_qp;
	struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
	struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
	struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
	struct iwarp_init_func_ramrod_data iwarp_init_func;
	struct fcoe_init_ramrod_params fcoe_init;
	struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
	struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
	struct fcoe_stat_ramrod_params fcoe_stat;

	struct iscsi_init_ramrod_params iscsi_init;
	struct iscsi_spe_conn_offload iscsi_conn_offload;
	struct iscsi_conn_update_ramrod_params iscsi_conn_update;
	struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
	struct iscsi_spe_conn_termination iscsi_conn_terminate;

	struct nvmetcp_init_ramrod_params nvmetcp_init;
	struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
	struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
	struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;

	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
};

#define EQ_MAX_CREDIT 0xffffffff

enum spq_priority {
	QED_SPQ_PRIORITY_NORMAL,
	QED_SPQ_PRIORITY_HIGH,
};

union qed_spq_req_comp {
	struct qed_spq_comp_cb cb;
	u64 *done_addr;
};

struct qed_spq_comp_done {
	unsigned int done;
	u8 fw_return_code;
};

struct qed_spq_entry {
	struct list_head list;

	u8 flags;

	/* HSI slow path element */
	struct slow_path_element elem;

	union ramrod_data ramrod;

	enum spq_priority priority;

	/* Pending queue for this entry */
	struct list_head *queue;

	enum spq_mode comp_mode;
	struct qed_spq_comp_cb comp_cb;
	struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */

	/* Posted entry for unlimited list entry in EBLOCK mode */
	struct qed_spq_entry *post_ent;
};

struct qed_eq {
	struct qed_chain chain;
	u8 eq_sb_index;		/* index within the SB */
	__le16 *p_fw_cons;	/* ptr to index value */
};

struct qed_consq {
	struct qed_chain chain;
};

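/* Asynchronous (unsolicited) EQ completions are dispatched per protocol;
 * protocol code registers a callback of this type to receive them.
 */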
typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode,
				     __le16 echo, union event_ring_data *data,
				     u8 fw_return_code);

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb);

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id);

struct qed_spq {
	spinlock_t lock; /* SPQ lock */

	struct list_head unlimited_pending;
	struct list_head pending;
	struct list_head completion_pending;
	struct list_head free_pool;

	struct qed_chain chain;

	/* Allocated DMA-able memory for the SPQ entries (+ ramrod data) */
	dma_addr_t p_phys;
	struct qed_spq_entry *p_virt;

#define SPQ_RING_SIZE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

	/* Bitmap for handling out-of-order completions */
	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
	u8 comp_bitmap_idx;

	/* Statistics */
	u32 unlimited_pending_count;
	u32 normal_count;
	u32 high_count;
	u32 comp_sent_count;
	u32 comp_count;

	u32 cid;
	u32 db_addr_offset;
	struct core_db_data db_data;
	qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};

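/**
 * qed_spq_post(): Post a slowpath request entry to the SPQ and, depending
 *                 on the completion mode, wait for its completion.
 *
 * @p_hwfn: HW device data.
 * @p_ent: SPQ entry to post.
 * @fw_return_code: Optional storage for the FW return code.
 *
 * Return: 0 on success, negative value otherwise.
 */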
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code);

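/**
 * qed_spq_alloc(): Allocate and initialize the slowpath queue (SPQ).
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */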
int qed_spq_alloc(struct qed_hwfn *p_hwfn);

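/**
 * qed_spq_setup(): Reset the SPQ to its initial state.
 *
 * @p_hwfn: HW device data.
 */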
void qed_spq_setup(struct qed_hwfn *p_hwfn);

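/**
 * qed_spq_free(): Release the resources held by the SPQ.
 *
 * @p_hwfn: HW device data.
 */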
void qed_spq_free(struct qed_hwfn *p_hwfn);

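/**
 * qed_spq_get_entry(): Obtain an SPQ entry, either from the free pool or
 *                      by allocating a new one.
 *
 * @p_hwfn: HW device data.
 * @pp_ent: Output pointer for the acquired entry.
 *
 * Return: 0 on success, negative value otherwise.
 */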
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent);

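/**
 * qed_spq_return_entry(): Return an SPQ entry to the free pool.
 *
 * @p_hwfn: HW device data.
 * @p_ent: Entry to return.
 */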
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent);

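/**
 * qed_eq_alloc(): Allocate and initialize the event queue (EQ).
 *
 * @p_hwfn: HW device data.
 * @num_elem: Number of elements in the EQ.
 *
 * Return: 0 on success, negative value otherwise.
 */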
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);

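/**
 * qed_eq_setup(): Reset the EQ to its initial state.
 *
 * @p_hwfn: HW device data.
 */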
void qed_eq_setup(struct qed_hwfn *p_hwfn);

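/**
 * qed_eq_free(): Release the resources held by the EQ.
 *
 * @p_hwfn: HW device data.
 */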
void qed_eq_free(struct qed_hwfn *p_hwfn);

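/**
 * qed_eq_prod_update(): Update the FW with the EQ producer value.
 *
 * @p_hwfn: HW device data.
 * @prod: New producer index.
 */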
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod);

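/**
 * qed_eq_completion(): Process the pending elements on the event queue.
 *
 * @p_hwfn: HW device data.
 * @cookie: Opaque cookie passed by the caller.
 *
 * Return: 0 on success, negative value otherwise.
 */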
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie);

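/**
 * qed_spq_completion(): Complete a single SPQ event identified by its echo
 *                       value.
 *
 * @p_hwfn: HW device data.
 * @echo: Echo value carried by the EQ element.
 * @fw_return_code: FW return code.
 * @p_data: Event ring data associated with the completion.
 *
 * Return: 0 on success, negative value otherwise.
 */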
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data);

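/**
 * qed_spq_get_cid(): Provide the CID used by the SPQ.
 *
 * @p_hwfn: HW device data.
 *
 * Return: The SPQ CID.
 */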
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);

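/**
 * qed_consq_alloc(): Allocate and initialize the ConsQ.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */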
int qed_consq_alloc(struct qed_hwfn *p_hwfn);

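/**
 * qed_consq_setup(): Reset the ConsQ to its initial state.
 *
 * @p_hwfn: HW device data.
 */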
void qed_consq_setup(struct qed_hwfn *p_hwfn);

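/**
 * qed_consq_free(): Release the resources held by the ConsQ.
 *
 * @p_hwfn: HW device data.
 */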
void qed_consq_free(struct qed_hwfn *p_hwfn);

/* Post any entries currently pending on the SPQ */
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);

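/* QED Slow-hwfn queue interface */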
#define QED_SP_EQ_COMPLETION  0x01
#define QED_SP_CQE_COMPLETION 0x02

struct qed_sp_init_data {
	u32 cid;
	u16 opaque_fid;

	/* Information regarding operation upon sending & completion */
	enum spq_mode comp_mode;
	struct qed_spq_comp_cb *p_comp_data;
};

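/**
 * qed_sp_destroy_request(): Return an unused SPQ entry to the pool, or free
 *                           it if it was taken from the unlimited list.
 *
 * @p_hwfn: HW device data.
 * @p_ent: Entry to dispose of.
 */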
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
			    struct qed_spq_entry *p_ent);

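/**
 * qed_sp_init_request(): Acquire an SPQ entry and initialize its header for
 *                        the given command and protocol.
 *
 * @p_hwfn: HW device data.
 * @pp_ent: Output pointer for the acquired entry.
 * @cmd: Ramrod command id.
 * @protocol: Protocol id.
 * @p_data: CID/FID and completion-mode information for the request.
 *
 * Return: 0 on success, negative value otherwise.
 */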
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data);

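/**
 * qed_sp_pf_start(): PF Function Start ramrod.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 * @p_tunn: Tunnel configuration to apply at start time.
 * @allow_npar_tx_switch: Allow NPAR TX switching.
 *
 * This ramrod initializes the physical function: it configures the
 * function-wide parameters and the event ring on which subsequent ramrod
 * completions are reported.
 *
 * Return: 0 on success, negative value otherwise.
 */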
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch);

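/**
 * qed_sp_pf_update(): PF Function Update ramrod - sends updated
 *                     function-related parameters to the FW after start.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */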
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);

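/**
 * qed_sp_pf_update_stag(): Inform the FW of a new outer VLAN (S-tag).
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */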
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);

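/**
 * qed_sp_pf_update_ufp(): PF UFP update ramrod.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */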
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);

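/**
 * qed_sp_pf_stop(): PF Function Stop ramrod - the last ramrod sent for a
 *                   PF, closing it and its slowpath connection.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */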
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);

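/**
 * qed_sp_pf_update_tunn_cfg(): Update the PF tunnel configuration.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 * @p_tunn: New tunnel configuration.
 * @comp_mode: Completion mode for the request.
 * @p_comp_data: Completion callback data (for QED_SPQ_MODE_CB).
 *
 * Return: 0 on success, negative value otherwise.
 */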
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data);

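/**
 * qed_sp_heartbeat_ramrod(): Send an empty (heartbeat) ramrod to the FW.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative value otherwise.
 */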
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);

#endif