// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include "qm.h"

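/* eq/aeq interrupt registers and irq vector layout */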
#define QM_VF_AEQ_INT_SOURCE 0x0
#define QM_VF_AEQ_INT_MASK 0x4
#define QM_VF_EQ_INT_SOURCE 0x8
#define QM_VF_EQ_INT_MASK 0xc
#define QM_IRQ_NUM_V1 1
#define QM_IRQ_NUM_PF_V2 4
#define QM_IRQ_NUM_VF_V2 2
#define QM_IRQ_NUM_VF_V3 3

#define QM_EQ_EVENT_IRQ_VECTOR 0
#define QM_AEQ_EVENT_IRQ_VECTOR 1
#define QM_CMD_EVENT_IRQ_VECTOR 2
#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3

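/* mailbox commands and registers */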
#define QM_MB_CMD_SQC 0x0
#define QM_MB_CMD_CQC 0x1
#define QM_MB_CMD_EQC 0x2
#define QM_MB_CMD_AEQC 0x3
#define QM_MB_CMD_SQC_BT 0x4
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6
#define QM_MB_CMD_STOP_QP 0x8
#define QM_MB_CMD_SRC 0xc
#define QM_MB_CMD_DST 0xd

#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
#define QM_MB_BUSY_SHIFT 13
#define QM_MB_OP_SHIFT 14
#define QM_MB_CMD_DATA_ADDR_L 0x304
#define QM_MB_CMD_DATA_ADDR_H 0x308
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_CMD_DATA_SHIFT 32
#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)

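/* sqc field shifts */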
#define QM_SQ_HOP_NUM_SHIFT 0
#define QM_SQ_PAGE_SIZE_SHIFT 4
#define QM_SQ_BUF_SIZE_SHIFT 8
#define QM_SQ_SQE_SIZE_SHIFT 12
#define QM_SQ_PRIORITY_SHIFT 0
#define QM_SQ_ORDERS_SHIFT 4
#define QM_SQ_TYPE_SHIFT 8
#define QM_QC_PASID_ENABLE 0x1
#define QM_QC_PASID_ENABLE_SHIFT 7

#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

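/* cqc field shifts */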
#define QM_CQ_HOP_NUM_SHIFT 0
#define QM_CQ_PAGE_SIZE_SHIFT 4
#define QM_CQ_BUF_SIZE_SHIFT 8
#define QM_CQ_CQE_SIZE_SHIFT 12
#define QM_CQ_PHASE_SHIFT 0
#define QM_CQ_FLAG_SHIFT 1

#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

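/* eqc/aeqc fields */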
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16

#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17

#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
#define QM_DOORBELL_CMD_EQ 2
#define QM_DOORBELL_CMD_AEQ 3

#define QM_DOORBELL_BASE_V1 0x340
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
#define QM_QUE_ISO_CFG_V 0x0030
#define QM_PAGE_SIZE 0x0034
#define QM_QUE_ISO_EN 0x100154
#define QM_CAPBILITY 0x100158
#define QM_QP_NUN_MASK GENMASK(10, 0)
#define QM_QP_DB_INTERVAL 0x10000
#define QM_QP_MAX_NUM_SHIFT 11
#define QM_DB_CMD_SHIFT_V2 12
#define QM_DB_RAND_SHIFT_V2 16
#define QM_DB_INDEX_SHIFT_V2 32
#define QM_DB_PRIORITY_SHIFT_V2 48

#define QM_MEM_START_INIT 0x100040
#define QM_MEM_INIT_DONE 0x100044
#define QM_VFT_CFG_RDY 0x10006c
#define QM_VFT_CFG_OP_WR 0x100058
#define QM_VFT_CFG_TYPE 0x10005c
#define QM_SQC_VFT 0x0
#define QM_CQC_VFT 0x1
#define QM_VFT_CFG 0x100060
#define QM_VFT_CFG_OP_ENABLE 0x100054

#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT 28
#define QM_SQC_VFT_VALID (1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT 45
#define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
#define QM_CQC_VFT_VALID (1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2 28
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)

#define QM_DFX_CNT_CLR_CE 0x100118

#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
#define QM_FIFO_OVERFLOW_VF 0x3f
#define QM_ABNORMAL_INF01 0x100014
#define QM_DB_TIMEOUT_TYPE 0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT 6
#define QM_DB_TIMEOUT_VF 0x3f
#define QM_RAS_CE_ENABLE 0x1000ec
#define QM_RAS_FE_ENABLE 0x1000f0
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
#define QM_OOO_SHUTDOWN_SEL 0x1040f8

#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
#define QM_PEH_DFX_INFO0 0x1000fc
#define QM_PEH_DFX_INFO1 0x100100
#define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
#define ACC_MASTER_TRANS_RETURN_RW 3
#define ACC_MASTER_TRANS_RETURN 0x300150
#define ACC_MASTER_GLOBAL_CTRL 0x300000
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MSI_CAP_ENABLE BIT(16)

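/* interfunction (PF <-> VF) communication registers */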
#define QM_IFC_READY_STATUS 0x100128
#define QM_IFC_C_STS_M 0x10012C
#define QM_IFC_INT_SET_P 0x100130
#define QM_IFC_INT_CFG 0x100134
#define QM_IFC_INT_SOURCE_P 0x100138
#define QM_IFC_INT_SOURCE_V 0x0020
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
#define QM_IFC_INT_DISABLE BIT(0)
#define QM_IFC_INT_STATUS_MASK BIT(0)
#define QM_IFC_INT_SET_MASK BIT(0)
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
#define QM_VF_RESET_WAIT_US 20000
#define QM_VF_RESET_WAIT_CNT 3000
#define QM_VF_RESET_WAIT_TIMEOUT_US \
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define QM_DFX_MB_CNT_VF 0x104010
#define QM_DFX_DB_CNT_VF 0x104020
#define QM_DFX_SQE_CNT_VF_SQN 0x104030
#define QM_DFX_CQE_CNT_VF_CQN 0x104040
#define QM_DFX_QN_SHIFT 16
#define CURRENT_FUN_MASK GENMASK(5, 0)
#define CURRENT_Q_MASK GENMASK(31, 16)

#define POLL_PERIOD 10
#define POLL_TIMEOUT 1000
#define WAIT_PERIOD_US_MAX 200
#define WAIT_PERIOD_US_MIN 100
#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208

#define PCI_BAR_2 2
#define PCI_BAR_4 4
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)

#define QM_DBG_READ_LEN 256
#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0

#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_EQ_DEPTH (1024 * 2)

#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
#define QM_RESETTING 2
#define QM_QOS_PARAM_NUM 2
#define QM_QOS_VAL_NUM 1
#define QM_QOS_BDF_PARAM_NUM 4
#define QM_QOS_MAX_VAL 1000
#define QM_QOS_RATE 100
#define QM_QOS_EXPAND_RATE 1000
#define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
#define QM_SHAPER_CBS_B 1
#define QM_SHAPER_CBS_S 16
#define QM_SHAPER_VFT_OFFSET 6
#define WAIT_FOR_QOS_VF 100
#define QM_QOS_MIN_ERROR_RATE 5
#define QM_QOS_TYPICAL_NUM 8
#define QM_SHAPER_MIN_CBS_S 8
#define QM_QOS_TICK 0x300U
#define QM_QOS_DIVISOR_CLK 0x1f40U
#define QM_QOS_MAX_CIR_B 200
#define QM_QOS_MIN_CIR_B 100
#define QM_QOS_MAX_CIR_U 6
#define QM_QOS_MAX_CIR_S 11
#define QM_QOS_VAL_MAX_LEN 32
/* used by qm_shaper_init_vft() but missing from this excerpt; value taken from mainline qm.c */
#define QM_SHAPER_CFG 0x100164

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz) \
	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT) | \
	((orders) << QM_SQ_ORDERS_SHIFT) | \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz) \
	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define INIT_QC_COMMON(qc, base, pasid) do {			\
	(qc)->head = 0;						\
	(qc)->tail = 0;						\
	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
	(qc)->dw3 = 0;						\
	(qc)->w8 = 0;						\
	(qc)->rsvd0 = 0;					\
	(qc)->pasid = cpu_to_le16(pasid);			\
	(qc)->w11 = 0;						\
	(qc)->rsvd1 = 0;					\
} while (0)

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

struct qm_cqe {
	__le32 rsvd0;
	__le16 cmd_id;
	__le16 rsvd1;
	__le16 sq_head;
	__le16 sq_num;
	__le16 rsvd2;
	__le16 w7;
};

struct qm_eqe {
	__le32 dw0;
};

struct qm_aeqe {
	__le32 dw0;
};

struct qm_sqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le16 cq_num;
	__le16 w13;
	__le32 rsvd1;
};

struct qm_cqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le32 dw6;
	__le32 rsvd1;
};

struct qm_eqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_aeqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	u32 (*get_irq_num)(struct hisi_qm *qm);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*stop_qp)(struct hisi_qp *qp);
	int (*set_msi)(struct hisi_qm *qm, bool set);
	int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
	int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};

struct qm_dfx_item {
	const char *name;
	u32 offset;
};

static struct qm_dfx_item qm_dfx_files[] = {
	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};

static const char * const qm_debug_file_name[] = {
	[CURRENT_QM]   = "current_qm",
	[CURRENT_Q]    = "current_q",
	[CLEAR_ENABLE] = "clear_enable",
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
	{ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qm_s[] = {
	"init", "start", "close", "stop",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000,
							10000, 25000, 50000, 100000};
static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16,
							  17, 18, 19};

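/*
 * Allowed QM state transitions:
 * QM_INIT -> QM_START or QM_CLOSE; QM_START -> QM_STOP;
 * QM_STOP -> QM_START or QM_CLOSE.
 */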
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		      ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		      (0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

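/* return 0 when the mailbox is ready, -ETIMEDOUT on poll timeout */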
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}

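/* the 16-byte mailbox must reach hardware in a single 128-bit write to trigger the command */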
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dsb sy\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	if (unlikely(qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return -EBUSY;
}

static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
		 bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

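/* on V2, SQ/CQ doorbells live in each queue's doorbell page; EQ/AEQ doorbells use a fixed base */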
static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
{
	return QM_IRQ_NUM_V1;
}

static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return QM_IRQ_NUM_PF_V2;
	else
		return QM_IRQ_NUM_VF_V2;
}

static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return QM_IRQ_NUM_PF_V2;

	return QM_IRQ_NUM_VF_V3;
}

static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;

	return &qm->qp_array[cqn];
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}

static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
		return;

	if (qp->event_cb) {
		qp->event_cb(qp);
		return;
	}

	if (qp->req_cb) {
		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;

		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
			dma_rmb();
			qp->req_cb(qp, qp->sqe + qm->sqe_size *
				   le16_to_cpu(cqe->sq_head));
			qm_cq_head_update(qp);
			cqe = qp->cqe + qp->qp_status.cq_head;
			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
			      qp->qp_status.cq_head, 0);
			atomic_dec(&qp->qp_status.used);
		}

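		/* set c_flag so the next completion raises an event */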
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 1);
	}
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qp *qp;
	int eqe_num = 0;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		eqe_num++;
		qp = qm_to_hisi_qp(qm, eqe);
		qm_poll_qp(qp, qm);

		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
			eqe_num = 0;
			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}

static irqreturn_t do_qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = (struct hisi_qm *)data;

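	/* use the workqueue created by the QM's device driver if there is one */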
	if (qm->wq)
		queue_work(qm->wq, &qm->work);
	else
		schedule_work(&qm->work);

	return IRQ_HANDLED;
}

static irqreturn_t qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return do_qm_irq(irq, data);

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	dev_err(&qm->pdev->dev, "invalid int source\n");
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u32 type;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		if (type < ARRAY_SIZE(qm_fifo_overflow))
			dev_err(&qm->pdev->dev, "%s overflow\n",
				qm_fifo_overflow[type]);
		else
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);

		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}

		qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
	}

	return IRQ_HANDLED;
}

static void qm_irq_unregister(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);

	if (qm->ver > QM_HW_V1) {
		free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);

		if (qm->fun_type == QM_HW_PF)
			free_irq(pci_irq_vector(pdev,
				 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
	}

	if (qm->ver > QM_HW_V2)
		free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (qm->ver < QM_HW_V3)
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size %lu is not supported, default set to 4KB\n",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

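/*
 * acc_shaper_para_calc() recovers the rate that a (cir_b, cir_u, cir_s)
 * tuple encodes:
 *
 *	     cir_b * QM_QOS_DIVISOR_CLK * (2 ^ cir_u)
 *	ir = -----------------------------------------
 *		      QM_QOS_TICK * (2 ^ cir_s)
 */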
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int i;

	if (ir < typical_qos_val[0])
		return QM_SHAPER_MIN_CBS_S;

	for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
		if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
			return typical_qos_cbs_s[i - 1];
	}

	return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
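				/*
				 * brute-force search: accept the first
				 * (cir_b, cir_u, cir_s) tuple whose encoded
				 * rate is within 0.5% (5/1000) of ir
				 */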
				ir_calc = acc_shaper_para_calc(cir_b, cir_u,
							       cir_s);
				error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
				if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
					factor->cir_b = cir_b;
					factor->cir_u = cir_u;
					factor->cir_s = cir_s;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (qm->ver >= QM_HW_V3) {
				tmp = factor->cir_b |
				(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				(QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = &qm->factor[fun_num];
	unsigned int val;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	int ret, i;

	qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL;
	ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
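		/* one shaper VFT entry per algorithm type */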
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

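/* this config should be conducted after qm_dev_mem_reset() */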
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

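	/* init default shaper qos val */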
	if (qm->ver >= QM_HW_V3) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
		if (ret)
			return ret;
	}
	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

static u32 qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
{
	u32 remain_q_num, vfq_num;
	u32 num_vfs = qm->vfs_num;

	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
	if (vfq_num >= qm->max_qp_num)
		return qm->max_qp_num;

	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
	if (vfq_num + remain_q_num <= qm->max_qp_num)
		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
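	/*
	 * if vfq_num + remain_q_num > max_qp_num, give one extra queue
	 * to each of the last remain_q_num VFs instead.
	 */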
	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
}

static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
	struct qm_debug *debug = file->debug;

	return container_of(debug, struct hisi_qm, debug);
}

static u32 current_q_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}

static int current_q_write(struct debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}

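/* rd_clr_ctrl: 1 enables read-clear mode, 0 disables it */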
static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
{
	struct hisi_qm *qm = file_to_qm(file);

	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}

static u32 current_qm_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int current_qm_write(struct debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

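	/* val is a function id: 0 selects the PF, 1..vfs_num selects a VF */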
	if (!val)
		qm->debug.curr_qm_qp_num = qm->qp_num;
	else
		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static ssize_t qm_debug_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	u32 val;
	int ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		val = current_qm_read(file);
		break;
	case CURRENT_Q:
		val = current_q_read(file);
		break;
	case CLEAR_ENABLE:
		val = clear_enable_read(file);
		break;
	default:
		mutex_unlock(&file->lock);
		return -EINVAL;
	}
	mutex_unlock(&file->lock);

	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	unsigned long val;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= QM_DBG_TMP_BUF_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
				     count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		ret = current_qm_write(file, val);
		break;
	case CURRENT_Q:
		ret = current_q_write(file, val);
		break;
	case CLEAR_ENABLE:
		ret = clear_enable_write(file, val);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->lock);

	if (ret)
		return ret;

	return count;
}

static const struct file_operations qm_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_debug_read,
	.write = qm_debug_write,
};

struct qm_dfx_registers {
	char *reg_name;
	u64 reg_offset;
};

#define CNT_CYC_REGS_NUM 10
static struct qm_dfx_registers qm_dfx_regs[] = {
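	/* the first CNT_CYC_REGS_NUM entries are read-clear counters */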
	{"QM_ECC_1BIT_CNT ", 0x104000ull},
	{"QM_ECC_MBIT_CNT ", 0x104008ull},
	{"QM_DFX_MB_CNT ", 0x104018ull},
	{"QM_DFX_DB_CNT ", 0x104028ull},
	{"QM_DFX_SQE_CNT ", 0x104038ull},
	{"QM_DFX_CQE_CNT ", 0x104048ull},
	{"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
	{"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
	{"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
	{"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
	{"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
	{"QM_ECC_1BIT_INF ", 0x104004ull},
	{"QM_ECC_MBIT_INF ", 0x10400cull},
	{"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
	{"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
	{"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
	{"QM_DFX_FF_ST0 ", 0x1040c8ull},
	{"QM_DFX_FF_ST1 ", 0x1040ccull},
	{"QM_DFX_FF_ST2 ", 0x1040d0ull},
	{"QM_DFX_FF_ST3 ", 0x1040d4ull},
	{"QM_DFX_FF_ST4 ", 0x1040d8ull},
	{"QM_DFX_FF_ST5 ", 0x1040dcull},
	{"QM_DFX_FF_ST6 ", 0x1040e0ull},
	{"QM_IN_IDLE_ST ", 0x1040e4ull},
	{ NULL, 0}
};

static struct qm_dfx_registers qm_vf_dfx_regs[] = {
	{"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
	{ NULL, 0}
};

static int qm_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	struct qm_dfx_registers *regs;
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		regs = qm_dfx_regs;
	else
		regs = qm_vf_dfx_regs;

	while (regs->reg_name) {
		val = readl(qm->io_base + regs->reg_offset);
		seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
		regs++;
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qm_regs);

static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
			   size_t count, loff_t *pos)
{
	char buf[QM_DBG_READ_LEN];
	int len;

	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
			"Please echo help to cmd to get help information");

	return simple_read_from_buffer(buffer, count, pos, buf, len);
}

static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
			  dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;
	void *ctx_addr;

	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx_addr)
		return ERR_PTR(-ENOMEM);

	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_err(dev, "DMA mapping error!\n");
		kfree(ctx_addr);
		return ERR_PTR(-ENOMEM);
	}

	return ctx_addr;
}

static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
			const void *ctx_addr, dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;

	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
	kfree(ctx_addr);
}

static int dump_show(struct hisi_qm *qm, void *info,
		     unsigned int info_size, char *info_name)
{
	struct device *dev = &qm->pdev->dev;
	u8 *info_buf, *info_curr = info;
	u32 i;
#define BYTE_PER_DW 4

	info_buf = kzalloc(info_size, GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

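	/* reverse the bytes of each 32-bit word so each DW prints most-significant byte first */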
	for (i = 0; i < info_size; i++, info_curr++) {
		if (i % BYTE_PER_DW == 0)
			info_buf[i + 3UL] = *info_curr;
		else if (i % BYTE_PER_DW == 1)
			info_buf[i + 1UL] = *info_curr;
		else if (i % BYTE_PER_DW == 2)
			info_buf[i - 1] = *info_curr;
		else if (i % BYTE_PER_DW == 3)
			info_buf[i - 3] = *info_curr;
	}

	dev_info(dev, "%s DUMP\n", info_name);
	for (i = 0; i < info_size; i += BYTE_PER_DW) {
		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
			info_buf[i], info_buf[i + 1UL],
			info_buf[i + 2UL], info_buf[i + 3UL]);
	}

	kfree(info_buf);

	return 0;
}

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc *sqc, *sqc_curr;
	dma_addr_t sqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
	if (IS_ERR(sqc))
		return PTR_ERR(sqc);

	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
	if (ret) {
		down_read(&qm->qps_lock);
		if (qm->sqc) {
			sqc_curr = qm->sqc + qp_id;

			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
					"SOFT SQC");
			if (ret)
				dev_info(dev, "Show soft sqc failed!\n");
		}
		up_read(&qm->qps_lock);

		goto err_free_ctx;
	}

	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
	if (ret)
		dev_info(dev, "Show hw sqc failed!\n");

err_free_ctx:
	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
	return ret;
}

static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqc *cqc, *cqc_curr;
	dma_addr_t cqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
	if (IS_ERR(cqc))
		return PTR_ERR(cqc);

	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
	if (ret) {
		down_read(&qm->qps_lock);
		if (qm->cqc) {
			cqc_curr = qm->cqc + qp_id;

			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
					"SOFT CQC");
			if (ret)
				dev_info(dev, "Show soft cqc failed!\n");
		}
		up_read(&qm->qps_lock);

		goto err_free_ctx;
	}

	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
	if (ret)
		dev_info(dev, "Show hw cqc failed!\n");

err_free_ctx:
	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
	return ret;
}

static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
			    int cmd, char *name)
{
	struct device *dev = &qm->pdev->dev;
	dma_addr_t xeqc_dma;
	void *xeqc;
	int ret;

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
	if (IS_ERR(xeqc))
		return PTR_ERR(xeqc);

	ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
	if (ret)
		goto err_free_ctx;

	ret = dump_show(qm, xeqc, size, name);
	if (ret)
		dev_info(dev, "Show hw %s failed!\n", name);

err_free_ctx:
	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
	return ret;
}

static int q_dump_param_parse(struct hisi_qm *qm, char *s,
			      u32 *e_id, u32 *q_id)
{
	struct device *dev = &qm->pdev->dev;
	unsigned int qp_num = qm->qp_num;
	char *presult;
	int ret;

	presult = strsep(&s, " ");
	if (!presult) {
		dev_err(dev, "Please input qp number!\n");
		return -EINVAL;
	}

	ret = kstrtou32(presult, 0, q_id);
	if (ret || *q_id >= qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
		return -EINVAL;
	}

	presult = strsep(&s, " ");
	if (!presult) {
		dev_err(dev, "Please input sqe number!\n");
		return -EINVAL;
	}

	ret = kstrtou32(presult, 0, e_id);
	if (ret || *e_id >= QM_Q_DEPTH) {
		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
		return -EINVAL;
	}

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;
	void *sqe, *sqe_curr;
	struct hisi_qp *qp;
	u32 qp_id, sqe_id;
	int ret;

	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
	if (ret)
		return ret;

	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
	if (!sqe)
		return -ENOMEM;

	qp = &qm->qp_array[qp_id];
	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
	       qm->debug.sqe_mask_len);

	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
	if (ret)
		dev_info(dev, "Show sqe failed!\n");

	kfree(sqe);

	return ret;
}

static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqe *cqe_curr;
	struct hisi_qp *qp;
	u32 qp_id, cqe_id;
	int ret;

	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
	if (ret)
		return ret;

	qp = &qm->qp_array[qp_id];
	cqe_curr = qp->cqe + cqe_id;
	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
	if (ret)
		dev_info(dev, "Show cqe failed!\n");

	return ret;
}

static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
			  size_t size, char *name)
{
	struct device *dev = &qm->pdev->dev;
	void *xeqe;
	u32 xeqe_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &xeqe_id);
	if (ret)
		return -EINVAL;

	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
		return -EINVAL;
	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
		return -EINVAL;
	}

	down_read(&qm->qps_lock);

	if (qm->eqe && !strcmp(name, "EQE")) {
		xeqe = qm->eqe + xeqe_id;
	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
		xeqe = qm->aeqe + xeqe_id;
	} else {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = dump_show(qm, xeqe, size, name);
	if (ret)
		dev_info(dev, "Show %s failed!\n", name);

err_unlock:
	up_read(&qm->qps_lock);
	return ret;
}

static int qm_dbg_help(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	dev_info(dev, "available commands:\n");
	dev_info(dev, "sqc <num>\n");
	dev_info(dev, "cqc <num>\n");
	dev_info(dev, "eqc\n");
	dev_info(dev, "aeqc\n");
	dev_info(dev, "sq <num> <e>\n");
	dev_info(dev, "cq <num> <e>\n");
	dev_info(dev, "eq <e>\n");
	dev_info(dev, "aeq <e>\n");

	return 0;
}

static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
{
	struct device *dev = &qm->pdev->dev;
	char *presult, *s, *s_tmp;
	int ret;

	s = kstrdup(cmd_buf, GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s_tmp = s;
	presult = strsep(&s, " ");
	if (!presult) {
		ret = -EINVAL;
		goto err_buffer_free;
	}

	if (!strcmp(presult, "sqc"))
		ret = qm_sqc_dump(qm, s);
	else if (!strcmp(presult, "cqc"))
		ret = qm_cqc_dump(qm, s);
	else if (!strcmp(presult, "eqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
				       QM_MB_CMD_EQC, "EQC");
	else if (!strcmp(presult, "aeqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
				       QM_MB_CMD_AEQC, "AEQC");
	else if (!strcmp(presult, "sq"))
		ret = qm_sq_dump(qm, s);
	else if (!strcmp(presult, "cq"))
		ret = qm_cq_dump(qm, s);
	else if (!strcmp(presult, "eq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
	else if (!strcmp(presult, "aeq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
	else if (!strcmp(presult, "help"))
		ret = qm_dbg_help(qm, s);
	else
		ret = -EINVAL;

	if (ret)
		dev_info(dev, "Please echo help\n");

err_buffer_free:
	kfree(s_tmp);

	return ret;
}

static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
			    size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp;
	int ret;

	if (*pos)
		return 0;

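	/* the qm is stopped (e.g. during reset), reject new dump commands */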
	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
		return 0;

	if (count > QM_DBG_WRITE_LEN)
		return -ENOSPC;

	cmd_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(cmd_buf))
		return PTR_ERR(cmd_buf);

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	ret = qm_cmd_write_dump(qm, cmd_buf);
	if (ret) {
		kfree(cmd_buf);
		return ret;
	}

	kfree(cmd_buf);

	return count;
}

static const struct file_operations qm_cmd_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_cmd_read,
	.write = qm_cmd_write,
};

static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				   enum qm_debug_file index)
{
	struct debugfs_file *file = qm->debug.files + index;

	debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
			    &qm_debug_fops);

	file->index = index;
	mutex_init(&file->lock);
	file->debug = &qm->debug;
}

static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
	qm->error_mask = ce | nfe | fe;

	writel(QM_ABNORMAL_INT_SOURCE_CLR,
	       qm->io_base + QM_ABNORMAL_INT_SOURCE);

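	/* configure which errors are reported as ce, nfe and fe */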
	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
	u32 irq_enable = ce | nfe | fe;
	u32 irq_unmask = ~irq_enable;

	qm_hw_error_cfg(qm, ce, nfe, fe);

	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
	u32 irq_enable = ce | nfe | fe;
	u32 irq_unmask = ~irq_enable;

	qm_hw_error_cfg(qm, ce, nfe, fe);

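	/* nfe errors (except doorbell-random-invalid) also shut down the master ooo port */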
	writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);

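	/* stop shutting down the master ooo port on hardware error */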
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			dev_err(dev, "qm %s doorbell timeout in function %u\n",
				qm_db_timeout[type], vf_num);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u\n",
					qm_fifo_overflow[type], vf_num);
			else
				dev_err(dev, "unknown error type\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp, val;

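	/* keep only the error bits this qm is configured to report */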
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
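		/* ce errors (and doorbell-random-invalid) do not need a reset */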
		if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
			writel(error_status, qm->io_base +
			       QM_ABNORMAL_INT_SOURCE);
			writel(qm->err_info.nfe,
			       qm->io_base + QM_RAS_NFE_ENABLE);
			return ACC_ERR_RECOVERED;
		}

		return ACC_ERR_NEED_RESET;
	}

	return ACC_ERR_RECOVERED;
}

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

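/* PF only: check whether an unrecoverable QM or device error is pending */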
static int qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = qm_get_hw_error_status(qm);
	dev_val = qm_get_dev_err_status(qm);

	if (qm->ver < QM_HW_V3)
		return (val & QM_ECC_MBIT) ||
		       (dev_val & qm->err_info.ecc_2bits_mask);

	return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
	       (dev_val & (~qm->err_info.dev_ce_mask));
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, vf_id);
	if (ret) {
		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || qm->ver < QM_HW_V3)
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
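		/* all VFs have sent their command to the PF, stop waiting */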
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

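	/* handle the message of each VF that pinged the PF */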
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) did not ping PF!\n", i);
	}

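	/* clear the interrupt source to acknowledge the VFs */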
2122 qm_clear_cmd_interrupt(qm, val);
2123
2124 return ret;
2125}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_mailbox mailbox;
	int cnt = 0;
	u64 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VF(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* The VF cleared its ready bit: notification succeeded. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	struct qm_mailbox mailbox;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
	mutex_lock(&qm->mailbox_lock);
	/* PF sends command to all VFs by mailbox */
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VFs!\n");
		mutex_unlock(&qm->mailbox_lock);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* All VFs acked, PF notified every VF successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			mutex_unlock(&qm->mailbox_lock);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	mutex_unlock(&qm->mailbox_lock);

	/* Check which VFs did not respond before the timeout. */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			dev_err(dev, "failed to get response from VF(%u)!\n", i);
	}

	return -ETIMEDOUT;
}

static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
{
	struct qm_mailbox mailbox;
	int cnt = 0;
	u32 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
		goto unlock;
	}

	qm_trigger_pf_interrupt(qm);
	/* Waiting for PF response */
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readl(qm->io_base + QM_IFC_INT_SET_V);
		if (!(val & QM_IFC_INT_STATUS_MASK))
			break;

		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
			ret = -ETIMEDOUT;
			break;
		}
	}

unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_stop_qp(struct hisi_qp *qp)
{
	return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static void qm_wait_msi_finish(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 cmd = ~0;
	int cnt = 0;
	u32 val;
	int ret;

	while (true) {
		pci_read_config_dword(pdev, pdev->msi_cap +
				      PCI_MSI_PENDING_64, &cmd);
		if (!cmd)
			break;

		if (++cnt > MAX_WAIT_COUNTS) {
			pci_warn(pdev, "failed to empty MSI PENDING!\n");
			break;
		}

		udelay(1);
	}

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
					 val, !(val & QM_PEH_DFX_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to empty PEH MSI!\n");

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
					 val, !(val & QM_PEH_MSI_FINISH_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to finish MSI operation!\n");
}

static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	int ret = -ETIMEDOUT;
	u32 cmd, i;

	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
	if (set)
		cmd |= QM_MSI_CAP_ENABLE;
	else
		cmd &= ~QM_MSI_CAP_ENABLE;

	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
	if (set) {
		for (i = 0; i < MAX_WAIT_COUNTS; i++) {
			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
			if (cmd & QM_MSI_CAP_ENABLE)
				return 0;

			udelay(1);
		}
	} else {
		udelay(WAIT_PERIOD_US_MIN);
		qm_wait_msi_finish(qm);
		ret = 0;
	}

	return ret;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
	.qm_db = qm_db_v1,
	.get_irq_num = qm_get_irq_num_v1,
	.hw_error_init = qm_hw_error_init_v1,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.get_irq_num = qm_get_irq_num_v2,
	.hw_error_init = qm_hw_error_init_v2,
	.hw_error_uninit = qm_hw_error_uninit_v2,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.get_irq_num = qm_get_irq_num_v3,
	.hw_error_init = qm_hw_error_init_v3,
	.hw_error_uninit = qm_hw_error_uninit_v3,
	.hw_error_handle = qm_hw_error_handle_v2,
	.stop_qp = qm_stop_qp,
	.set_msi = qm_set_msi_v3,
	.ping_all_vfs = qm_ping_all_vfs,
	.ping_pf = qm_ping_pf,
};

static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;

	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
		return NULL;

	return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int qp_id;

	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
		return ERR_PTR(-EPERM);

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp = &qm->qp_array[qp_id];

	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);

	qp->event_cb = NULL;
	qp->req_cb = NULL;
	qp->qp_id = qp_id;
	qp->alg_type = alg_type;
	qp->is_in_kernel = true;
	qm->qp_in_used++;
	atomic_set(&qp->qp_status.flags, QP_INIT);

	return qp;
}

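/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return created qp on success, or an ERR_PTR on failure: -EBUSY if all
 * queue pairs in qm are occupied, -EPERM if qm is not in a state that
 * allows qp creation.
 */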
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);

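/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp we want to release.
 *
 * This function releases the resource of a qp.
 */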
void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
		up_write(&qm->qps_lock);
		return;
	}

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);

static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc *sqc;
	dma_addr_t sqc_dma;
	int ret;

	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
	if (!sqc)
		return -ENOMEM;

	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
	if (ver == QM_HW_V1) {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
	} else {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
		sqc->w8 = 0;
	}
	sqc->cq_num = cpu_to_le16(qp_id);
	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
				       QM_QC_PASID_ENABLE_SHIFT);

	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sqc_dma)) {
		kfree(sqc);
		return -ENOMEM;
	}

	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
	kfree(sqc);

	return ret;
}

static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_cqc *cqc;
	dma_addr_t cqc_dma;
	int ret;

	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
	if (!cqc)
		return -ENOMEM;

	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
	if (ver == QM_HW_V1) {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
							QM_QC_CQE_SIZE));
		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
	} else {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
		cqc->w8 = 0;
	}
	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);

	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cqc_dma)) {
		kfree(cqc);
		return -ENOMEM;
	}

	ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
	kfree(cqc);

	return ret;
}

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	int ret;

	qm_init_qp_status(qp);

	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	return qm_cq_ctx_cfg(qp, qp_id, pasid);
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	int qp_id = qp->qp_id;
	u32 pasid = arg;
	int ret;

	if (!qm_qp_avail_state(qm, qp, QP_START))
		return -EPERM;

	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	atomic_set(&qp->qp_status.flags, QP_START);
	dev_dbg(dev, "queue %d started\n", qp_id);

	return 0;
}

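/**
 * hisi_qm_start_qp() - Start a qp into running.
 * @qp: The qp we want to start to run.
 * @arg: Accelerator specific argument.
 *
 * After this function, qp can receive requests from user. Return 0 if
 * successful, negative error code if failed.
 */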
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	int ret;

	down_write(&qm->qps_lock);
	ret = qm_start_qp_nolock(qp, arg);
	up_write(&qm->qps_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);

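/*
 * Called after a qp is stopped with requests still outstanding: hand every
 * pending sqe back to its request callback so callers can clean up.
 */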
static void qp_stop_fail_cb(struct hisi_qp *qp)
{
	int qp_used = atomic_read(&qp->qp_status.used);
	u16 cur_tail = qp->qp_status.sq_tail;
	u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
	struct hisi_qm *qm = qp->qm;
	u16 pos;
	int i;

	for (i = 0; i < qp_used; i++) {
		pos = (i + cur_head) % QM_Q_DEPTH;
		qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
		atomic_dec(&qp->qp_status.used);
	}
}

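/*
 * qm_drain_qp() - Drain a qp.
 * @qp: The qp we want to drain.
 *
 * Determine whether the queue is cleared by judging the tail pointers of
 * sqc and cqc.
 */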
static int qm_drain_qp(struct hisi_qp *qp)
{
	size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	dma_addr_t dma_addr;
	int ret = 0, i = 0;
	void *addr;

	/* No need to drain if the device is already in an error state. */
	if (qm_check_dev_error(qm))
		return 0;

	/* Stop the qp in hardware directly when the QM supports it. */
	if (qm->ops->stop_qp) {
		ret = qm->ops->stop_qp(qp);
		if (ret)
			dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
		return ret;
	}

	addr = qm_ctx_alloc(qm, size, &dma_addr);
	if (IS_ERR(addr)) {
		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
		return -ENOMEM;
	}

	while (++i) {
		ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
		if (ret) {
			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
			break;
		}
		sqc = addr;

		ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
				      qp->qp_id);
		if (ret) {
			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
			break;
		}
		cqc = addr + sizeof(struct qm_sqc);

		if ((sqc->tail == cqc->tail) &&
		    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
			break;

		if (i == MAX_WAIT_COUNTS) {
			dev_err(dev, "Failed to empty queue %u!\n", qp->qp_id);
			ret = -EBUSY;
			break;
		}

		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
	}

	qm_ctx_free(qm, size, addr, &dma_addr);

	return ret;
}

static int qm_stop_qp_nolock(struct hisi_qp *qp)
{
	struct device *dev = &qp->qm->pdev->dev;
	int ret;

	/*
	 * It is allowed to stop and release a qp during reset. If the qp is
	 * stopped by the reset but still needs to be released afterwards,
	 * clear is_resetting so this qp will not be restarted after reset.
	 */
	if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
		qp->is_resetting = false;
		return 0;
	}

	if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
		return -EPERM;

	atomic_set(&qp->qp_status.flags, QP_STOP);

	ret = qm_drain_qp(qp);
	if (ret)
		dev_err(dev, "Failed to drain out data for stopping!\n");

	if (qp->qm->wq)
		flush_workqueue(qp->qm->wq);
	else
		flush_work(&qp->qm->work);

	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
		qp_stop_fail_cb(qp);

	dev_dbg(dev, "stop queue %u!", qp->qp_id);

	return 0;
}

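/**
 * hisi_qm_stop_qp() - Stop a qp in qm.
 * @qp: The qp we want to stop.
 *
 * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
 */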
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
	int ret;

	down_write(&qp->qm->qps_lock);
	ret = qm_stop_qp_nolock(qp);
	up_write(&qp->qm->qps_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);

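/**
 * hisi_qp_send() - Queue up a task in the hardware queue.
 * @qp: The qp in which to put the message.
 * @msg: The message.
 *
 * This function will return -EBUSY if qp is currently full, and -EAGAIN
 * if qp related qm is resetting.
 *
 * Note: This function may run concurrently with an ACC reset. A reset
 * occurring during hisi_qp_send may make the send fail, but even then the
 * qp itself will not be corrupted.
 */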
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;
	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
	void *sqe = qm_get_avail_sqe(qp);

	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
		     atomic_read(&qp->qm->status.flags) == QM_STOP ||
		     qp->is_resetting)) {
		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
		return -EAGAIN;
	}

	if (!sqe)
		return -EBUSY;

	memcpy(sqe, msg, qp->qm->sqe_size);

	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
	atomic_inc(&qp->qp_status.used);
	qp_status->sq_tail = sq_tail_next;

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qp_send);
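
/*
 * Minimal usage sketch of the qp lifecycle for an in-kernel caller. This is
 * illustrative only: "my_qm", "my_sqe" and the busy-retry policy are
 * hypothetical and not part of this driver.
 *
 *	struct hisi_qp *qp = hisi_qm_create_qp(my_qm, 0);
 *
 *	if (!IS_ERR(qp) && !hisi_qm_start_qp(qp, 0)) {
 *		while (hisi_qp_send(qp, my_sqe) == -EBUSY)
 *			cpu_relax();		// queue full, retry
 *		// ... wait for completion via qp->req_cb ...
 *		hisi_qm_stop_qp(qp);
 *		hisi_qm_release_qp(qp);
 *	}
 */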

static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	if (qm->ver == QM_HW_V1)
		return;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), POLL_PERIOD,
				       POLL_TIMEOUT))
		dev_err(&qm->pdev->dev, "QM writeback sqc cache failed!\n");
}

static void qm_qp_event_notifier(struct hisi_qp *qp)
{
	wake_up_interruptible(&qp->uacce_q->wait);
}

static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
	return hisi_qm_get_free_qp_num(uacce->priv);
}

static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
				   unsigned long arg,
				   struct uacce_queue *q)
{
	struct hisi_qm *qm = uacce->priv;
	struct hisi_qp *qp;
	u8 alg_type = 0;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	q->priv = qp;
	q->uacce = uacce;
	qp->uacce_q = q;
	qp->event_cb = qm_qp_event_notifier;
	qp->pasid = arg;
	qp->is_in_kernel = false;

	return 0;
}

static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
	struct hisi_qp *qp = q->priv;

	hisi_qm_cache_wb(qp->qm);
	hisi_qm_release_qp(qp);
}

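/* map sq/cq/doorbell to user space */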
static int hisi_qm_uacce_mmap(struct uacce_queue *q,
			      struct vm_area_struct *vma,
			      struct uacce_qfile_region *qfr)
{
	struct hisi_qp *qp = q->priv;
	struct hisi_qm *qm = qp->qm;
	resource_size_t phys_base = qm->db_phys_base +
				    qp->qp_id * qm->db_interval;
	size_t sz = vma->vm_end - vma->vm_start;
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned long vm_pgoff;
	int ret;

	switch (qfr->type) {
	case UACCE_QFRT_MMIO:
		if (qm->ver == QM_HW_V1) {
			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
				return -EINVAL;
		} else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
				return -EINVAL;
		} else {
			if (sz > qm->db_interval)
				return -EINVAL;
		}

		vma->vm_flags |= VM_IO;

		return remap_pfn_range(vma, vma->vm_start,
				       phys_base >> PAGE_SHIFT,
				       sz, pgprot_noncached(vma->vm_page_prot));
	case UACCE_QFRT_DUS:
		if (sz != qp->qdma.size)
			return -EINVAL;

		/*
		 * dma_mmap_coherent() requires vm_pgoff as 0;
		 * restore vm_pgoff to its initial value for mmap().
		 */
		vm_pgoff = vma->vm_pgoff;
		vma->vm_pgoff = 0;
		ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
					qp->qdma.dma, sz);
		vma->vm_pgoff = vm_pgoff;
		return ret;

	default:
		return -EINVAL;
	}
}

static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
{
	struct hisi_qp *qp = q->priv;

	return hisi_qm_start_qp(qp, qp->pasid);
}

static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
{
	hisi_qm_stop_qp(q->priv);
}

static int hisi_qm_is_q_updated(struct uacce_queue *q)
{
	struct hisi_qp *qp = q->priv;
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	int updated = 0;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		/* make sure to read data from memory */
		dma_rmb();
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		updated = 1;
	}

	return updated;
}

static void qm_set_sqctype(struct uacce_queue *q, u16 type)
{
	struct hisi_qm *qm = q->uacce->priv;
	struct hisi_qp *qp = q->priv;

	down_write(&qm->qps_lock);
	qp->alg_type = type;
	up_write(&qm->qps_lock);
}

static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
				unsigned long arg)
{
	struct hisi_qp *qp = q->priv;
	struct hisi_qp_ctx qp_ctx;

	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
		if (copy_from_user(&qp_ctx, (void __user *)arg,
				   sizeof(struct hisi_qp_ctx)))
			return -EFAULT;

		if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
			return -EINVAL;

		qm_set_sqctype(q, qp_ctx.qc_type);
		qp_ctx.id = qp->qp_id;

		if (copy_to_user((void __user *)arg, &qp_ctx,
				 sizeof(struct hisi_qp_ctx)))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	return 0;
}

static const struct uacce_ops uacce_qm_ops = {
	.get_available_instances = hisi_qm_get_available_instances,
	.get_queue = hisi_qm_uacce_get_queue,
	.put_queue = hisi_qm_uacce_put_queue,
	.start_queue = hisi_qm_uacce_start_queue,
	.stop_queue = hisi_qm_uacce_stop_queue,
	.mmap = hisi_qm_uacce_mmap,
	.ioctl = hisi_qm_uacce_ioctl,
	.is_q_updated = hisi_qm_is_q_updated,
};

static int qm_alloc_uacce(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct uacce_device *uacce;
	unsigned long mmio_page_nr;
	unsigned long dus_page_nr;
	struct uacce_interface interface = {
		.flags = UACCE_DEV_SVA,
		.ops = &uacce_qm_ops,
	};
	int ret;

	ret = strscpy(interface.name, pdev->driver->name,
		      sizeof(interface.name));
	if (ret < 0)
		return -ENAMETOOLONG;

	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce))
		return PTR_ERR(uacce);

	if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
		qm->use_sva = true;
	} else {
		/* only consider sva case */
		uacce_remove(uacce);
		qm->uacce = NULL;
		return -EINVAL;
	}

	uacce->is_vf = pdev->is_virtfn;
	uacce->priv = qm;
	uacce->algs = qm->algs;

	if (qm->ver == QM_HW_V1)
		uacce->api_ver = HISI_QM_API_VER_BASE;
	else if (qm->ver == QM_HW_V2)
		uacce->api_ver = HISI_QM_API_VER2_BASE;
	else
		uacce->api_ver = HISI_QM_API_VER3_BASE;

	if (qm->ver == QM_HW_V1)
		mmio_page_nr = QM_DOORBELL_PAGE_NR;
	else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
		mmio_page_nr = QM_DOORBELL_PAGE_NR +
			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
	else
		mmio_page_nr = qm->db_interval / PAGE_SIZE;

	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
		       sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;

	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;

	qm->uacce = uacce;

	return 0;
}

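/*
 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests
 * and put the qm into frozen state.
 *
 * Return 0 for success, -EBUSY if qps are still in use.
 */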
static int qm_frozen(struct hisi_qm *qm)
{
	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
		return 0;

	down_write(&qm->qps_lock);

	if (!qm->qp_in_used) {
		qm->qp_in_used = qm->qp_num;
		up_write(&qm->qps_lock);
		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
		return 0;
	}

	up_write(&qm->qps_lock);

	return -EBUSY;
}

static int qm_try_frozen_vfs(struct pci_dev *pdev,
			     struct hisi_qm_list *qm_list)
{
	struct hisi_qm *qm, *vf_qm;
	struct pci_dev *dev;
	int ret = 0;

	if (!qm_list || !pdev)
		return -EINVAL;

	/* Try to freeze all the VFs as SRIOV is being disabled. */
	mutex_lock(&qm_list->lock);
	list_for_each_entry(qm, &qm_list->list, list) {
		dev = qm->pdev;
		if (dev == pdev)
			continue;
		if (pci_physfn(dev) == pdev) {
			vf_qm = pci_get_drvdata(dev);
			ret = qm_frozen(vf_qm);
			if (ret)
				goto frozen_fail;
		}
	}

frozen_fail:
	mutex_unlock(&qm_list->lock);

	return ret;
}

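/**
 * hisi_qm_wait_task_finish() - Wait until the task is finished
 * when removing the driver.
 * @qm: The qm needed to wait.
 * @qm_list: The list of all available devices.
 */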
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	while (qm_frozen(qm) ||
	       ((qm->fun_type == QM_HW_PF) &&
	       qm_try_frozen_vfs(qm->pdev, qm_list))) {
		msleep(WAIT_PERIOD);
	}

	while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
	       test_bit(QM_RESETTING, &qm->misc_ctl))
		msleep(WAIT_PERIOD);

	udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);

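/**
 * hisi_qm_get_free_qp_num() - Get free number of qp in qm.
 * @qm: The qm which want to get free qp.
 *
 * This function return free number of qp in qm.
 */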
int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
	int ret;

	down_read(&qm->qps_lock);
	ret = qm->qp_num - qm->qp_in_used;
	up_read(&qm->qps_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);

static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_dma *qdma;
	int i;

	for (i = num - 1; i >= 0; i--) {
		qdma = &qm->qp_array[i].qdma;
		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
	}

	kfree(qm->qp_array);
}

static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
{
	struct device *dev = &qm->pdev->dev;
	size_t off = qm->sqe_size * QM_Q_DEPTH;
	struct hisi_qp *qp;

	qp = &qm->qp_array[id];
	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
					 GFP_KERNEL);
	if (!qp->qdma.va)
		return -ENOMEM;

	qp->sqe = qp->qdma.va;
	qp->sqe_dma = qp->qdma.dma;
	qp->cqe = qp->qdma.va + off;
	qp->cqe_dma = qp->qdma.dma + off;
	qp->qdma.size = dma_size;
	qp->qm = qm;
	qp->qp_id = id;

	return 0;
}

static void hisi_qm_pre_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	if (qm->ver == QM_HW_V1)
		qm->ops = &qm_hw_ops_v1;
	else if (qm->ver == QM_HW_V2)
		qm->ops = &qm_hw_ops_v2;
	else
		qm->ops = &qm_hw_ops_v3;

	pci_set_drvdata(pdev, qm);
	mutex_init(&qm->mailbox_lock);
	init_rwsem(&qm->qps_lock);
	qm->qp_in_used = 0;
	qm->misc_ctl = 0;
}

static void qm_cmd_uninit(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_IFC_INT_MASK);
	val |= QM_IFC_INT_DISABLE;
	writel(val, qm->io_base + QM_IFC_INT_MASK);
}

static void qm_cmd_init(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	/* Clear communication interrupt source */
	qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);

	/* Enable pf to vf communication reg. */
	val = readl(qm->io_base + QM_IFC_INT_MASK);
	val &= ~QM_IFC_INT_DISABLE;
	writel(val, qm->io_base + QM_IFC_INT_MASK);
}

static void qm_put_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	if (qm->use_db_isolation)
		iounmap(qm->db_io_base);

	iounmap(qm->io_base);
	pci_release_mem_regions(pdev);
}

static void hisi_qm_pci_uninit(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	pci_free_irq_vectors(pdev);
	qm_put_pci_res(qm);
	pci_disable_device(pdev);
}

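/**
 * hisi_qm_uninit() - Uninitialize qm.
 * @qm: The qm needed uninit.
 *
 * This function uninits qm related device resources.
 */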
void hisi_qm_uninit(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;

	qm_cmd_uninit(qm);
	kfree(qm->factor);
	down_write(&qm->qps_lock);

	if (!qm_avail_state(qm, QM_CLOSE)) {
		up_write(&qm->qps_lock);
		return;
	}

	hisi_qp_memory_uninit(qm, qm->qp_num);
	idr_destroy(&qm->qp_idr);

	if (qm->qdma.va) {
		hisi_qm_cache_wb(qm);
		dma_free_coherent(dev, qm->qdma.size,
				  qm->qdma.va, qm->qdma.dma);
	}

	qm_irq_unregister(qm);
	hisi_qm_pci_uninit(qm);
	uacce_remove(qm->uacce);
	qm->uacce = NULL;

	up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);

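/**
 * hisi_qm_get_vft() - Get vft from a qm.
 * @qm: The qm we want to get its vft.
 * @base: The base number of queue in vft.
 * @number: The number of queues in vft.
 *
 * We can allocate multiple queues to a qm by configuring the virtual
 * function table. We get related configures by this function. Normally, we
 * call this function in VF driver to get the queue information.
 *
 * qm hw v1 does not support this interface.
 */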
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
{
	if (!base || !number)
		return -EINVAL;

	if (!qm->ops->get_vft) {
		dev_err(&qm->pdev->dev, "Don't support vft read!\n");
		return -EINVAL;
	}

	return qm->ops->get_vft(qm, base, number);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_vft);

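/*
 * hisi_qm_set_vft() - Set vft to a qm.
 * @qm: The qm we want to set its vft.
 * @fun_num: The function number.
 * @base: The base number of queue in vft.
 * @number: The number of queues in vft.
 *
 * Always called in the PF driver. Assign queues A~B to a function:
 * hisi_qm_set_vft(qm, fun_num, A, B - A + 1); take away all queues of a
 * function: hisi_qm_set_vft(qm, fun_num, 0, 0).
 */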
static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			   u32 number)
{
	u32 max_q_num = qm->ctrl_qp_num;

	if (base >= max_q_num || number > max_q_num ||
	    (base + number) > max_q_num)
		return -EINVAL;

	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}

static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
	struct hisi_qm_status *status = &qm->status;

	status->eq_head = 0;
	status->aeq_head = 0;
	status->eqc_phase = true;
	status->aeqc_phase = true;
}

static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_eqc *eqc;
	dma_addr_t eqc_dma;
	int ret;

	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
	if (!eqc)
		return -ENOMEM;

	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
	if (qm->ver == QM_HW_V1)
		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));

	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, eqc_dma)) {
		kfree(eqc);
		return -ENOMEM;
	}

	ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
	kfree(eqc);

	return ret;
}

static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_aeqc *aeqc;
	dma_addr_t aeqc_dma;
	int ret;

	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
	if (!aeqc)
		return -ENOMEM;

	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));

	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, aeqc_dma)) {
		kfree(aeqc);
		return -ENOMEM;
	}

	ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
	kfree(aeqc);

	return ret;
}

static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm_init_eq_aeq_status(qm);

	ret = qm_eq_ctx_cfg(qm);
	if (ret) {
		dev_err(dev, "Set eqc failed!\n");
		return ret;
	}

	return qm_aeq_ctx_cfg(qm);
}

static int __hisi_qm_start(struct hisi_qm *qm)
{
	int ret;

	WARN_ON(!qm->qdma.va);

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_mem_reset(qm);
		if (ret)
			return ret;

		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
		if (ret)
			return ret;
	}

	ret = qm_eq_aeq_ctx_cfg(qm);
	if (ret)
		return ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret)
		return ret;

	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret)
		return ret;

	qm_init_prefetch(qm);

	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);

	return 0;
}

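/**
 * hisi_qm_start() - start qm
 * @qm: The qm to be started.
 *
 * This function starts a qm, then we can allocate qp from this qm.
 */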
int hisi_qm_start(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret = 0;

	down_write(&qm->qps_lock);

	if (!qm_avail_state(qm, QM_START)) {
		up_write(&qm->qps_lock);
		return -EPERM;
	}

	dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);

	if (!qm->qp_num) {
		dev_err(dev, "qp_num should not be 0\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = __hisi_qm_start(qm);
	if (!ret)
		atomic_set(&qm->status.flags, QM_START);

err_unlock:
	up_write(&qm->qps_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start);

static int qm_restart(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int ret, i;

	ret = hisi_qm_start(qm);
	if (ret < 0)
		return ret;

	down_write(&qm->qps_lock);
	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
		    qp->is_resetting == true) {
			ret = qm_start_qp_nolock(qp, 0);
			if (ret < 0) {
				dev_err(dev, "Failed to start qp%d!\n", i);

				up_write(&qm->qps_lock);
				return ret;
			}
			qp->is_resetting = false;
		}
	}
	up_write(&qm->qps_lock);

	return 0;
}

/* Stop started qps in reset flow */
static int qm_stop_started_qp(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int i, ret;

	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
			qp->is_resetting = true;
			ret = qm_stop_qp_nolock(qp);
			if (ret < 0) {
				dev_err(dev, "Failed to stop qp%d!\n", i);
				return ret;
			}
		}
	}

	return 0;
}

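/**
 * qm_clear_queues() - Clear all queues memory in a qm.
 * @qm: The qm in which the queues will be cleared.
 *
 * This function clears all queues memory in a qm. Reset of accelerator can
 * use this to clear queues.
 */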
static void qm_clear_queues(struct hisi_qm *qm)
{
	struct hisi_qp *qp;
	int i;

	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (qp->is_resetting)
			memset(qp->qdma.va, 0, qp->qdma.size);
	}

	memset(qm->qdma.va, 0, qm->qdma.size);
}

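/**
 * hisi_qm_stop() - Stop a qm.
 * @qm: The qm which will be stopped.
 * @r: The reason to stop qm.
 *
 * This function stops qm and its qps, then qm can not accept request.
 * Related resources are not released at this state, we can use hisi_qm_start
 * to let qm start again.
 */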
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
{
	struct device *dev = &qm->pdev->dev;
	int ret = 0;

	down_write(&qm->qps_lock);

	qm->status.stop_reason = r;
	if (!qm_avail_state(qm, QM_STOP)) {
		ret = -EPERM;
		goto err_unlock;
	}

	if (qm->status.stop_reason == QM_SOFT_RESET ||
	    qm->status.stop_reason == QM_FLR) {
		ret = qm_stop_started_qp(qm);
		if (ret < 0) {
			dev_err(dev, "Failed to stop started qp!\n");
			goto err_unlock;
		}
	}

	/* Mask eq and aeq irq */
	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_qm_set_vft(qm, 0, 0, 0);
		if (ret < 0) {
			dev_err(dev, "Failed to set vft!\n");
			ret = -EBUSY;
			goto err_unlock;
		}
	}

	qm_clear_queues(qm);
	atomic_set(&qm->status.flags, QM_STOP);

err_unlock:
	up_write(&qm->qps_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);

static ssize_t qm_status_read(struct file *filp, char __user *buffer,
			      size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char buf[QM_DBG_READ_LEN];
	int val, len;

	val = atomic_read(&qm->status.flags);
	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);

	return simple_read_from_buffer(buffer, count, pos, buf, len);
}

static const struct file_operations qm_status_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_status_read,
};

static int qm_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

static int qm_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
			 qm_debugfs_atomic64_set, "%llu\n");

static void qm_hw_error_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	if (!qm->ops->hw_error_init) {
		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
		return;
	}

	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
}

static void qm_hw_error_uninit(struct hisi_qm *qm)
{
	if (!qm->ops->hw_error_uninit) {
		dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
		return;
	}

	qm->ops->hw_error_uninit(qm);
}

static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
{
	if (!qm->ops->hw_error_handle) {
		dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
		return ACC_ERR_NONE;
	}

	return qm->ops->hw_error_handle(qm);
}

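/**
 * hisi_qm_dev_err_init() - Initialize device error configuration.
 * @qm: The qm for which we want to do error initialization.
 *
 * Initialize QM and device error related configuration.
 */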
void hisi_qm_dev_err_init(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	qm_hw_error_init(qm);

	if (!qm->err_ini->hw_err_enable) {
		dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
		return;
	}
	qm->err_ini->hw_err_enable(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);

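/**
 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
 * @qm: The qm for which we want to do error uninitialization.
 *
 * Uninitialize QM and device error related configuration.
 */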
void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	qm_hw_error_uninit(qm);

	if (!qm->err_ini->hw_err_disable) {
		dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
		return;
	}
	qm->err_ini->hw_err_disable(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);

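/**
 * hisi_qm_free_qps() - free multiple queue pairs.
 * @qps: The queue pairs need to be freed.
 * @qp_num: The num of queue pairs.
 */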
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
{
	int i;

	if (!qps || qp_num <= 0)
		return;

	for (i = qp_num - 1; i >= 0; i--)
		hisi_qm_release_qp(qps[i]);
}
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);

static void free_list(struct list_head *head)
{
	struct hisi_qm_resource *res, *tmp;

	list_for_each_entry_safe(res, tmp, head, list) {
		list_del(&res->list);
		kfree(res);
	}
}

static int hisi_qm_sort_devices(int node, struct list_head *head,
				struct hisi_qm_list *qm_list)
{
	struct hisi_qm_resource *res, *tmp;
	struct hisi_qm *qm;
	struct list_head *n;
	struct device *dev;
	int dev_node = 0;

	list_for_each_entry(qm, &qm_list->list, list) {
		dev = &qm->pdev->dev;

		if (IS_ENABLED(CONFIG_NUMA)) {
			dev_node = dev_to_node(dev);
			if (dev_node < 0)
				dev_node = 0;
		}

		res = kzalloc(sizeof(*res), GFP_KERNEL);
		if (!res)
			return -ENOMEM;

		res->qm = qm;
		res->distance = node_distance(dev_node, node);

		/* Keep the list sorted by NUMA distance, nearest first. */
		n = head;
		list_for_each_entry(tmp, head, list) {
			if (res->distance < tmp->distance) {
				n = &tmp->list;
				break;
			}
		}
		list_add_tail(&res->list, n);
	}

	return 0;
}

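/**
 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
 * @qm_list: The list of all available devices.
 * @qp_num: The number of queue pairs need created.
 * @alg_type: The algorithm type.
 * @node: The numa node.
 * @qps: The queue pairs need created.
 *
 * This function will sort all available device according to numa distance.
 * Then try to create all queue pairs from one device, if all devices do
 * not meet the requirements will return error.
 */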
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps)
{
	struct hisi_qm_resource *tmp;
	int ret = -ENODEV;
	LIST_HEAD(head);
	int i;

	if (!qps || !qm_list || qp_num <= 0)
		return -EINVAL;

	mutex_lock(&qm_list->lock);
	if (hisi_qm_sort_devices(node, &head, qm_list)) {
		mutex_unlock(&qm_list->lock);
		goto err;
	}

	list_for_each_entry(tmp, &head, list) {
		for (i = 0; i < qp_num; i++) {
			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
			if (IS_ERR(qps[i])) {
				hisi_qm_free_qps(qps, i);
				break;
			}
		}

		if (i == qp_num) {
			ret = 0;
			break;
		}
	}

	mutex_unlock(&qm_list->lock);
	if (ret)
		pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
			node, alg_type, qp_num);

err:
	free_list(&head);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);

static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
	u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
	u32 max_qp_num = qm->max_qp_num;
	u32 q_base = qm->qp_num;
	int ret;

	if (!num_vfs)
		return -EINVAL;

	vfs_q_num = qm->ctrl_qp_num - qm->qp_num;

	/* If there are fewer queues left than VFs, assignment cannot work. */
	if (vfs_q_num < num_vfs)
		return -EINVAL;

	q_num = vfs_q_num / num_vfs;
	remain_q_num = vfs_q_num % num_vfs;

	for (i = num_vfs; i > 0; i--) {
		/*
		 * If q_num + remain_q_num > max_qp_num in the last vf, divide
		 * the remaining queues equally.
		 */
		if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
			act_q_num = q_num + remain_q_num;
			remain_q_num = 0;
		} else if (remain_q_num > 0) {
			act_q_num = q_num + 1;
			remain_q_num--;
		} else {
			act_q_num = q_num;
		}

		act_q_num = min_t(int, act_q_num, max_qp_num);
		ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
		if (ret) {
			for (j = num_vfs; j > i; j--)
				hisi_qm_set_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += act_q_num;
	}

	return 0;
}

static int qm_clear_vft_config(struct hisi_qm *qm)
{
	int ret;
	u32 i;

	for (i = 1; i <= qm->vfs_num; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}
	qm->vfs_num = 0;

	return 0;
}

static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
{
	struct device *dev = &qm->pdev->dev;
	u32 ir = qos * QM_QOS_RATE;
	int ret, total_vfs, i;

	total_vfs = pci_sriov_get_totalvfs(qm->pdev);
	if (fun_index > total_vfs)
		return -EINVAL;

	qm->factor[fun_index].func_qos = qos;

	ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
	if (ret) {
		dev_err(dev, "failed to calculate shaper parameter!\n");
		return -EINVAL;
	}

	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The base number of queue reuse for different alg type */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
		if (ret) {
			dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
{
	u64 cir_u = 0, cir_b = 0, cir_s = 0;
	u64 shaper_vft, ir_calc, ir;
	unsigned int val;
	u32 error_rate;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return 0;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
	writel(fun_index, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return 0;

	shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		     ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);

	cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
	cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
	cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;

	cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
	cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;

	ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

	ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;

	error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
	if (error_rate > QM_QOS_MIN_ERROR_RATE) {
		pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate);
		return 0;
	}

	return ir;
}

static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	u64 mb_cmd;
	u32 qos;
	int ret;

	qos = qm_get_shaper_vft_qos(qm, fun_num);
	if (!qos) {
		dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
		return;
	}

	mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
	ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
	if (ret)
		dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
}

static int qm_vf_read_qos(struct hisi_qm *qm)
{
	int cnt = 0;
	int ret = 0;

	/* reset mailbox qos val */
	qm->mb_qos = 0;

	/* vf ping pf to get function qos */
	if (qm->ops->ping_pf) {
		ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
		if (ret) {
			pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
			return ret;
		}
	}

	while (true) {
		msleep(QM_WAIT_DST_ACK);
		if (qm->mb_qos)
			break;

		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
			pci_err(qm->pdev, "PF ping VF timeout!\n");
			return -ETIMEDOUT;
		}
	}

	return ret;
}

static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char tbuf[QM_DBG_READ_LEN];
	u32 qos_val, ir;
	int ret;

	/* Mailbox and reset cannot be operated at the same time */
	if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
		return -EAGAIN;
	}

	if (qm->fun_type == QM_HW_PF) {
		ir = qm_get_shaper_vft_qos(qm, 0);
	} else {
		ret = qm_vf_read_qos(qm);
		if (ret)
			goto err_get_status;
		ir = qm->mb_qos;
	}

	qos_val = ir / QM_QOS_RATE;
	ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);

	ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_get_status:
	clear_bit(QM_RESETTING, &qm->misc_ctl);
	return ret;
}

static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
{
	int buflen = strlen(buf);
	int ret, i;

	for (i = 0; i < buflen; i++) {
		if (!isdigit(buf[i]))
			return -EINVAL;
	}

	/* "%lu" matches the unsigned long destination; "%ld" would not. */
	ret = sscanf(buf, "%lu", val);
	if (ret != QM_QOS_VAL_NUM)
		return -EINVAL;

	return 0;
}

static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char tbuf[QM_DBG_READ_LEN];
	int tmp1, bus, device, function;
	char tbuf_bdf[QM_DBG_READ_LEN] = {0};
	char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
	unsigned int fun_index;
	unsigned long val = 0;
	int len, ret;

	if (qm->fun_type == QM_HW_VF)
		return -EINVAL;

	/* Mailbox and reset cannot be operated at the same time */
	if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
		return -EAGAIN;
	}

	if (*pos != 0) {
		ret = 0;
		goto err_get_status;
	}

	if (count >= QM_DBG_READ_LEN) {
		ret = -ENOSPC;
		goto err_get_status;
	}

	len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
	if (len < 0) {
		ret = len;
		goto err_get_status;
	}

	tbuf[len] = '\0';
	ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf);
	if (ret != QM_QOS_PARAM_NUM) {
		ret = -EINVAL;
		goto err_get_status;
	}

	ret = qm_qos_value_init(val_buf, &val);
	if (val == 0 || val > QM_QOS_MAX_VAL || ret) {
		pci_err(qm->pdev, "invalid qos value, please set a value from 1 to 1000!\n");
		ret = -EINVAL;
		goto err_get_status;
	}

	ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function);
	if (ret != QM_QOS_BDF_PARAM_NUM) {
		pci_err(qm->pdev, "invalid pci bdf value!\n");
		ret = -EINVAL;
		goto err_get_status;
	}

	fun_index = device * 8 + function;

	ret = qm_func_shaper_enable(qm, fun_index, val);
	if (ret) {
		pci_err(qm->pdev, "failed to enable function shaper!\n");
		ret = -EINVAL;
		goto err_get_status;
	}

	ret = count;

err_get_status:
	clear_bit(QM_RESETTING, &qm->misc_ctl);
	return ret;
}

static const struct file_operations qm_algqos_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_algqos_read,
	.write = qm_algqos_write,
};

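/**
 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create function qos debugfs files.
 */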
static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		debugfs_create_file("alg_qos", 0644,