#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

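/* Number of free descriptors in the ring; one slot is always kept empty so
 * that a full ring can be told apart from an empty one.
 */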
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

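/* Re-arm an already initialised descriptor so it can be submitted again,
 * refreshing only the default flags and the read/write direction.
 */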
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

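/* Zero a descriptor and fill in the opcode and default flags; the WR flag
 * marks the command as a read, i.e. the firmware writes the result back.
 */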
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		/* preserve the software reset ready bit and program the depth */
		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* these commands use several descriptors, and the first descriptor
	 * carries the opcode and the return value
	 */
	static const u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
					  HCLGE_OPC_STATS_32_BIT,
					  HCLGE_OPC_STATS_MAC,
					  HCLGE_OPC_STATS_MAC_ALL,
					  HCLGE_OPC_QUERY_32_BIT_REG,
					  HCLGE_OPC_QUERY_64_BIT_REG,
					  HCLGE_QUERY_CLEAR_MPF_RAS_INT,
					  HCLGE_QUERY_CLEAR_PF_RAS_INT,
					  HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
					  HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
					  HCLGE_QUERY_ALL_ERR_INFO};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

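/* mapping between an IMP firmware error code and the errno returned to
 * callers of hclge_cmd_send()
 */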
struct errcode {
	u32 imp_errcode;
	int common_errno;
};

static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
				int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

static int hclge_cmd_convert_err_code(u16 desc_ret)
{
	static const struct errcode hclge_cmd_errcode[] = {
		{HCLGE_CMD_EXEC_SUCCESS, 0},
		{HCLGE_CMD_NO_AUTH, -EPERM},
		{HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HCLGE_CMD_QUEUE_FULL, -EXFULL},
		{HCLGE_CMD_NEXT_ERR, -ENOSR},
		{HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
		{HCLGE_CMD_PARA_ERR, -EINVAL},
		{HCLGE_CMD_RESULT_ERR, -ERANGE},
		{HCLGE_CMD_TIMEOUT, -ETIME},
		{HCLGE_CMD_HILINK_ERR, -ENOLINK},
		{HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HCLGE_CMD_INVALID, -EBADR},
	};
	u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_cmd_errcode[i].common_errno;

	return -EIO;
}

static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* copy the result written back by firmware to the caller */
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_cmd_convert_err_code(desc_ret);
}

static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	bool is_completed = false;
	u32 timeout = 0;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				is_completed = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_cmd_check_retval(hw, desc, num, ntc);

	/* clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

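/**
 * hclge_cmd_send - send command descriptors to the command send queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor(s) describing the command
 * @num: number of descriptors to be sent
 *
 * Copies @num descriptors into the CSQ, rings the tail doorbell, waits for
 * the firmware to process them (for synchronous commands), copies the
 * results back into @desc and cleans the queue. Returns 0 on success or a
 * negative errno derived from the firmware return code.
 *
 * A minimal usage sketch, mirroring the callers later in this file:
 *
 *	struct hclge_desc desc;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 */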
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If the CSQ is full, the software and hardware heads may
		 * differ, so resynchronise next_to_clean from hardware.
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record where these descriptors start in the ring; the hardware
	 * writes its results back at this location.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_cmd_copy_desc(hw, desc, num);

	/* ring the doorbell to hand the descriptors to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	ret = hclge_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

static void hclge_set_default_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

static void hclge_parse_capability(struct hclge_dev *hdev,
				   struct hclge_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps;

	caps = __le32_to_cpu(cmd->caps[0]);
	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
		set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
		set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B))
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B))
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
		set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B))
		set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B))
		set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) {
		set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
	}
}

static __le32 hclge_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_query_version_cmd *)desc.data;
	resp->api_caps = hclge_build_api_caps();

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
				HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_set_default_capability(hdev);

	hclge_parse_capability(hdev, resp);

	return ret;
}

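/* Allocate and initialise the command send queue (CSQ) and command receive
 * queue (CRQ) rings used to communicate with the IMP firmware.
 */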
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the number of queue entries */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

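/* Tell the firmware which optional, compatible features the driver can
 * handle: link event reporting, NCSI error reporting and, if supported,
 * a PHY managed by the IMP.
 */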
static int hclge_firmware_compat_config(struct hclge_dev *hdev)
{
	struct hclge_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	req = (struct hclge_firmware_compat_cmd *)desc.data;

	hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
	hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
	if (hnae3_dev_phy_imp_supported(hdev))
		hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
	req->compat = cpu_to_le32(compat);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

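/* Reset the command queue pointers, program the queue registers, then query
 * the firmware version/capabilities and negotiate compatible features.
 */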
int hclge_cmd_init(struct hclge_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a pending reset; if so, the command queue must
	 * stay disabled until the reset has been handled.
	 */
	if (hclge_is_reset_pending(hdev)) {
		dev_err(&hdev->pdev->dev,
			"failed to init cmd since reset %#lx pending\n",
			hdev->reset_pending);
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get firmware version and device capabilities */
	ret = hclge_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	/* ask the firmware to enable some features; the driver can still
	 * work without them
	 */
	ret = hclge_firmware_compat_config(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}

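/* Disable the command queue, clear the queue registers and free the
 * descriptor memory of both rings.
 */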
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* give firmware time to finish commands that may still be in flight
	 * before the queue registers are cleared
	 */
	msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}