// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

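/* offsets of each module's block in the DFX BD number query result */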
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},

	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
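		/* only the first desc carries the command head, so it
		 * holds fewer stats values than the following descs
		 */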
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

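	/* this may be called from atomic context, so GFP_ATOMIC is
	 * used for the descriptor allocation below
	 */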
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
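		/* as above, only the first desc has the command head */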
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

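	/* the first desc holds 3 of the stats registers; each following
	 * desc holds 4, so round the remainder up to a whole desc
	 */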
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);

		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);

		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

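	/* each tqp provides a TX queue and an RX queue */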
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

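	/* Loopback test support rules:
	 * mac (app): only GE mode is supported
	 * serdes: supported by all mac modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */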
	if (stringset == ETH_SS_TEST) {
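		/* clear the loopback bit flags at first */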
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

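	/* record whether this pf is the main pf */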
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

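		/* check whether pf reset is done */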
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

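		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */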
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

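/* map the firmware's speed encoding to a HCLGE_MAC_SPEED_* value */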
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

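	/* default to support all speeds for GE port */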
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

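	/* get the configuration */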
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);

	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

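	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
	 * of 2 rather than the value itself; if the field is 0, the PF
	 * falls back to the same max rss size as the VF.
	 */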
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

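	/* the tx spare buffer size read from the configuration is in
	 * units of HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so convert it
	 */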
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

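/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */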
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);

		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

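	/* devices below version V3 do not support querying specifications
	 * from firmware, so fall back to the default specifications
	 */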
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

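	/* get pf resource */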
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

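	/* minimal queue pairs equals to the number of vports */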
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

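	/* dev does not support DCB */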
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

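	/* non-contiguous tc maps are not supported currently */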
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

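	/* pick the default affinity cpu from the local numa node,
	 * spread by pci function number
	 */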
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

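		/* queues at index >= HCLGE_TQP_MAX_SIZE_DEV_V2 live in an
		 * extended register region
		 */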
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

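	/* ensure one to one mapping between irq and queue at default */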
1732 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1733 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1734
1735 return 0;
1736}
1737
1738static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1739 u16 num_tx_desc, u16 num_rx_desc)
1740
1741{
1742 struct hnae3_handle *nic = &vport->nic;
1743 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1744 struct hclge_dev *hdev = vport->back;
1745 int ret;
1746
1747 kinfo->num_tx_desc = num_tx_desc;
1748 kinfo->num_rx_desc = num_rx_desc;
1749
1750 kinfo->rx_buf_len = hdev->rx_buf_len;
1751 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1752
1753 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1754 sizeof(struct hnae3_queue *), GFP_KERNEL);
1755 if (!kinfo->tqp)
1756 return -ENOMEM;
1757
1758 ret = hclge_assign_tqp(vport, num_tqps);
1759 if (ret)
1760 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1761
1762 return ret;
1763}
1764
1765static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1766 struct hclge_vport *vport)
1767{
1768 struct hnae3_handle *nic = &vport->nic;
1769 struct hnae3_knic_private_info *kinfo;
1770 u16 i;
1771
1772 kinfo = &nic->kinfo;
1773 for (i = 0; i < vport->alloc_tqps; i++) {
1774 struct hclge_tqp *q =
1775 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1776 bool is_pf;
1777 int ret;
1778
1779 is_pf = !(vport->vport_id);
1780 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1781 i, is_pf);
1782 if (ret)
1783 return ret;
1784 }
1785
1786 return 0;
1787}
1788
1789static int hclge_map_tqp(struct hclge_dev *hdev)
1790{
1791 struct hclge_vport *vport = hdev->vport;
1792 u16 i, num_vport;
1793
1794 num_vport = hdev->num_req_vfs + 1;
1795 for (i = 0; i < num_vport; i++) {
1796 int ret;
1797
1798 ret = hclge_map_tqp_to_vport(hdev, vport);
1799 if (ret)
1800 return ret;
1801
1802 vport++;
1803 }
1804
1805 return 0;
1806}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
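
/* Worked example (illustrative numbers): with 33 TQPs and 7 requested
 * VFs, num_vport = 8; every vport gets 33 / 8 = 4 queues and the main
 * (PF) vport additionally takes the remainder, 4 + 33 % 8 = 5.
 */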

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)

	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
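
/* Encoding example (illustrative): an 8 KiB TC buffer is written as
 * (0x2000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) = 64, i.e. 64 units of 128
 * bytes, OR'ed with HCLGE_BUF_SIZE_UPDATE_EN_MSK so firmware actually
 * applies the value.
 */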

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
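
/* Worked example (illustrative, DCB case): with mps = 1500 the aligned
 * MPS is roundup(1500, 256) = 1536, so the shared buffer must hold at
 * least 2 * 1536 + dv_buf_size bytes, and each TC's high threshold ends
 * up at least 2 * 1536 = 3072 bytes, rounded down to a 256-byte
 * boundary.
 */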

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP	0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
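
/* The calculation above is a fallback ladder: try private buffers only
 * (no shared buffer), then private plus shared with generous
 * waterlines, then with minimal waterlines, then drop the private
 * buffers of non-PFC TCs, and finally drop the private buffers of PFC
 * TCs before giving up with -ENOMEM.
 */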

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
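
/* Encoding note (illustrative figures): buf_num[] carries the TC buffer
 * size in HCLGE_BUF_UNIT_S granules with the TC0_PRI_BUF_EN bit set;
 * assuming HCLGE_BUF_UNIT_S is 8, a 4 KiB private buffer would be
 * reported as 16 units of 256 bytes.
 */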

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->num_nic_msi;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	case HCLGE_MAC_SPEED_200G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 8);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
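
/* Register encoding used by the switch above:
 * speed:  1G  10G  25G  40G  50G  100G  10M  100M  200G
 * field:   0    1    2    3    4     5    6     7     8
 */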

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (!mac->support_autoneg && mac->speed == speed &&
	    mac->duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}

static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}

static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret)
		return ret;

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret)
			return ret;
	}

	mac->link = 0;

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret)
			return ret;
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_loopback(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task, 0);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task, 0);
}

static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task, 0);
}

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task,
				    delay_time);
}
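
/* All four schedulers above funnel into the same delayed work
 * (hdev->service_task) on the preferred CPU of affinity_mask; the
 * HCLGE_STATE_* bits decide which sub-task (mailbox, reset, error
 * handling or the periodic service) the worker actually runs.
 */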

static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	return 0;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*link_status = HCLGE_LINK_STATUS_DOWN;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
		return 0;

	return hclge_get_mac_link_status(hdev, link_status);
}

static void hclge_push_link_status(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
			continue;

		ret = hclge_push_vf_link_status(vport);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to push link status to vf%u, ret = %d\n",
				i, ret);
		}
	}
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	int state;
	int ret;

	if (!client)
		return;

	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
		return;

	ret = hclge_get_mac_phy_link(hdev, &state);
	if (ret) {
		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
		return;
	}

	if (state != hdev->hw.mac.link) {
		hdev->hw.mac.link = state;
		client->ops->link_status_change(handle, state);
		hclge_config_mac_tnl_int(hdev, state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, state);

		hclge_push_link_status(hdev);
	}

	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}

static void hclge_update_port_capability(struct hclge_dev *hdev,
					 struct hclge_mac *mac)
{
	if (hnae3_dev_fec_supported(hdev))
		/* update fec ability by speed */
		hclge_convert_setting_fec(mac);

	/* firmware can not identify back plane type, the media type
	 * read from configuration can help deal it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		linkmode_zero(mac->advertising);
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP do not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

	/* In some case, mac speed get from IMP may be 0, it shouldn't be
	 * set to mac->speed.
	 */
	if (!le32_to_cpu(resp->speed))
		return 0;

	mac->speed = le32_to_cpu(resp->speed);

	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		mac->speed_type = QUERY_ACTIVE_SPEED;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}

static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
					struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	u32 supported, advertising, lp_advertising;
	struct hclge_dev *hdev = vport->back;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get phy link ksetting, ret = %d.\n", ret);
		return ret;
	}

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	cmd->base.autoneg = req0->autoneg;
	cmd->base.speed = le32_to_cpu(req0->speed);
	cmd->base.duplex = req0->duplex;
	cmd->base.port = req0->port;
	cmd->base.transceiver = req0->transceiver;
	cmd->base.phy_address = req0->phy_address;
	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
	supported = le32_to_cpu(req0->supported);
	advertising = le32_to_cpu(req0->advertising);
	lp_advertising = le32_to_cpu(req0->lp_advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	cmd->base.master_slave_cfg = req1->master_slave_cfg;
	cmd->base.master_slave_state = req1->master_slave_state;

	return 0;
}

static int
hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
			     const struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	struct hclge_dev *hdev = vport->back;
	u32 advertising;
	int ret;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	req0->autoneg = cmd->base.autoneg;
	req0->speed = cpu_to_le32(cmd->base.speed);
	req0->duplex = cmd->base.duplex;
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	req0->advertising = cpu_to_le32(advertising);
	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	req1->master_slave_cfg = cmd->base.master_slave_cfg;

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set phy link ksettings, ret = %d.\n", ret);
		return ret;
	}

	hdev->hw.mac.autoneg = cmd->base.autoneg;
	hdev->hw.mac.speed = cmd->base.speed;
	hdev->hw.mac.duplex = cmd->base.duplex;
	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);

	return 0;
}

static int hclge_update_tp_port_info(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;
	int ret;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
	if (ret)
		return ret;

	hdev->hw.mac.autoneg = cmd.base.autoneg;
	hdev->hw.mac.speed = cmd.base.speed;
	hdev->hw.mac.duplex = cmd.base.duplex;

	return 0;
}

static int hclge_tp_port_init(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	cmd.base.autoneg = hdev->hw.mac.autoneg;
	cmd.base.speed = hdev->hw.mac.speed;
	cmd.base.duplex = hdev->hw.mac.duplex;
	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);

	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
}

static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed = HCLGE_MAC_SPEED_UNKNOWN;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return hclge_update_tp_port_info(hdev);

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		ret = hclge_get_sfp_info(hdev, mac);
	else
		ret = hclge_get_sfp_speed(hdev, &speed);

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(hdev, mac);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL);
	} else {
		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
			return 0; /* do nothing if no SFP */

		/* must config full duplex for SFP */
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
	}
}
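
/* Decision tree of hclge_update_port_info(): copper ports are refreshed
 * through the PHY ksettings query; fibre ports query the SFP via IMP,
 * either the full info (dev_version >= V2) or just the speed, and the
 * MAC is reconfigured to full duplex when only a speed is reported.
 */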

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
	if (!pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
		return NULL;
	}

	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
			vf, pci_num_vf(hdev->pdev));
		return NULL;
	}

	/* VF start from 1 in vport */
	vf += HCLGE_VF_VPORT_START_NUM;
	return &hdev->vport[vf];
}

static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
			       struct ifla_vf_info *ivf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	ivf->vf = vf;
	ivf->linkstate = vport->vf_info.link_state;
	ivf->spoofchk = vport->vf_info.spoofchk;
	ivf->trusted = vport->vf_info.trusted;
	ivf->min_tx_rate = 0;
	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
	ether_addr_copy(ivf->mac, vport->vf_info.mac);

	return 0;
}

static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
				   int link_state)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int link_state_old;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	link_state_old = vport->vf_info.link_state;
	vport->vf_info.link_state = link_state;

	ret = hclge_push_vf_link_status(vport);
	if (ret) {
		vport->vf_info.link_state = link_state_old;
		dev_err(&hdev->pdev->dev,
			"failed to push vf%d link status, ret = %d\n", vf, ret);
	}

	return ret;
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	hw_err_src_reg = hclge_read_dev(&hdev->hw,
					HCLGE_RAS_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event and hardware error event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 ptp event source */
	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_PTP;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);

	return HCLGE_VECTOR0_EVENT_OTHER;
}
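
/* Event priority implemented above: IMP reset > global reset > HW error
 * (MSI-X/RAS) > PTP > mailbox; reset events also latch
 * HCLGE_STATE_CMD_DISABLE so no new commands are issued to a dying IMP.
 */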

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_PTP:
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	unsigned long flags;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		hclge_errhand_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_PTP:
		spin_lock_irqsave(&hdev->ptp->lock, flags);
		hclge_ptp_clean_tx_hwts(hdev);
		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	hclge_clear_event_cause(hdev, event_cause, clearval);

	/* Enable interrupt if it is not caused by reset or error event */
	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
		hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}
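
/* Note: for reset and hardware-error events the misc vector stays
 * masked here; it is re-enabled later by the corresponding task once
 * the cause has been handled, which prevents an interrupt storm while
 * the reset is still pending.
 */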

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
				      const cpumask_t *mask)
{
	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
					      affinity_notify);

	cpumask_copy(&hdev->affinity_mask, mask);
}

static void hclge_irq_affinity_release(struct kref *ref)
{
}

static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
			      &hdev->affinity_mask);

	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
	hdev->affinity_notify.release = hclge_irq_affinity_release;
	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
				  &hdev->affinity_notify);
}

static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGE_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *client = hdev->nic_client;
	int ret;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].roce;
	struct hnae3_client *client = hdev->roce_client;
	int ret;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	350

	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
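
/* Worst-case wait implemented above: 350 polls * 100 ms = 35 seconds
 * before the reset is declared timed out with -EBUSY.
 */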

static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%u) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%u) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_sync_cmd *req;
	struct hclge_desc desc;
	int cnt = 0;
	int ret;

	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);

	do {
		/* vf needs to down netdev by mbx during PF or FLR reset */
		hclge_mailbox_service_task(hdev);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatible with old firmware, wait
		 * 100 ms for VF to stop queue and clear
		 * pending msg
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return;
		} else if (ret) {
			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
				 ret);
			return;
		} else if (req->all_vf_ready) {
			return;
		}
		msleep(HCLGE_PF_RESET_SYNC_TIME);
		hclge_cmd_reuse_desc(&desc, true);
	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);

	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}

void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type)
{
	struct hnae3_client *client = hdev->nic_client;

	if (!client || !client->ops->process_hw_error ||
	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return;

	client->ops->process_hw_error(&hdev->vport[0].nic, type);
}

static void hclge_handle_imp_error(struct hclge_dev *hdev)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}

	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finish\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		dev_info(&pdev->dev, "global reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
3810
3811static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3812 unsigned long *addr)
3813{
3814 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3815 struct hclge_dev *hdev = ae_dev->priv;
3816
3817
3818 if (test_bit(HNAE3_IMP_RESET, addr)) {
3819 rst_level = HNAE3_IMP_RESET;
3820 clear_bit(HNAE3_IMP_RESET, addr);
3821 clear_bit(HNAE3_GLOBAL_RESET, addr);
3822 clear_bit(HNAE3_FUNC_RESET, addr);
3823 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3824 rst_level = HNAE3_GLOBAL_RESET;
3825 clear_bit(HNAE3_GLOBAL_RESET, addr);
3826 clear_bit(HNAE3_FUNC_RESET, addr);
3827 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3828 rst_level = HNAE3_FUNC_RESET;
3829 clear_bit(HNAE3_FUNC_RESET, addr);
3830 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3831 rst_level = HNAE3_FLR_RESET;
3832 clear_bit(HNAE3_FLR_RESET, addr);
3833 }
3834
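	/* a pending lower-level reset must not preempt the reset that is
	 * currently being processed
	 */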
	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source can only be
	 * cleared after the hardware reset is done
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}

static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_all_vf_rst(hdev, true);
	if (ret)
		return ret;

	hclge_func_reset_sync_vf(hdev);

	return 0;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

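		/* After the PF reset is asserted, mailbox handling and
		 * commands to the firmware are only valid after the command
		 * queue is reinitialized, so disable the command queue here
		 */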
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;
		break;
	case HNAE3_IMP_RESET:
		hclge_handle_imp_error(hdev);
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

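	/* inform the hardware that the driver's preparatory work is done */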
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static void hclge_show_rst_info(struct hclge_dev *hdev)
{
	char *buf;

	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
	if (!buf)
		return;

	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);

	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);

	kfree(buf);
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because of new reset interrupt\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status with IMP when reset fails */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	hclge_show_rst_info(hdev);

	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	return false;
}

static void hclge_update_reset_level(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_level;

	/* reset request will not be set during reset, so clear the
	 * pending reset request to avoid an unnecessary reset
	 * caused by the same reason
	 */
	hclge_get_reset_level(ae_dev, &hdev->reset_request);

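	/* if default_reset_request has a higher-level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix
	 */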
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
}

static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
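	/* To be compatible with old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */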
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear the software reset ready flag now that the reset is done */
	hclge_reset_handshake(hdev, false);

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}

static int hclge_reset_prepare(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.reset_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclge_reset_prepare_wait(hdev);
}

static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_reset_stack(hdev);
	rtnl_unlock();
	if (ret)
		return ret;

	hclge_clear_reset_cause(hdev);

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
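	/* ignore the error of the RoCE client init only after the reset
	 * has already failed HCLGE_RESET_MAX_FAIL_CNT - 1 times, so the
	 * NIC can still be brought up on the last attempt
	 */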
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	hclge_update_reset_level(hdev);

	return 0;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

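	/* This handler can be called for two reasons:
	 * 1. a new reset request from the stack, e.g. a TX timeout
	 * 2. a reset request from lower-level modules such as RAS
	 * Requests arriving within HCLGE_RESET_INTERVAL of the last reset
	 * are deferred to the reset timer; otherwise the reset level is
	 * determined and the reset task is scheduled.
	 */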
	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	}

	if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

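	/* first handle any reset already pending in the hardware: if it
	 * completes within a reasonable time, continue with the driver and
	 * client rebuild; on failure the error path re-schedules this task
	 * so the status can be checked again later
	 */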
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if there is any *new* reset request to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling is done */
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error(hdev,
					       &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	}

	hclge_handle_hw_ras_error(ae_dev);

	hclge_handle_err_reset_request(hdev);
}

static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}

static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

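	/* start from vport 1; vport 0 is the PF and is always alive */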
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If a VF is not alive, restore its MPS to the default */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);

	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		return;

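	/* Always handle the link updating to make sure the link state is
	 * updated when it is triggered by the mailbox
	 */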
	hclge_update_link_status(hdev);
	hclge_sync_mac_table(hdev);
	hclge_sync_promisc_mode(hdev);
	hclge_sync_fd_table(hdev);

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	hclge_update_vport_alive(hdev);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
		hclge_update_stats_for_all(hdev);

	hclge_update_port_info(hdev);
	hclge_sync_vlan_filter(hdev);

	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
		hclge_rfs_filter_expire(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclge_task_schedule(hdev, delta);
}

static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent racing with the PTP irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

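	/* check HCLGE_STATE_PTP_TX_HANDLING again, since the irq handler
	 * may have handled it just before spin_lock_irqsave()
	 */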
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_ptp_service_task(hdev);
	hclge_mailbox_service_task(hdev);
	hclge_periodic_service_task(hdev);

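	/* Handle error recovery, reset and mailbox once more in case the
	 * periodic task delayed them by calling hclge_task_schedule() in
	 * hclge_periodic_service_task()
	 */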
	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_mailbox_service_task(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
				  struct hnae3_vector_info *vector_info)
{
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64

	vector_info->vector = pci_irq_vector(hdev->pdev, idx);

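	/* vectors at index 64 and above live in the extended register
	 * region and need the extended offset
	 */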
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.io_base +
			HCLGE_VECTOR_REG_BASE +
			(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.io_base +
			HCLGE_VECTOR_EXT_REG_BASE +
			(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
			HCLGE_VECTOR_REG_OFFSET_H +
			(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
			HCLGE_VECTOR_REG_OFFSET;

	hdev->vector_status[idx] = hdev->vport[0].vport_id;
	hdev->vector_irq[idx] = vector_info->vector;
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	u16 i = 0;
	u16 j;

	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		while (++i < hdev->num_nic_msi) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				hclge_get_vector_info(hdev, i, vector);
				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector = %d\n", vector);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_RSS_KEY_SIZE;
	req = (struct hclge_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int rss_cfg_tbl_num;
	u8 rss_msb_oft;
	u8 rss_msb_val;
	int ret;
	u16 qid;
	int i;
	u32 j;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGE_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
					   false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
			req->rss_qid_l[j] = qid & 0xff;
			rss_msb_oft =
				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
		}
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple config from the PF vport */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirection table */
	if (indir)
		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
				    struct ethtool_rxnfc *nfc,
				    struct hclge_rss_input_tuple_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	u8 tuple_sets;

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init rss tuple cmd, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
				     u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u64 hclge_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->pf_rss_size_max;
}

static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	struct hnae3_tc_info *tc_info;
	u16 roundup_size;
	u16 rss_size;
	int i;

	tc_info = &vport->nic.kinfo.tc_info;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

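		/* tc_size written to the hardware is the log2 of the
		 * roundup power of two of rss_size; the actual queue size
		 * is limited by the indirection table
		 */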
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}

		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = tc_info->tqp_offset[i];
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 *rss_indir = vport[0].rss_indirection_tbl;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	return hclge_init_rss_tc_mode(hdev);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	int i;

	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
}

static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = &hdev->vport[0];
	u16 *rss_ind_tbl;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	vport->rss_tuple_sets.ipv6_sctp_en =
		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
		HCLGE_RSS_INPUT_TUPLE_SCTP;
	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;

	vport->rss_algo = rss_algo;

	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
				   sizeof(*rss_ind_tbl), GFP_KERNEL);
	if (!rss_ind_tbl)
		return -ENOMEM;

	vport->rss_indirection_tbl = rss_ind_tbl;
	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);

	hclge_rss_indir_init_cfg(hdev);

	return 0;
}

int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc, op, false);
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector = %d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector_id = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

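	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed, so broadcast
	 * promisc should stay disabled until the user enables promisc mode
	 */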
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}

static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}

static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
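		/* 1) if the new state is TO_ADD, just replace the old rule
		 *    with the same location, no matter its state, because
		 *    the new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule has
		 *    been configured to the hardware, so just replace the
		 *    old rule node with the same location.
		 */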
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
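		/* if the old rule is still TO_ADD, it has never been
		 * written to the hardware, so just remove it from the
		 * list; otherwise mark it TO_DEL so the periodic task
		 * deletes it from the hardware later
		 */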
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}

static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;

		/* record the parent node, used to keep the nodes in
		 * fd_rule_list sorted in ascending order
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert the fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}

static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret = %d\n", ret);
	return ret;
}

static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
					  struct hclge_fd_rule *rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;
	struct hclge_fd_user_def_info *info, *old_info;
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return 0;

	/* valid layers start from 1, so minus 1 to index user_def_cfg */
	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	info = &rule->ep.user_def;

	if (!cfg->ref_cnt || cfg->offset == info->offset)
		return 0;

	if (cfg->ref_cnt > 1)
		goto error;

	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
	if (fd_rule) {
		old_info = &fd_rule->ep.user_def;
		if (info->layer == old_info->layer)
			return 0;
	}

error:
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
		info->layer + 1);
	return -ENOSPC;
}

static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt) {
		cfg->offset = rule->ep.user_def.offset;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
	cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt)
		return;

	cfg->ref_cnt--;
	if (!cfg->ref_cnt) {
		cfg->offset = 0;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
}

static void hclge_update_fd_list(struct hclge_dev *hdev,
				 enum HCLGE_FD_NODE_STATE state, u16 location,
				 struct hclge_fd_rule *new_rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;

	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
		return;
	}

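	/* deleting a rule that does not exist is unexpected, just warn */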
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
			 location);
		return;
	}

	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;

	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);

	hclge_fd_set_user_def_cmd(hdev, cfg);
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS	0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If the max 400-bit key is in use, MAC address tuples can be
	 * supported as well
	 */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
5712
5713static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5714 int loc, u8 *key, bool is_add)
5715{
5716 struct hclge_fd_tcam_config_1_cmd *req1;
5717 struct hclge_fd_tcam_config_2_cmd *req2;
5718 struct hclge_fd_tcam_config_3_cmd *req3;
5719 struct hclge_desc desc[3];
5720 int ret;
5721
5722 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5723 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5724 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5725 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5726 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5727
5728 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5729 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5730 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5731
5732 req1->stage = stage;
5733 req1->xy_sel = sel_x ? 1 : 0;
5734 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5735 req1->index = cpu_to_le32(loc);
5736 req1->entry_vld = sel_x ? is_add : 0;
5737
5738 if (key) {
5739 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5740 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5741 sizeof(req2->tcam_data));
5742 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5743 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5744 }
5745
5746 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5747 if (ret)
5748 dev_err(&hdev->pdev->dev,
5749 "config tcam key fail, ret=%d\n",
5750 ret);
5751
5752 return ret;
5753}
5754
5755static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5756 struct hclge_fd_ad_data *action)
5757{
5758 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5759 struct hclge_fd_ad_config_cmd *req;
5760 struct hclge_desc desc;
5761 u64 ad_data = 0;
5762 int ret;
5763
5764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5765
5766 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5767 req->index = cpu_to_le32(loc);
5768 req->stage = stage;
5769
5770 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5771 action->write_rule_id_to_bd);
5772 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5773 action->rule_id);
5774 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5775 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5776 action->override_tc);
5777 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5778 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5779 }
5780 ad_data <<= 32;
5781 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5782 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5783 action->forward_to_direct_queue);
5784 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5785 action->queue_id);
5786 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5787 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5788 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5789 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5790 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5791 action->counter_id);
5792
5793 req->ad_data = cpu_to_le64(ad_data);
5794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5795 if (ret)
5796 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5797
5798 return ret;
5799}
5800
5801static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5802 struct hclge_fd_rule *rule)
5803{
5804 int offset, moffset, ip_offset;
5805 enum HCLGE_FD_KEY_OPT key_opt;
5806 u16 tmp_x_s, tmp_y_s;
5807 u32 tmp_x_l, tmp_y_l;
5808 u8 *p = (u8 *)rule;
5809 int i;
5810
5811 if (rule->unused_tuple & BIT(tuple_bit))
5812 return true;
5813
5814 key_opt = tuple_key_info[tuple_bit].key_opt;
5815 offset = tuple_key_info[tuple_bit].offset;
5816 moffset = tuple_key_info[tuple_bit].moffset;
5817
5818 switch (key_opt) {
5819 case KEY_OPT_U8:
5820 calc_x(*key_x, p[offset], p[moffset]);
5821 calc_y(*key_y, p[offset], p[moffset]);
5822
5823 return true;
5824 case KEY_OPT_LE16:
5825 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5826 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5827 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5828 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5829
5830 return true;
5831 case KEY_OPT_LE32:
5832 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5833 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5834 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5835 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5836
5837 return true;
5838 case KEY_OPT_MAC:
5839 for (i = 0; i < ETH_ALEN; i++) {
5840 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5841 p[moffset + i]);
5842 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5843 p[moffset + i]);
5844 }
5845
5846 return true;
5847 case KEY_OPT_IP:
5848 ip_offset = IPV4_INDEX * sizeof(u32);
5849 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5850 *(u32 *)(&p[moffset + ip_offset]));
5851 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5852 *(u32 *)(&p[moffset + ip_offset]));
5853 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5854 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5855
5856 return true;
5857 default:
5858 return false;
5859 }
5860}
5861
5862static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5863 u8 vf_id, u8 network_port_id)
5864{
5865 u32 port_number = 0;
5866
5867 if (port_type == HOST_PORT) {
5868 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5869 pf_id);
5870 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5871 vf_id);
5872 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5873 } else {
5874 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5875 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5876 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5877 }
5878
5879 return port_number;
5880}
5881
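/* Pack the active meta data fields (packet type, destination vport) into
 * the meta data key and convert it into a fully-masked TCAM x/y pair,
 * left-aligned to the MSB of the meta data region.
 */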
5882static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5883 __le32 *key_x, __le32 *key_y,
5884 struct hclge_fd_rule *rule)
5885{
5886 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5887 u8 cur_pos = 0, tuple_size, shift_bits;
5888 unsigned int i;
5889
5890 for (i = 0; i < MAX_META_DATA; i++) {
5891 tuple_size = meta_data_key_info[i].key_length;
5892 tuple_bit = key_cfg->meta_data_active & BIT(i);
5893
5894 switch (tuple_bit) {
5895 case BIT(ROCE_TYPE):
5896 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5897 cur_pos += tuple_size;
5898 break;
5899 case BIT(DST_VPORT):
5900 port_number = hclge_get_port_number(HOST_PORT, 0,
5901 rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size - 1,
						cur_pos),
					cur_pos, port_number);
5905 cur_pos += tuple_size;
5906 break;
5907 default:
5908 break;
5909 }
5910 }
5911
5912 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5913 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5914 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5915
5916 *key_x = cpu_to_le32(tmp_x << shift_bits);
5917 *key_y = cpu_to_le32(tmp_y << shift_bits);
5918}
5919
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled with 0.
 */
5924static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5925 struct hclge_fd_rule *rule)
5926{
5927 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5928 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5929 u8 *cur_key_x, *cur_key_y;
5930 u8 meta_data_region;
5931 u8 tuple_size;
5932 int ret;
5933 u32 i;
5934
5935 memset(key_x, 0, sizeof(key_x));
5936 memset(key_y, 0, sizeof(key_y));
5937 cur_key_x = key_x;
5938 cur_key_y = key_y;
5939
	for (i = 0; i < MAX_TUPLE; i++) {
5941 bool tuple_valid;
5942
5943 tuple_size = tuple_key_info[i].key_length / 8;
5944 if (!(key_cfg->tuple_active & BIT(i)))
5945 continue;
5946
5947 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5948 cur_key_y, rule);
5949 if (tuple_valid) {
5950 cur_key_x += tuple_size;
5951 cur_key_y += tuple_size;
5952 }
5953 }
5954
5955 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5956 MAX_META_DATA_LENGTH / 8;
5957
5958 hclge_fd_convert_meta_data(key_cfg,
5959 (__le32 *)(key_x + meta_data_region),
5960 (__le32 *)(key_y + meta_data_region),
5961 rule);
5962
5963 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5964 true);
5965 if (ret) {
5966 dev_err(&hdev->pdev->dev,
5967 "fd key_y config fail, loc=%u, ret=%d\n",
5968 rule->queue_id, ret);
5969 return ret;
5970 }
5971
5972 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5973 true);
5974 if (ret)
5975 dev_err(&hdev->pdev->dev,
5976 "fd key_x config fail, loc=%u, ret=%d\n",
5977 rule->queue_id, ret);
5978 return ret;
5979}
5980
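/* Build the action data for a rule: drop, TC override (tqp offset and
 * log2 of the tqp count of the target TC) or direct queue, plus a per-VF
 * counter when flow counters are available.
 */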
5981static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5982 struct hclge_fd_rule *rule)
5983{
5984 struct hclge_vport *vport = hdev->vport;
5985 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5986 struct hclge_fd_ad_data ad_data;
5987
5988 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5989 ad_data.ad_id = rule->location;
5990
5991 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5992 ad_data.drop_packet = true;
5993 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5994 ad_data.override_tc = true;
5995 ad_data.queue_id =
5996 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5997 ad_data.tc_size =
5998 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5999 } else {
6000 ad_data.forward_to_direct_queue = true;
6001 ad_data.queue_id = rule->queue_id;
6002 }
6003
6004 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6005 ad_data.use_counter = true;
6006 ad_data.counter_id = rule->vf_id %
6007 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6008 } else {
6009 ad_data.use_counter = false;
6010 ad_data.counter_id = 0;
6011 }
6012
6013 ad_data.use_next_stage = false;
6014 ad_data.next_input_key = 0;
6015
6016 ad_data.write_rule_id_to_bd = true;
6017 ad_data.rule_id = rule->location;
6018
6019 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6020}
6021
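/* The following helpers validate one ethtool flow spec variant each and
 * mark every tuple whose field is left zero in the spec as unused, so it
 * is ignored when the TCAM key is built.
 */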
6022static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6023 u32 *unused_tuple)
6024{
6025 if (!spec || !unused_tuple)
6026 return -EINVAL;
6027
6028 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6029
6030 if (!spec->ip4src)
6031 *unused_tuple |= BIT(INNER_SRC_IP);
6032
6033 if (!spec->ip4dst)
6034 *unused_tuple |= BIT(INNER_DST_IP);
6035
6036 if (!spec->psrc)
6037 *unused_tuple |= BIT(INNER_SRC_PORT);
6038
6039 if (!spec->pdst)
6040 *unused_tuple |= BIT(INNER_DST_PORT);
6041
6042 if (!spec->tos)
6043 *unused_tuple |= BIT(INNER_IP_TOS);
6044
6045 return 0;
6046}
6047
6048static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6049 u32 *unused_tuple)
6050{
6051 if (!spec || !unused_tuple)
6052 return -EINVAL;
6053
6054 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6055 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6056
6057 if (!spec->ip4src)
6058 *unused_tuple |= BIT(INNER_SRC_IP);
6059
6060 if (!spec->ip4dst)
6061 *unused_tuple |= BIT(INNER_DST_IP);
6062
6063 if (!spec->tos)
6064 *unused_tuple |= BIT(INNER_IP_TOS);
6065
6066 if (!spec->proto)
6067 *unused_tuple |= BIT(INNER_IP_PROTO);
6068
6069 if (spec->l4_4_bytes)
6070 return -EOPNOTSUPP;
6071
6072 if (spec->ip_ver != ETH_RX_NFC_IP4)
6073 return -EOPNOTSUPP;
6074
6075 return 0;
6076}
6077
6078static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6079 u32 *unused_tuple)
6080{
6081 if (!spec || !unused_tuple)
6082 return -EINVAL;
6083
6084 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6085
	/* check whether src/dst ip address used */
6087 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6088 *unused_tuple |= BIT(INNER_SRC_IP);
6089
6090 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6091 *unused_tuple |= BIT(INNER_DST_IP);
6092
6093 if (!spec->psrc)
6094 *unused_tuple |= BIT(INNER_SRC_PORT);
6095
6096 if (!spec->pdst)
6097 *unused_tuple |= BIT(INNER_DST_PORT);
6098
6099 if (!spec->tclass)
6100 *unused_tuple |= BIT(INNER_IP_TOS);
6101
6102 return 0;
6103}
6104
6105static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6106 u32 *unused_tuple)
6107{
6108 if (!spec || !unused_tuple)
6109 return -EINVAL;
6110
6111 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6112 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6113
	/* check whether src/dst ip address used */
6115 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6116 *unused_tuple |= BIT(INNER_SRC_IP);
6117
6118 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6119 *unused_tuple |= BIT(INNER_DST_IP);
6120
6121 if (!spec->l4_proto)
6122 *unused_tuple |= BIT(INNER_IP_PROTO);
6123
6124 if (!spec->tclass)
6125 *unused_tuple |= BIT(INNER_IP_TOS);
6126
6127 if (spec->l4_4_bytes)
6128 return -EOPNOTSUPP;
6129
6130 return 0;
6131}
6132
6133static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6134{
6135 if (!spec || !unused_tuple)
6136 return -EINVAL;
6137
6138 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6139 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6140 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6141
6142 if (is_zero_ether_addr(spec->h_source))
6143 *unused_tuple |= BIT(INNER_SRC_MAC);
6144
6145 if (is_zero_ether_addr(spec->h_dest))
6146 *unused_tuple |= BIT(INNER_DST_MAC);
6147
6148 if (!spec->h_proto)
6149 *unused_tuple |= BIT(INNER_ETH_TYPE);
6150
6151 return 0;
6152}
6153
6154static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6155 struct ethtool_rx_flow_spec *fs,
6156 u32 *unused_tuple)
6157{
6158 if (fs->flow_type & FLOW_EXT) {
6159 if (fs->h_ext.vlan_etype) {
6160 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6161 return -EOPNOTSUPP;
6162 }
6163
6164 if (!fs->h_ext.vlan_tci)
6165 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6166
6167 if (fs->m_ext.vlan_tci &&
6168 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6169 dev_err(&hdev->pdev->dev,
6170 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6171 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6172 return -EINVAL;
6173 }
6174 } else {
6175 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6176 }
6177
6178 if (fs->flow_type & FLOW_MAC_EXT) {
6179 if (hdev->fd_cfg.fd_mode !=
6180 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6181 dev_err(&hdev->pdev->dev,
6182 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6183 return -EOPNOTSUPP;
6184 }
6185
6186 if (is_zero_ether_addr(fs->h_ext.h_dest))
6187 *unused_tuple |= BIT(INNER_DST_MAC);
6188 else
6189 *unused_tuple &= ~BIT(INNER_DST_MAC);
6190 }
6191
6192 return 0;
6193}
6194
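/* Map an ethtool flow type to the layer (L2/L3/L4) whose reserved tuple
 * carries the user-def bytes, and mark that tuple as used.
 */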
6195static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6196 struct hclge_fd_user_def_info *info)
6197{
6198 switch (flow_type) {
6199 case ETHER_FLOW:
6200 info->layer = HCLGE_FD_USER_DEF_L2;
6201 *unused_tuple &= ~BIT(INNER_L2_RSV);
6202 break;
6203 case IP_USER_FLOW:
6204 case IPV6_USER_FLOW:
6205 info->layer = HCLGE_FD_USER_DEF_L3;
6206 *unused_tuple &= ~BIT(INNER_L3_RSV);
6207 break;
6208 case TCP_V4_FLOW:
6209 case UDP_V4_FLOW:
6210 case TCP_V6_FLOW:
6211 case UDP_V6_FLOW:
6212 info->layer = HCLGE_FD_USER_DEF_L4;
6213 *unused_tuple &= ~BIT(INNER_L4_RSV);
6214 break;
6215 default:
6216 return -EOPNOTSUPP;
6217 }
6218
6219 return 0;
6220}
6221
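/* the user-def field is unused when both mask words are zero */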
6222static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6223{
6224 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6225}
6226
6227static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6228 struct ethtool_rx_flow_spec *fs,
6229 u32 *unused_tuple,
6230 struct hclge_fd_user_def_info *info)
6231{
6232 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6233 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6234 u16 data, offset, data_mask, offset_mask;
6235 int ret;
6236
6237 info->layer = HCLGE_FD_USER_DEF_NONE;
6238 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6239
6240 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6241 return 0;
6242
	/* user-def data from ethtool is 64 bit value, the bit0~15 is used
	 * for data, and bit16~47 is used for offset.
	 */
6246 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6247 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6248 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6249 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6250
6251 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6252 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6253 return -EOPNOTSUPP;
6254 }
6255
6256 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6257 dev_err(&hdev->pdev->dev,
6258 "user-def offset[%u] should be no more than %u\n",
6259 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6260 return -EINVAL;
6261 }
6262
6263 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6264 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6265 return -EINVAL;
6266 }
6267
6268 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6269 if (ret) {
6270 dev_err(&hdev->pdev->dev,
6271 "unsupported flow type for user-def bytes, ret = %d\n",
6272 ret);
6273 return ret;
6274 }
6275
6276 info->data = data;
6277 info->data_mask = data_mask;
6278 info->offset = offset;
6279
6280 return 0;
6281}
6282
6283static int hclge_fd_check_spec(struct hclge_dev *hdev,
6284 struct ethtool_rx_flow_spec *fs,
6285 u32 *unused_tuple,
6286 struct hclge_fd_user_def_info *info)
6287{
6288 u32 flow_type;
6289 int ret;
6290
6291 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6292 dev_err(&hdev->pdev->dev,
6293 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6294 fs->location,
6295 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6296 return -EINVAL;
6297 }
6298
6299 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6300 if (ret)
6301 return ret;
6302
6303 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6304 switch (flow_type) {
6305 case SCTP_V4_FLOW:
6306 case TCP_V4_FLOW:
6307 case UDP_V4_FLOW:
6308 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6309 unused_tuple);
6310 break;
6311 case IP_USER_FLOW:
6312 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6313 unused_tuple);
6314 break;
6315 case SCTP_V6_FLOW:
6316 case TCP_V6_FLOW:
6317 case UDP_V6_FLOW:
6318 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6319 unused_tuple);
6320 break;
6321 case IPV6_USER_FLOW:
6322 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6323 unused_tuple);
6324 break;
6325 case ETHER_FLOW:
6326 if (hdev->fd_cfg.fd_mode !=
6327 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6328 dev_err(&hdev->pdev->dev,
6329 "ETHER_FLOW is not supported in current fd mode!\n");
6330 return -EOPNOTSUPP;
6331 }
6332
6333 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6334 unused_tuple);
6335 break;
6336 default:
6337 dev_err(&hdev->pdev->dev,
6338 "unsupported protocol type, protocol type = %#x\n",
6339 flow_type);
6340 return -EOPNOTSUPP;
6341 }
6342
6343 if (ret) {
6344 dev_err(&hdev->pdev->dev,
6345 "failed to check flow union tuple, ret = %d\n",
6346 ret);
6347 return ret;
6348 }
6349
6350 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6351}
6352
6353static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6354 struct ethtool_rx_flow_spec *fs,
6355 struct hclge_fd_rule *rule, u8 ip_proto)
6356{
6357 rule->tuples.src_ip[IPV4_INDEX] =
6358 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6359 rule->tuples_mask.src_ip[IPV4_INDEX] =
6360 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6361
6362 rule->tuples.dst_ip[IPV4_INDEX] =
6363 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6364 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6365 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6366
6367 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6368 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6369
6370 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6371 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6372
6373 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6374 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6375
6376 rule->tuples.ether_proto = ETH_P_IP;
6377 rule->tuples_mask.ether_proto = 0xFFFF;
6378
6379 rule->tuples.ip_proto = ip_proto;
6380 rule->tuples_mask.ip_proto = 0xFF;
6381}
6382
6383static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6384 struct ethtool_rx_flow_spec *fs,
6385 struct hclge_fd_rule *rule)
6386{
6387 rule->tuples.src_ip[IPV4_INDEX] =
6388 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6389 rule->tuples_mask.src_ip[IPV4_INDEX] =
6390 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6391
6392 rule->tuples.dst_ip[IPV4_INDEX] =
6393 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6394 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6395 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6396
6397 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6398 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6399
6400 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6401 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6402
6403 rule->tuples.ether_proto = ETH_P_IP;
6404 rule->tuples_mask.ether_proto = 0xFFFF;
6405}
6406
6407static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6408 struct ethtool_rx_flow_spec *fs,
6409 struct hclge_fd_rule *rule, u8 ip_proto)
6410{
6411 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6412 IPV6_SIZE);
6413 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6414 IPV6_SIZE);
6415
6416 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6417 IPV6_SIZE);
6418 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6419 IPV6_SIZE);
6420
6421 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6422 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6423
6424 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6425 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6426
6427 rule->tuples.ether_proto = ETH_P_IPV6;
6428 rule->tuples_mask.ether_proto = 0xFFFF;
6429
6430 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6431 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6432
6433 rule->tuples.ip_proto = ip_proto;
6434 rule->tuples_mask.ip_proto = 0xFF;
6435}
6436
6437static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6438 struct ethtool_rx_flow_spec *fs,
6439 struct hclge_fd_rule *rule)
6440{
6441 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6442 IPV6_SIZE);
6443 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6444 IPV6_SIZE);
6445
6446 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6447 IPV6_SIZE);
6448 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6449 IPV6_SIZE);
6450
6451 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6452 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6453
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6456
6457 rule->tuples.ether_proto = ETH_P_IPV6;
6458 rule->tuples_mask.ether_proto = 0xFFFF;
6459}
6460
6461static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6462 struct ethtool_rx_flow_spec *fs,
6463 struct hclge_fd_rule *rule)
6464{
6465 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6466 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6467
6468 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6469 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6470
6471 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6472 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6473}
6474
6475static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6476 struct hclge_fd_rule *rule)
6477{
6478 switch (info->layer) {
6479 case HCLGE_FD_USER_DEF_L2:
6480 rule->tuples.l2_user_def = info->data;
6481 rule->tuples_mask.l2_user_def = info->data_mask;
6482 break;
6483 case HCLGE_FD_USER_DEF_L3:
6484 rule->tuples.l3_user_def = info->data;
6485 rule->tuples_mask.l3_user_def = info->data_mask;
6486 break;
6487 case HCLGE_FD_USER_DEF_L4:
6488 rule->tuples.l4_user_def = (u32)info->data << 16;
6489 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6490 break;
6491 default:
6492 break;
6493 }
6494
6495 rule->ep.user_def = *info;
6496}
6497
6498static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6499 struct ethtool_rx_flow_spec *fs,
6500 struct hclge_fd_rule *rule,
6501 struct hclge_fd_user_def_info *info)
6502{
6503 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6504
6505 switch (flow_type) {
6506 case SCTP_V4_FLOW:
6507 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6508 break;
6509 case TCP_V4_FLOW:
6510 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6511 break;
6512 case UDP_V4_FLOW:
6513 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6514 break;
6515 case IP_USER_FLOW:
6516 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6517 break;
6518 case SCTP_V6_FLOW:
6519 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6520 break;
6521 case TCP_V6_FLOW:
6522 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6523 break;
6524 case UDP_V6_FLOW:
6525 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6526 break;
6527 case IPV6_USER_FLOW:
6528 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6529 break;
6530 case ETHER_FLOW:
6531 hclge_fd_get_ether_tuple(hdev, fs, rule);
6532 break;
6533 default:
6534 return -EOPNOTSUPP;
6535 }
6536
6537 if (fs->flow_type & FLOW_EXT) {
6538 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6539 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6540 hclge_fd_get_user_def_tuple(info, rule);
6541 }
6542
6543 if (fs->flow_type & FLOW_MAC_EXT) {
6544 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6545 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6546 }
6547
6548 return 0;
6549}
6550
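/* Program one rule into hardware: the action entry first, then the TCAM
 * key pair (key_y before key_x; the entry is only marked valid when key_x
 * is written).
 */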
6551static int hclge_fd_config_rule(struct hclge_dev *hdev,
6552 struct hclge_fd_rule *rule)
6553{
6554 int ret;
6555
6556 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6557 if (ret)
6558 return ret;
6559
6560 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6561}
6562
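/* Common path for adding an ethtool or tc-flower rule: reject rule type
 * conflicts, flush any aRFS rules, write the rule to hardware and record
 * it in the rule list.
 */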
6563static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6564 struct hclge_fd_rule *rule)
6565{
6566 int ret;
6567
6568 spin_lock_bh(&hdev->fd_rule_lock);
6569
6570 if (hdev->fd_active_type != rule->rule_type &&
6571 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6572 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6573 dev_err(&hdev->pdev->dev,
6574 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6575 rule->rule_type, hdev->fd_active_type);
6576 spin_unlock_bh(&hdev->fd_rule_lock);
6577 return -EINVAL;
6578 }
6579
6580 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6581 if (ret)
6582 goto out;
6583
6584 ret = hclge_clear_arfs_rules(hdev);
6585 if (ret)
6586 goto out;
6587
6588 ret = hclge_fd_config_rule(hdev, rule);
6589 if (ret)
6590 goto out;
6591
6592 rule->state = HCLGE_FD_ACTIVE;
6593 hdev->fd_active_type = rule->rule_type;
6594 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6595
6596out:
6597 spin_unlock_bh(&hdev->fd_rule_lock);
6598 return ret;
6599}
6600
6601static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6602{
6603 struct hclge_vport *vport = hclge_get_vport(handle);
6604 struct hclge_dev *hdev = vport->back;
6605
6606 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6607}
6608
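/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop
 * action, otherwise the cookie carries the target VF id and queue id.
 */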
6609static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6610 u16 *vport_id, u8 *action, u16 *queue_id)
6611{
6612 struct hclge_vport *vport = hdev->vport;
6613
6614 if (ring_cookie == RX_CLS_FLOW_DISC) {
6615 *action = HCLGE_FD_ACTION_DROP_PACKET;
6616 } else {
6617 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6618 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6619 u16 tqps;
6620
6621 if (vf > hdev->num_req_vfs) {
6622 dev_err(&hdev->pdev->dev,
6623 "Error: vf id (%u) > max vf num (%u)\n",
6624 vf, hdev->num_req_vfs);
6625 return -EINVAL;
6626 }
6627
6628 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6629 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6630
6631 if (ring >= tqps) {
6632 dev_err(&hdev->pdev->dev,
6633 "Error: queue id (%u) > max tqp num (%u)\n",
6634 ring, tqps - 1);
6635 return -EINVAL;
6636 }
6637
6638 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6639 *queue_id = ring;
6640 }
6641
6642 return 0;
6643}
6644
6645static int hclge_add_fd_entry(struct hnae3_handle *handle,
6646 struct ethtool_rxnfc *cmd)
6647{
6648 struct hclge_vport *vport = hclge_get_vport(handle);
6649 struct hclge_dev *hdev = vport->back;
6650 struct hclge_fd_user_def_info info;
6651 u16 dst_vport_id = 0, q_index = 0;
6652 struct ethtool_rx_flow_spec *fs;
6653 struct hclge_fd_rule *rule;
6654 u32 unused = 0;
6655 u8 action;
6656 int ret;
6657
6658 if (!hnae3_dev_fd_supported(hdev)) {
6659 dev_err(&hdev->pdev->dev,
6660 "flow table director is not supported\n");
6661 return -EOPNOTSUPP;
6662 }
6663
6664 if (!hdev->fd_en) {
6665 dev_err(&hdev->pdev->dev,
6666 "please enable flow director first\n");
6667 return -EOPNOTSUPP;
6668 }
6669
6670 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6671
6672 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6673 if (ret)
6674 return ret;
6675
6676 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6677 &action, &q_index);
6678 if (ret)
6679 return ret;
6680
6681 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6682 if (!rule)
6683 return -ENOMEM;
6684
6685 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6686 if (ret) {
6687 kfree(rule);
6688 return ret;
6689 }
6690
6691 rule->flow_type = fs->flow_type;
6692 rule->location = fs->location;
6693 rule->unused_tuple = unused;
6694 rule->vf_id = dst_vport_id;
6695 rule->queue_id = q_index;
6696 rule->action = action;
6697 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6698
6699 ret = hclge_add_fd_entry_common(hdev, rule);
6700 if (ret)
6701 kfree(rule);
6702
6703 return ret;
6704}
6705
6706static int hclge_del_fd_entry(struct hnae3_handle *handle,
6707 struct ethtool_rxnfc *cmd)
6708{
6709 struct hclge_vport *vport = hclge_get_vport(handle);
6710 struct hclge_dev *hdev = vport->back;
6711 struct ethtool_rx_flow_spec *fs;
6712 int ret;
6713
6714 if (!hnae3_dev_fd_supported(hdev))
6715 return -EOPNOTSUPP;
6716
6717 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6718
6719 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6720 return -EINVAL;
6721
6722 spin_lock_bh(&hdev->fd_rule_lock);
6723 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6724 !test_bit(fs->location, hdev->fd_bmap)) {
6725 dev_err(&hdev->pdev->dev,
6726 "Delete fail, rule %u is inexistent\n", fs->location);
6727 spin_unlock_bh(&hdev->fd_rule_lock);
6728 return -ENOENT;
6729 }
6730
6731 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6732 NULL, false);
6733 if (ret)
6734 goto out;
6735
6736 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6737
6738out:
6739 spin_unlock_bh(&hdev->fd_rule_lock);
6740 return ret;
6741}
6742
6743static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6744 bool clear_list)
6745{
6746 struct hclge_fd_rule *rule;
6747 struct hlist_node *node;
6748 u16 location;
6749
6750 if (!hnae3_dev_fd_supported(hdev))
6751 return;
6752
6753 spin_lock_bh(&hdev->fd_rule_lock);
6754
6755 for_each_set_bit(location, hdev->fd_bmap,
6756 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6757 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6758 NULL, false);
6759
6760 if (clear_list) {
6761 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6762 rule_node) {
6763 hlist_del(&rule->rule_node);
6764 kfree(rule);
6765 }
6766 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6767 hdev->hclge_fd_rule_num = 0;
6768 bitmap_zero(hdev->fd_bmap,
6769 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6770 }
6771
6772 spin_unlock_bh(&hdev->fd_rule_lock);
6773}
6774
6775static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6776{
6777 hclge_clear_fd_rules_in_list(hdev, true);
6778 hclge_fd_disable_user_def(hdev);
6779}
6780
6781static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6782{
6783 struct hclge_vport *vport = hclge_get_vport(handle);
6784 struct hclge_dev *hdev = vport->back;
6785 struct hclge_fd_rule *rule;
6786 struct hlist_node *node;
6787
	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
6792 if (!hnae3_dev_fd_supported(hdev))
6793 return 0;
6794
	/* if fd is disabled, should not restore it when reset */
6796 if (!hdev->fd_en)
6797 return 0;
6798
6799 spin_lock_bh(&hdev->fd_rule_lock);
6800 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6801 if (rule->state == HCLGE_FD_ACTIVE)
6802 rule->state = HCLGE_FD_TO_ADD;
6803 }
6804 spin_unlock_bh(&hdev->fd_rule_lock);
6805 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6806
6807 return 0;
6808}
6809
6810static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6811 struct ethtool_rxnfc *cmd)
6812{
6813 struct hclge_vport *vport = hclge_get_vport(handle);
6814 struct hclge_dev *hdev = vport->back;
6815
6816 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6817 return -EOPNOTSUPP;
6818
6819 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6820 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6821
6822 return 0;
6823}
6824
6825static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6826 struct ethtool_tcpip4_spec *spec,
6827 struct ethtool_tcpip4_spec *spec_mask)
6828{
6829 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6830 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6831 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6832
6833 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6834 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6835 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6836
6837 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6838 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6839 0 : cpu_to_be16(rule->tuples_mask.src_port);
6840
6841 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6842 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6843 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6844
6845 spec->tos = rule->tuples.ip_tos;
6846 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6847 0 : rule->tuples_mask.ip_tos;
6848}
6849
6850static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6851 struct ethtool_usrip4_spec *spec,
6852 struct ethtool_usrip4_spec *spec_mask)
6853{
6854 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6855 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6856 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6857
6858 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6859 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6860 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6861
6862 spec->tos = rule->tuples.ip_tos;
6863 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6864 0 : rule->tuples_mask.ip_tos;
6865
6866 spec->proto = rule->tuples.ip_proto;
6867 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6868 0 : rule->tuples_mask.ip_proto;
6869
6870 spec->ip_ver = ETH_RX_NFC_IP4;
6871}
6872
6873static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6874 struct ethtool_tcpip6_spec *spec,
6875 struct ethtool_tcpip6_spec *spec_mask)
6876{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6881 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6882 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6883 else
6884 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6885 IPV6_SIZE);
6886
6887 if (rule->unused_tuple & BIT(INNER_DST_IP))
6888 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6889 else
6890 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6891 IPV6_SIZE);
6892
6893 spec->tclass = rule->tuples.ip_tos;
6894 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6895 0 : rule->tuples_mask.ip_tos;
6896
6897 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6898 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6899 0 : cpu_to_be16(rule->tuples_mask.src_port);
6900
6901 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6902 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6903 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6904}
6905
6906static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6907 struct ethtool_usrip6_spec *spec,
6908 struct ethtool_usrip6_spec *spec_mask)
6909{
6910 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6911 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6912 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6913 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6914 else
6915 cpu_to_be32_array(spec_mask->ip6src,
6916 rule->tuples_mask.src_ip, IPV6_SIZE);
6917
6918 if (rule->unused_tuple & BIT(INNER_DST_IP))
6919 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6920 else
6921 cpu_to_be32_array(spec_mask->ip6dst,
6922 rule->tuples_mask.dst_ip, IPV6_SIZE);
6923
6924 spec->tclass = rule->tuples.ip_tos;
6925 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6926 0 : rule->tuples_mask.ip_tos;
6927
6928 spec->l4_proto = rule->tuples.ip_proto;
6929 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6930 0 : rule->tuples_mask.ip_proto;
6931}
6932
6933static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6934 struct ethhdr *spec,
6935 struct ethhdr *spec_mask)
6936{
6937 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6938 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6939
6940 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6941 eth_zero_addr(spec_mask->h_source);
6942 else
6943 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6944
6945 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6946 eth_zero_addr(spec_mask->h_dest);
6947 else
6948 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6949
6950 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6951 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6952 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6953}
6954
6955static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6956 struct hclge_fd_rule *rule)
6957{
6958 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6959 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6960 fs->h_ext.data[0] = 0;
6961 fs->h_ext.data[1] = 0;
6962 fs->m_ext.data[0] = 0;
6963 fs->m_ext.data[1] = 0;
6964 } else {
6965 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6966 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6967 fs->m_ext.data[0] =
6968 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6969 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6970 }
6971}
6972
6973static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6974 struct hclge_fd_rule *rule)
6975{
6976 if (fs->flow_type & FLOW_EXT) {
6977 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6978 fs->m_ext.vlan_tci =
6979 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6980 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6981
6982 hclge_fd_get_user_def_info(fs, rule);
6983 }
6984
6985 if (fs->flow_type & FLOW_MAC_EXT) {
6986 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6987 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6988 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6989 else
6990 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6991 rule->tuples_mask.dst_mac);
6992 }
6993}
6994
6995static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6996 struct ethtool_rxnfc *cmd)
6997{
6998 struct hclge_vport *vport = hclge_get_vport(handle);
6999 struct hclge_fd_rule *rule = NULL;
7000 struct hclge_dev *hdev = vport->back;
7001 struct ethtool_rx_flow_spec *fs;
7002 struct hlist_node *node2;
7003
7004 if (!hnae3_dev_fd_supported(hdev))
7005 return -EOPNOTSUPP;
7006
7007 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7008
7009 spin_lock_bh(&hdev->fd_rule_lock);
7010
7011 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7012 if (rule->location >= fs->location)
7013 break;
7014 }
7015
7016 if (!rule || fs->location != rule->location) {
7017 spin_unlock_bh(&hdev->fd_rule_lock);
7018
7019 return -ENOENT;
7020 }
7021
7022 fs->flow_type = rule->flow_type;
7023 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7024 case SCTP_V4_FLOW:
7025 case TCP_V4_FLOW:
7026 case UDP_V4_FLOW:
7027 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7028 &fs->m_u.tcp_ip4_spec);
7029 break;
7030 case IP_USER_FLOW:
7031 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7032 &fs->m_u.usr_ip4_spec);
7033 break;
7034 case SCTP_V6_FLOW:
7035 case TCP_V6_FLOW:
7036 case UDP_V6_FLOW:
7037 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7038 &fs->m_u.tcp_ip6_spec);
7039 break;
7040 case IPV6_USER_FLOW:
7041 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7042 &fs->m_u.usr_ip6_spec);
7043 break;
	/* The flow type of fd rule has been checked before adding in to rule
	 * list. As other flow types have been handled, it must be ETHER_FLOW
	 * here, so just use ETHER_FLOW case directly.
	 */
7048 default:
7049 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7050 &fs->m_u.ether_spec);
7051 break;
7052 }
7053
7054 hclge_fd_get_ext_info(fs, rule);
7055
7056 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7057 fs->ring_cookie = RX_CLS_FLOW_DISC;
7058 } else {
7059 u64 vf_id;
7060
7061 fs->ring_cookie = rule->queue_id;
7062 vf_id = rule->vf_id;
7063 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7064 fs->ring_cookie |= vf_id;
7065 }
7066
7067 spin_unlock_bh(&hdev->fd_rule_lock);
7068
7069 return 0;
7070}
7071
7072static int hclge_get_all_rules(struct hnae3_handle *handle,
7073 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7074{
7075 struct hclge_vport *vport = hclge_get_vport(handle);
7076 struct hclge_dev *hdev = vport->back;
7077 struct hclge_fd_rule *rule;
7078 struct hlist_node *node2;
7079 int cnt = 0;
7080
7081 if (!hnae3_dev_fd_supported(hdev))
7082 return -EOPNOTSUPP;
7083
7084 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7085
7086 spin_lock_bh(&hdev->fd_rule_lock);
7087 hlist_for_each_entry_safe(rule, node2,
7088 &hdev->fd_rule_list, rule_node) {
7089 if (cnt == cmd->rule_cnt) {
7090 spin_unlock_bh(&hdev->fd_rule_lock);
7091 return -EMSGSIZE;
7092 }
7093
7094 if (rule->state == HCLGE_FD_TO_DEL)
7095 continue;
7096
7097 rule_locs[cnt] = rule->location;
7098 cnt++;
7099 }
7100
7101 spin_unlock_bh(&hdev->fd_rule_lock);
7102
7103 cmd->rule_cnt = cnt;
7104
7105 return 0;
7106}
7107
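/* extract the tuples used by aRFS from the dissected flow keys */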
7108static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7109 struct hclge_fd_rule_tuples *tuples)
7110{
7111#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7112#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7113
7114 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7115 tuples->ip_proto = fkeys->basic.ip_proto;
7116 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7117
7118 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7119 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7120 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7121 } else {
7122 int i;
7123
7124 for (i = 0; i < IPV6_SIZE; i++) {
7125 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7126 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7127 }
7128 }
7129}
7130
/* traverse all rules, check whether an existing rule has the same tuples */
7132static struct hclge_fd_rule *
7133hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7134 const struct hclge_fd_rule_tuples *tuples)
7135{
7136 struct hclge_fd_rule *rule = NULL;
7137 struct hlist_node *node;
7138
7139 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7140 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7141 return rule;
7142 }
7143
7144 return NULL;
7145}
7146
7147static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7148 struct hclge_fd_rule *rule)
7149{
7150 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7151 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7152 BIT(INNER_SRC_PORT);
7153 rule->action = 0;
7154 rule->vf_id = 0;
7155 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7156 rule->state = HCLGE_FD_TO_ADD;
7157 if (tuples->ether_proto == ETH_P_IP) {
7158 if (tuples->ip_proto == IPPROTO_TCP)
7159 rule->flow_type = TCP_V4_FLOW;
7160 else
7161 rule->flow_type = UDP_V4_FLOW;
7162 } else {
7163 if (tuples->ip_proto == IPPROTO_TCP)
7164 rule->flow_type = TCP_V6_FLOW;
7165 else
7166 rule->flow_type = UDP_V6_FLOW;
7167 }
7168 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7169 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7170}
7171
7172static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7173 u16 flow_id, struct flow_keys *fkeys)
7174{
7175 struct hclge_vport *vport = hclge_get_vport(handle);
7176 struct hclge_fd_rule_tuples new_tuples = {};
7177 struct hclge_dev *hdev = vport->back;
7178 struct hclge_fd_rule *rule;
7179 u16 bit_id;
7180
7181 if (!hnae3_dev_fd_supported(hdev))
7182 return -EOPNOTSUPP;
7183
	/* aRFS rules are not allowed while rules added by the user are
	 * still active
	 */
7187 spin_lock_bh(&hdev->fd_rule_lock);
7188 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7189 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7190 spin_unlock_bh(&hdev->fd_rule_lock);
7191 return -EOPNOTSUPP;
7192 }
7193
7194 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7195
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new one;
	 * if one exists with a different queue id, modify it;
	 * if one exists with the same queue id, do nothing
	 */
7201 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7202 if (!rule) {
7203 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7204 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7205 spin_unlock_bh(&hdev->fd_rule_lock);
7206 return -ENOSPC;
7207 }
7208
7209 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7210 if (!rule) {
7211 spin_unlock_bh(&hdev->fd_rule_lock);
7212 return -ENOMEM;
7213 }
7214
7215 rule->location = bit_id;
7216 rule->arfs.flow_id = flow_id;
7217 rule->queue_id = queue_id;
7218 hclge_fd_build_arfs_rule(&new_tuples, rule);
7219 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7220 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7221 } else if (rule->queue_id != queue_id) {
7222 rule->queue_id = queue_id;
7223 rule->state = HCLGE_FD_TO_ADD;
7224 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7225 hclge_task_schedule(hdev, 0);
7226 }
7227 spin_unlock_bh(&hdev->fd_rule_lock);
7228 return rule->location;
7229}
7230
7231static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7232{
7233#ifdef CONFIG_RFS_ACCEL
7234 struct hnae3_handle *handle = &hdev->vport[0].nic;
7235 struct hclge_fd_rule *rule;
7236 struct hlist_node *node;
7237
7238 spin_lock_bh(&hdev->fd_rule_lock);
7239 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7240 spin_unlock_bh(&hdev->fd_rule_lock);
7241 return;
7242 }
7243 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7244 if (rule->state != HCLGE_FD_ACTIVE)
7245 continue;
7246 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7247 rule->arfs.flow_id, rule->location)) {
7248 rule->state = HCLGE_FD_TO_DEL;
7249 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7250 }
7251 }
7252 spin_unlock_bh(&hdev->fd_rule_lock);
7253#endif
7254}
7255
/* the caller must hold fd_rule_lock */
7257static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7258{
7259#ifdef CONFIG_RFS_ACCEL
7260 struct hclge_fd_rule *rule;
7261 struct hlist_node *node;
7262 int ret;
7263
7264 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7265 return 0;
7266
7267 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7268 switch (rule->state) {
7269 case HCLGE_FD_TO_DEL:
7270 case HCLGE_FD_ACTIVE:
7271 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7272 rule->location, NULL, false);
7273 if (ret)
7274 return ret;
7275 fallthrough;
7276 case HCLGE_FD_TO_ADD:
7277 hclge_fd_dec_rule_cnt(hdev, rule->location);
7278 hlist_del(&rule->rule_node);
7279 kfree(rule);
7280 break;
7281 default:
7282 break;
7283 }
7284 }
7285 hclge_sync_fd_state(hdev);
7286
7287#endif
7288 return 0;
7289}
7290
7291static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7292 struct hclge_fd_rule *rule)
7293{
7294 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7295 struct flow_match_basic match;
7296 u16 ethtype_key, ethtype_mask;
7297
7298 flow_rule_match_basic(flow, &match);
7299 ethtype_key = ntohs(match.key->n_proto);
7300 ethtype_mask = ntohs(match.mask->n_proto);
7301
7302 if (ethtype_key == ETH_P_ALL) {
7303 ethtype_key = 0;
7304 ethtype_mask = 0;
7305 }
7306 rule->tuples.ether_proto = ethtype_key;
7307 rule->tuples_mask.ether_proto = ethtype_mask;
7308 rule->tuples.ip_proto = match.key->ip_proto;
7309 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7310 } else {
7311 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7312 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7313 }
7314}
7315
7316static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7317 struct hclge_fd_rule *rule)
7318{
7319 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7320 struct flow_match_eth_addrs match;
7321
7322 flow_rule_match_eth_addrs(flow, &match);
7323 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7324 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7325 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7326 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7327 } else {
7328 rule->unused_tuple |= BIT(INNER_DST_MAC);
7329 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7330 }
7331}
7332
7333static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7334 struct hclge_fd_rule *rule)
7335{
7336 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7337 struct flow_match_vlan match;
7338
7339 flow_rule_match_vlan(flow, &match);
7340 rule->tuples.vlan_tag1 = match.key->vlan_id |
7341 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7342 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7343 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7344 } else {
7345 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7346 }
7347}
7348
7349static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7350 struct hclge_fd_rule *rule)
7351{
7352 u16 addr_type = 0;
7353
7354 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7355 struct flow_match_control match;
7356
7357 flow_rule_match_control(flow, &match);
7358 addr_type = match.key->addr_type;
7359 }
7360
7361 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7362 struct flow_match_ipv4_addrs match;
7363
7364 flow_rule_match_ipv4_addrs(flow, &match);
7365 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7366 rule->tuples_mask.src_ip[IPV4_INDEX] =
7367 be32_to_cpu(match.mask->src);
7368 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7369 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7370 be32_to_cpu(match.mask->dst);
7371 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7372 struct flow_match_ipv6_addrs match;
7373
7374 flow_rule_match_ipv6_addrs(flow, &match);
7375 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7376 IPV6_SIZE);
7377 be32_to_cpu_array(rule->tuples_mask.src_ip,
7378 match.mask->src.s6_addr32, IPV6_SIZE);
7379 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7380 IPV6_SIZE);
7381 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7382 match.mask->dst.s6_addr32, IPV6_SIZE);
7383 } else {
7384 rule->unused_tuple |= BIT(INNER_SRC_IP);
7385 rule->unused_tuple |= BIT(INNER_DST_IP);
7386 }
7387}
7388
7389static void hclge_get_cls_key_port(const struct flow_rule *flow,
7390 struct hclge_fd_rule *rule)
7391{
7392 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7393 struct flow_match_ports match;
7394
7395 flow_rule_match_ports(flow, &match);
7396
7397 rule->tuples.src_port = be16_to_cpu(match.key->src);
7398 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7399 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7400 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7401 } else {
7402 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7403 rule->unused_tuple |= BIT(INNER_DST_PORT);
7404 }
7405}
7406
7407static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7408 struct flow_cls_offload *cls_flower,
7409 struct hclge_fd_rule *rule)
7410{
7411 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7412 struct flow_dissector *dissector = flow->match.dissector;
7413
7414 if (dissector->used_keys &
7415 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7416 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7417 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7418 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7419 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7420 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7421 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7422 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7423 dissector->used_keys);
7424 return -EOPNOTSUPP;
7425 }
7426
7427 hclge_get_cls_key_basic(flow, rule);
7428 hclge_get_cls_key_mac(flow, rule);
7429 hclge_get_cls_key_vlan(flow, rule);
7430 hclge_get_cls_key_ip(flow, rule);
7431 hclge_get_cls_key_port(flow, rule);
7432
7433 return 0;
7434}
7435
7436static int hclge_check_cls_flower(struct hclge_dev *hdev,
7437 struct flow_cls_offload *cls_flower, int tc)
7438{
7439 u32 prio = cls_flower->common.prio;
7440
7441 if (tc < 0 || tc > hdev->tc_max) {
7442 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7443 return -EINVAL;
7444 }
7445
7446 if (prio == 0 ||
7447 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7448 dev_err(&hdev->pdev->dev,
7449 "prio %u should be in range[1, %u]\n",
7450 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7451 return -EINVAL;
7452 }
7453
7454 if (test_bit(prio - 1, hdev->fd_bmap)) {
7455 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7456 return -EINVAL;
7457 }
7458 return 0;
7459}
7460
7461static int hclge_add_cls_flower(struct hnae3_handle *handle,
7462 struct flow_cls_offload *cls_flower,
7463 int tc)
7464{
7465 struct hclge_vport *vport = hclge_get_vport(handle);
7466 struct hclge_dev *hdev = vport->back;
7467 struct hclge_fd_rule *rule;
7468 int ret;
7469
7470 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7471 if (ret) {
7472 dev_err(&hdev->pdev->dev,
7473 "failed to check cls flower params, ret = %d\n", ret);
7474 return ret;
7475 }
7476
7477 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7478 if (!rule)
7479 return -ENOMEM;
7480
7481 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7482 if (ret) {
7483 kfree(rule);
7484 return ret;
7485 }
7486
7487 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7488 rule->cls_flower.tc = tc;
7489 rule->location = cls_flower->common.prio - 1;
7490 rule->vf_id = 0;
7491 rule->cls_flower.cookie = cls_flower->cookie;
7492 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7493
7494 ret = hclge_add_fd_entry_common(hdev, rule);
7495 if (ret)
7496 kfree(rule);
7497
7498 return ret;
7499}
7500
7501static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7502 unsigned long cookie)
7503{
7504 struct hclge_fd_rule *rule;
7505 struct hlist_node *node;
7506
7507 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7508 if (rule->cls_flower.cookie == cookie)
7509 return rule;
7510 }
7511
7512 return NULL;
7513}
7514
7515static int hclge_del_cls_flower(struct hnae3_handle *handle,
7516 struct flow_cls_offload *cls_flower)
7517{
7518 struct hclge_vport *vport = hclge_get_vport(handle);
7519 struct hclge_dev *hdev = vport->back;
7520 struct hclge_fd_rule *rule;
7521 int ret;
7522
7523 spin_lock_bh(&hdev->fd_rule_lock);
7524
7525 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7526 if (!rule) {
7527 spin_unlock_bh(&hdev->fd_rule_lock);
7528 return -EINVAL;
7529 }
7530
7531 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7532 NULL, false);
7533 if (ret) {
7534 spin_unlock_bh(&hdev->fd_rule_lock);
7535 return ret;
7536 }
7537
7538 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7539 spin_unlock_bh(&hdev->fd_rule_lock);
7540
7541 return 0;
7542}
7543
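/* Push pending rule changes to hardware: program rules in TO_ADD state
 * and remove rules in TO_DEL state. On failure the changed flag is set
 * again so the sync is retried by the periodic task.
 */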
7544static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7545{
7546 struct hclge_fd_rule *rule;
7547 struct hlist_node *node;
7548 int ret = 0;
7549
7550 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7551 return;
7552
7553 spin_lock_bh(&hdev->fd_rule_lock);
7554
7555 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7556 switch (rule->state) {
7557 case HCLGE_FD_TO_ADD:
7558 ret = hclge_fd_config_rule(hdev, rule);
7559 if (ret)
7560 goto out;
7561 rule->state = HCLGE_FD_ACTIVE;
7562 break;
7563 case HCLGE_FD_TO_DEL:
7564 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7565 rule->location, NULL, false);
7566 if (ret)
7567 goto out;
7568 hclge_fd_dec_rule_cnt(hdev, rule->location);
7569 hclge_fd_free_node(hdev, rule);
7570 break;
7571 default:
7572 break;
7573 }
7574 }
7575
7576out:
7577 if (ret)
7578 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7579
7580 spin_unlock_bh(&hdev->fd_rule_lock);
7581}
7582
7583static void hclge_sync_fd_table(struct hclge_dev *hdev)
7584{
7585 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7586 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7587
7588 hclge_clear_fd_rules_in_list(hdev, clear_list);
7589 }
7590
7591 hclge_sync_fd_user_def_cfg(hdev, false);
7592
7593 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7594}
7595
7596static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7597{
7598 struct hclge_vport *vport = hclge_get_vport(handle);
7599 struct hclge_dev *hdev = vport->back;
7600
7601 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7602 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7603}
7604
7605static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7606{
7607 struct hclge_vport *vport = hclge_get_vport(handle);
7608 struct hclge_dev *hdev = vport->back;
7609
7610 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7611}
7612
7613static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7614{
7615 struct hclge_vport *vport = hclge_get_vport(handle);
7616 struct hclge_dev *hdev = vport->back;
7617
7618 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7619}
7620
7621static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7622{
7623 struct hclge_vport *vport = hclge_get_vport(handle);
7624 struct hclge_dev *hdev = vport->back;
7625
7626 return hdev->rst_stats.hw_reset_done_cnt;
7627}
7628
7629static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7630{
7631 struct hclge_vport *vport = hclge_get_vport(handle);
7632 struct hclge_dev *hdev = vport->back;
7633
7634 hdev->fd_en = enable;
7635
7636 if (!enable)
7637 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7638 else
7639 hclge_restore_fd_entries(handle);
7640
7641 hclge_task_schedule(hdev, 0);
7642}
7643
7644static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7645{
7646 struct hclge_desc desc;
7647 struct hclge_config_mac_mode_cmd *req =
7648 (struct hclge_config_mac_mode_cmd *)desc.data;
7649 u32 loop_en = 0;
7650 int ret;
7651
7652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7653
7654 if (enable) {
7655 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7656 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7657 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7658 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7659 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7660 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7661 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7662 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7663 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7664 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7665 }
7666
7667 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7668
7669 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7670 if (ret)
7671 dev_err(&hdev->pdev->dev,
7672 "mac enable fail, ret =%d.\n", ret);
7673}
7674
7675static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7676 u8 switch_param, u8 param_mask)
7677{
7678 struct hclge_mac_vlan_switch_cmd *req;
7679 struct hclge_desc desc;
7680 u32 func_id;
7681 int ret;
7682
7683 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7684 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7685
	/* read current config parameter */
7687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7688 true);
7689 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7690 req->func_id = cpu_to_le32(func_id);
7691
7692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7693 if (ret) {
7694 dev_err(&hdev->pdev->dev,
7695 "read mac vlan switch parameter fail, ret = %d\n", ret);
7696 return ret;
7697 }
7698
	/* modify and write new config parameter */
7700 hclge_cmd_reuse_desc(&desc, false);
7701 req->switch_param = (req->switch_param & param_mask) | switch_param;
7702 req->param_mask = param_mask;
7703
7704 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7705 if (ret)
7706 dev_err(&hdev->pdev->dev,
7707 "set mac vlan switch parameter fail, ret = %d\n", ret);
7708 return ret;
7709}
7710
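/* poll the PHY until its link status matches @link_ret or the retry
 * limit is reached
 */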
7711static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7712 int link_ret)
7713{
7714#define HCLGE_PHY_LINK_STATUS_NUM 200
7715
7716 struct phy_device *phydev = hdev->hw.mac.phydev;
7717 int i = 0;
7718 int ret;
7719
7720 do {
7721 ret = phy_read_status(phydev);
7722 if (ret) {
7723 dev_err(&hdev->pdev->dev,
7724 "phy update link status fail, ret = %d\n", ret);
7725 return;
7726 }
7727
7728 if (phydev->link == link_ret)
7729 break;
7730
7731 msleep(HCLGE_LINK_STATUS_MS);
7732 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7733}
7734
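/* poll the MAC link status every HCLGE_LINK_STATUS_MS; returns 0 once it
 * matches link_ret, or -EBUSY after HCLGE_MAC_LINK_STATUS_NUM retries
 */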
7735static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7736{
7737#define HCLGE_MAC_LINK_STATUS_NUM 100
7738
7739 int link_status;
7740 int i = 0;
7741 int ret;
7742
7743 do {
7744 ret = hclge_get_mac_link_status(hdev, &link_status);
7745 if (ret)
7746 return ret;
7747 if (link_status == link_ret)
7748 return 0;
7749
7750 msleep(HCLGE_LINK_STATUS_MS);
7751 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7752 return -EBUSY;
7753}
7754
7755static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7756 bool is_phy)
7757{
7758 int link_ret;
7759
7760 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7761
7762 if (is_phy)
7763 hclge_phy_link_status_wait(hdev, link_ret);
7764
7765 return hclge_mac_link_status_wait(hdev, link_ret);
7766}
7767
7768static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7769{
7770 struct hclge_config_mac_mode_cmd *req;
7771 struct hclge_desc desc;
7772 u32 loop_en;
7773 int ret;
7774
7775 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7776
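/* 1 Read out the MAC mode config at first */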
7777 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7778 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7779 if (ret) {
7780 dev_err(&hdev->pdev->dev,
7781 "mac loopback get fail, ret =%d.\n", ret);
7782 return ret;
7783 }
7784
7785 /* 2 Then setup the loopback flag */
7786 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7787 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7788
7789 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7790
7791 /* 3 Config mac work mode with loopback flag
7792 * and its original configure parameters
7793 */
7794 hclge_cmd_reuse_desc(&desc, false);
7795 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7796 if (ret)
7797 dev_err(&hdev->pdev->dev,
7798 "mac loopback set fail, ret =%d.\n", ret);
7799 return ret;
7800}
7801
7802static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7803 enum hnae3_loop loop_mode)
7804{
7805#define HCLGE_COMMON_LB_RETRY_MS 10
7806#define HCLGE_COMMON_LB_RETRY_NUM 100
7807
7808 struct hclge_common_lb_cmd *req;
7809 struct hclge_desc desc;
7810 int ret, i = 0;
7811 u8 loop_mode_b;
7812
7813 req = (struct hclge_common_lb_cmd *)desc.data;
7814 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7815
7816 switch (loop_mode) {
7817 case HNAE3_LOOP_SERIAL_SERDES:
7818 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7819 break;
7820 case HNAE3_LOOP_PARALLEL_SERDES:
7821 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7822 break;
7823 case HNAE3_LOOP_PHY:
7824 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7825 break;
7826 default:
7827 dev_err(&hdev->pdev->dev,
7828 "unsupported common loopback mode %d\n", loop_mode);
7829 return -ENOTSUPP;
7830 }
7831
7832 if (en) {
7833 req->enable = loop_mode_b;
7834 req->mask = loop_mode_b;
7835 } else {
7836 req->mask = loop_mode_b;
7837 }
7838
7839 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7840 if (ret) {
7841 dev_err(&hdev->pdev->dev,
7842 "common loopback set fail, ret = %d\n", ret);
7843 return ret;
7844 }
7845
7846 do {
7847 msleep(HCLGE_COMMON_LB_RETRY_MS);
7848 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7849 true);
7850 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7851 if (ret) {
7852 dev_err(&hdev->pdev->dev,
7853 "common loopback get, ret = %d\n", ret);
7854 return ret;
7855 }
7856 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7857 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7858
7859 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7860 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7861 return -EBUSY;
7862 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7863 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7864 return -EIO;
7865 }
7866 return ret;
7867}
7868
7869static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7870 enum hnae3_loop loop_mode)
7871{
7872 int ret;
7873
7874 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7875 if (ret)
7876 return ret;
7877
7878 hclge_cfg_mac_mode(hdev, en);
7879
7880 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7881 if (ret)
7882 dev_err(&hdev->pdev->dev,
7883 "serdes loopback config mac mode timeout\n");
7884
7885 return ret;
7886}
7887
7888static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7889 struct phy_device *phydev)
7890{
7891 int ret;
7892
7893 if (!phydev->suspended) {
7894 ret = phy_suspend(phydev);
7895 if (ret)
7896 return ret;
7897 }
7898
7899 ret = phy_resume(phydev);
7900 if (ret)
7901 return ret;
7902
7903 return phy_loopback(phydev, true);
7904}
7905
7906static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7907 struct phy_device *phydev)
7908{
7909 int ret;
7910
7911 ret = phy_loopback(phydev, false);
7912 if (ret)
7913 return ret;
7914
7915 return phy_suspend(phydev);
7916}
7917
7918static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7919{
7920 struct phy_device *phydev = hdev->hw.mac.phydev;
7921 int ret;
7922
7923 if (!phydev) {
7924 if (hnae3_dev_phy_imp_supported(hdev))
7925 return hclge_set_common_loopback(hdev, en,
7926 HNAE3_LOOP_PHY);
7927 return -ENOTSUPP;
7928 }
7929
7930 if (en)
7931 ret = hclge_enable_phy_loopback(hdev, phydev);
7932 else
7933 ret = hclge_disable_phy_loopback(hdev, phydev);
7934 if (ret) {
7935 dev_err(&hdev->pdev->dev,
7936 "set phy loopback fail, ret = %d\n", ret);
7937 return ret;
7938 }
7939
7940 hclge_cfg_mac_mode(hdev, en);
7941
7942 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7943 if (ret)
7944 dev_err(&hdev->pdev->dev,
7945 "phy loopback config mac mode timeout\n");
7946
7947 return ret;
7948}
7949
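/* enable or disable a single TQP (task queue pair) for the given stream */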
7950static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7951 u16 stream_id, bool enable)
7952{
7953 struct hclge_desc desc;
7954 struct hclge_cfg_com_tqp_queue_cmd *req =
7955 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7956
7957 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7958 req->tqp_id = cpu_to_le16(tqp_id);
7959 req->stream_id = cpu_to_le16(stream_id);
7960 if (enable)
7961 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7962
7963 return hclge_cmd_send(&hdev->hw, &desc, 1);
7964}
7965
7966static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7967{
7968 struct hclge_vport *vport = hclge_get_vport(handle);
7969 struct hclge_dev *hdev = vport->back;
7970 int ret;
7971 u16 i;
7972
7973 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7974 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7975 if (ret)
7976 return ret;
7977 }
7978 return 0;
7979}
7980
7981static int hclge_set_loopback(struct hnae3_handle *handle,
7982 enum hnae3_loop loop_mode, bool en)
7983{
7984 struct hclge_vport *vport = hclge_get_vport(handle);
7985 struct hclge_dev *hdev = vport->back;
7986 int ret;
7987
7988 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7989 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7990 * the same, the packets are looped back in the SSU. If SSU loopback
7991 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7992 */
7993 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7994 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7995
7996 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7997 HCLGE_SWITCH_ALW_LPBK_MASK);
7998 if (ret)
7999 return ret;
8000 }
8001
8002 switch (loop_mode) {
8003 case HNAE3_LOOP_APP:
8004 ret = hclge_set_app_loopback(hdev, en);
8005 break;
8006 case HNAE3_LOOP_SERIAL_SERDES:
8007 case HNAE3_LOOP_PARALLEL_SERDES:
8008 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8009 break;
8010 case HNAE3_LOOP_PHY:
8011 ret = hclge_set_phy_loopback(hdev, en);
8012 break;
8013 default:
8014 ret = -ENOTSUPP;
8015 dev_err(&hdev->pdev->dev,
8016 "loop_mode %d is not supported\n", loop_mode);
8017 break;
8018 }
8019
8020 if (ret)
8021 return ret;
8022
8023 ret = hclge_tqp_enable(handle, en);
8024 if (ret)
8025 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8026 en ? "enable" : "disable", ret);
8027
8028 return ret;
8029}
8030
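/* disable app loopback and both serdes loopback modes, restoring the
 * default (loopback-off) configuration
 */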
8031static int hclge_set_default_loopback(struct hclge_dev *hdev)
8032{
8033 int ret;
8034
8035 ret = hclge_set_app_loopback(hdev, false);
8036 if (ret)
8037 return ret;
8038
8039 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8040 if (ret)
8041 return ret;
8042
8043 return hclge_cfg_common_loopback(hdev, false,
8044 HNAE3_LOOP_PARALLEL_SERDES);
8045}
8046
8047static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8048{
8049 struct hclge_vport *vport = hclge_get_vport(handle);
8050 struct hnae3_knic_private_info *kinfo;
8051 struct hnae3_queue *queue;
8052 struct hclge_tqp *tqp;
8053 int i;
8054
8055 kinfo = &vport->nic.kinfo;
8056 for (i = 0; i < kinfo->num_tqps; i++) {
8057 queue = handle->kinfo.tqp[i];
8058 tqp = container_of(queue, struct hclge_tqp, q);
8059 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8060 }
8061}
8062
8063static void hclge_flush_link_update(struct hclge_dev *hdev)
8064{
8065#define HCLGE_FLUSH_LINK_TIMEOUT 100000
8066
8067 unsigned long last = hdev->serv_processed_cnt;
8068 int i = 0;
8069
8070 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8071 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8072 last == hdev->serv_processed_cnt)
8073 usleep_range(1, 1);
8074}
8075
8076static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8077{
8078 struct hclge_vport *vport = hclge_get_vport(handle);
8079 struct hclge_dev *hdev = vport->back;
8080
8081 if (enable) {
8082 hclge_task_schedule(hdev, 0);
8083 } else {
8084 /* Set the DOWN flag here to disable link updating */
8085 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8086
8087 /* flush memory to make sure DOWN is seen by service task */
8088 smp_mb__before_atomic();
8089 hclge_flush_link_update(hdev);
8090 }
8091}
8092
8093static int hclge_ae_start(struct hnae3_handle *handle)
8094{
8095 struct hclge_vport *vport = hclge_get_vport(handle);
8096 struct hclge_dev *hdev = vport->back;
8097
8098 /* mac enable */
8099 hclge_cfg_mac_mode(hdev, true);
8100 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8101 hdev->hw.mac.link = 0;
8102
8103 /* reset tqp stats */
8104 hclge_reset_tqp_stats(handle);
8105
8106 hclge_mac_start_phy(hdev);
8107
8108 return 0;
8109}
8110
8111static void hclge_ae_stop(struct hnae3_handle *handle)
8112{
8113 struct hclge_vport *vport = hclge_get_vport(handle);
8114 struct hclge_dev *hdev = vport->back;
8115
8116 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8117 spin_lock_bh(&hdev->fd_rule_lock);
8118 hclge_clear_arfs_rules(hdev);
8119 spin_unlock_bh(&hdev->fd_rule_lock);
8120
8121 /* If it is not PF reset, the firmware will disable the MAC,
8122 * so it only needs to stop the phy here.
8123 */
8124 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8125 hdev->reset_type != HNAE3_FUNC_RESET) {
8126 hclge_mac_stop_phy(hdev);
8127 hclge_update_link_status(hdev);
8128 return;
8129 }
8130
8131 hclge_reset_tqp(handle);
8132
8133 hclge_config_mac_tnl_int(hdev, false);
8134
8135 /* Mac disable */
8136 hclge_cfg_mac_mode(hdev, false);
8137
8138 hclge_mac_stop_phy(hdev);
8139
8140 /* reset tqp stats */
8141 hclge_reset_tqp_stats(handle);
8142 hclge_update_link_status(hdev);
8143}
8144
8145int hclge_vport_start(struct hclge_vport *vport)
8146{
8147 struct hclge_dev *hdev = vport->back;
8148
8149 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8150 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8151 vport->last_active_jiffies = jiffies;
8152
8153 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8154 if (vport->vport_id) {
8155 hclge_restore_mac_table_common(vport);
8156 hclge_restore_vport_vlan_table(vport);
8157 } else {
8158 hclge_restore_hw_table(hdev);
8159 }
8160 }
8161
8162 clear_bit(vport->vport_id, hdev->vport_config_block);
8163
8164 return 0;
8165}
8166
8167void hclge_vport_stop(struct hclge_vport *vport)
8168{
8169 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8170}
8171
8172static int hclge_client_start(struct hnae3_handle *handle)
8173{
8174 struct hclge_vport *vport = hclge_get_vport(handle);
8175
8176 return hclge_vport_start(vport);
8177}
8178
8179static void hclge_client_stop(struct hnae3_handle *handle)
8180{
8181 struct hclge_vport *vport = hclge_get_vport(handle);
8182
8183 hclge_vport_stop(vport);
8184}
8185
8186static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8187 u16 cmdq_resp, u8 resp_code,
8188 enum hclge_mac_vlan_tbl_opcode op)
8189{
8190 struct hclge_dev *hdev = vport->back;
8191
8192 if (cmdq_resp) {
8193 dev_err(&hdev->pdev->dev,
8194 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8195 cmdq_resp);
8196 return -EIO;
8197 }
8198
8199 if (op == HCLGE_MAC_VLAN_ADD) {
8200 if (!resp_code || resp_code == 1)
8201 return 0;
8202 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8203 resp_code == HCLGE_ADD_MC_OVERFLOW)
8204 return -ENOSPC;
8205
8206 dev_err(&hdev->pdev->dev,
8207 "add mac addr failed for undefined, code=%u.\n",
8208 resp_code);
8209 return -EIO;
8210 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8211 if (!resp_code) {
8212 return 0;
8213 } else if (resp_code == 1) {
8214 dev_dbg(&hdev->pdev->dev,
8215 "remove mac addr failed for miss.\n");
8216 return -ENOENT;
8217 }
8218
8219 dev_err(&hdev->pdev->dev,
8220 "remove mac addr failed for undefined, code=%u.\n",
8221 resp_code);
8222 return -EIO;
8223 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8224 if (!resp_code) {
8225 return 0;
8226 } else if (resp_code == 1) {
8227 dev_dbg(&hdev->pdev->dev,
8228 "lookup mac addr failed for miss.\n");
8229 return -ENOENT;
8230 }
8231
8232 dev_err(&hdev->pdev->dev,
8233 "lookup mac addr failed for undefined, code=%u.\n",
8234 resp_code);
8235 return -EIO;
8236 }
8237
8238 dev_err(&hdev->pdev->dev,
8239 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8240
8241 return -EINVAL;
8242}
8243
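/* set or clear the bit for vfid in the mc mac vlan table bitmap: the
 * first HCLGE_VF_NUM_IN_FIRST_DESC function ids live in desc[1], the
 * remaining ones in desc[2], 32 function ids per data word
 */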
8244static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8245{
8246#define HCLGE_VF_NUM_IN_FIRST_DESC 192
8247
8248 unsigned int word_num;
8249 unsigned int bit_num;
8250
8251 if (vfid > 255 || vfid < 0)
8252 return -EIO;
8253
8254 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8255 word_num = vfid / 32;
8256 bit_num = vfid % 32;
8257 if (clr)
8258 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8259 else
8260 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8261 } else {
8262 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8263 bit_num = vfid % 32;
8264 if (clr)
8265 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8266 else
8267 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8268 }
8269
8270 return 0;
8271}
8272
8273static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8274{
8275#define HCLGE_DESC_NUMBER 3
8276#define HCLGE_FUNC_NUMBER_PER_DESC 6
8277 int i, j;
8278
8279 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8280 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8281 if (desc[i].data[j])
8282 return false;
8283
8284 return true;
8285}
8286
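/* pack the 6-byte mac address into the little-endian hi32/lo16 fields of
 * the mac vlan table entry, mark the entry valid, and flag it as a
 * multicast entry when is_mc is set
 */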
8287static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8288 const u8 *addr, bool is_mc)
8289{
8290 const unsigned char *mac_addr = addr;
8291 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8292 (mac_addr[0]) | (mac_addr[1] << 8);
8293 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8294
8295 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8296 if (is_mc) {
8297 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8298 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8299 }
8300
8301 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8302 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8303}
8304
8305static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8306 struct hclge_mac_vlan_tbl_entry_cmd *req)
8307{
8308 struct hclge_dev *hdev = vport->back;
8309 struct hclge_desc desc;
8310 u8 resp_code;
8311 u16 retval;
8312 int ret;
8313
8314 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8315
8316 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8317
8318 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8319 if (ret) {
8320 dev_err(&hdev->pdev->dev,
8321 "del mac addr failed for cmd_send, ret =%d.\n",
8322 ret);
8323 return ret;
8324 }
8325 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8326 retval = le16_to_cpu(desc.retval);
8327
8328 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8329 HCLGE_MAC_VLAN_REMOVE);
8330}
8331
8332static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8333 struct hclge_mac_vlan_tbl_entry_cmd *req,
8334 struct hclge_desc *desc,
8335 bool is_mc)
8336{
8337 struct hclge_dev *hdev = vport->back;
8338 u8 resp_code;
8339 u16 retval;
8340 int ret;
8341
8342 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8343 if (is_mc) {
8344 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8345 memcpy(desc[0].data,
8346 req,
8347 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8348 hclge_cmd_setup_basic_desc(&desc[1],
8349 HCLGE_OPC_MAC_VLAN_ADD,
8350 true);
8351 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8352 hclge_cmd_setup_basic_desc(&desc[2],
8353 HCLGE_OPC_MAC_VLAN_ADD,
8354 true);
8355 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8356 } else {
8357 memcpy(desc[0].data,
8358 req,
8359 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8360 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8361 }
8362 if (ret) {
8363 dev_err(&hdev->pdev->dev,
8364 "lookup mac addr failed for cmd_send, ret =%d.\n",
8365 ret);
8366 return ret;
8367 }
8368 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8369 retval = le16_to_cpu(desc[0].retval);
8370
8371 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8372 HCLGE_MAC_VLAN_LKUP);
8373}
8374
8375static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8376 struct hclge_mac_vlan_tbl_entry_cmd *req,
8377 struct hclge_desc *mc_desc)
8378{
8379 struct hclge_dev *hdev = vport->back;
8380 int cfg_status;
8381 u8 resp_code;
8382 u16 retval;
8383 int ret;
8384
8385 if (!mc_desc) {
8386 struct hclge_desc desc;
8387
8388 hclge_cmd_setup_basic_desc(&desc,
8389 HCLGE_OPC_MAC_VLAN_ADD,
8390 false);
8391 memcpy(desc.data, req,
8392 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8393 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8394 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8395 retval = le16_to_cpu(desc.retval);
8396
8397 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8398 resp_code,
8399 HCLGE_MAC_VLAN_ADD);
8400 } else {
8401 hclge_cmd_reuse_desc(&mc_desc[0], false);
8402 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8403 hclge_cmd_reuse_desc(&mc_desc[1], false);
8404 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8405 hclge_cmd_reuse_desc(&mc_desc[2], false);
8406 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8407 memcpy(mc_desc[0].data, req,
8408 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8409 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8410 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8411 retval = le16_to_cpu(mc_desc[0].retval);
8412
8413 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8414 resp_code,
8415 HCLGE_MAC_VLAN_ADD);
8416 }
8417
8418 if (ret) {
8419 dev_err(&hdev->pdev->dev,
8420 "add mac addr failed for cmd_send, ret =%d.\n",
8421 ret);
8422 return ret;
8423 }
8424
8425 return cfg_status;
8426}
8427
8428static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8429 u16 *allocated_size)
8430{
8431 struct hclge_umv_spc_alc_cmd *req;
8432 struct hclge_desc desc;
8433 int ret;
8434
8435 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8437
8438 req->space_size = cpu_to_le32(space_size);
8439
8440 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8441 if (ret) {
8442 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8443 ret);
8444 return ret;
8445 }
8446
8447 *allocated_size = le32_to_cpu(desc.data[1]);
8448
8449 return 0;
8450}
8451
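/* request the wanted unicast mac vlan (umv) table space from firmware;
 * the allocated space is split into a private quota for each function,
 * with one spare quota plus the division remainder kept as shared space
 */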
8452static int hclge_init_umv_space(struct hclge_dev *hdev)
8453{
8454 u16 allocated_size = 0;
8455 int ret;
8456
8457 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8458 if (ret)
8459 return ret;
8460
8461 if (allocated_size < hdev->wanted_umv_size)
8462 dev_warn(&hdev->pdev->dev,
8463 "failed to alloc umv space, want %u, get %u\n",
8464 hdev->wanted_umv_size, allocated_size);
8465
8466 hdev->max_umv_size = allocated_size;
8467 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8468 hdev->share_umv_size = hdev->priv_umv_size +
8469 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8470
8471 return 0;
8472}
8473
8474static void hclge_reset_umv_space(struct hclge_dev *hdev)
8475{
8476 struct hclge_vport *vport;
8477 int i;
8478
8479 for (i = 0; i < hdev->num_alloc_vport; i++) {
8480 vport = &hdev->vport[i];
8481 vport->used_umv_num = 0;
8482 }
8483
8484 mutex_lock(&hdev->vport_lock);
8485 hdev->share_umv_size = hdev->priv_umv_size +
8486 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8487 mutex_unlock(&hdev->vport_lock);
8488}
8489
8490static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8491{
8492 struct hclge_dev *hdev = vport->back;
8493 bool is_full;
8494
8495 if (need_lock)
8496 mutex_lock(&hdev->vport_lock);
8497
8498 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8499 hdev->share_umv_size == 0);
8500
8501 if (need_lock)
8502 mutex_unlock(&hdev->vport_lock);
8503
8504 return is_full;
8505}
8506
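/* account a unicast mac entry against the vport: consume the private
 * quota first and then the shared pool; freeing reverses the accounting
 */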
8507static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8508{
8509 struct hclge_dev *hdev = vport->back;
8510
8511 if (is_free) {
8512 if (vport->used_umv_num > hdev->priv_umv_size)
8513 hdev->share_umv_size++;
8514
8515 if (vport->used_umv_num > 0)
8516 vport->used_umv_num--;
8517 } else {
8518 if (vport->used_umv_num >= hdev->priv_umv_size &&
8519 hdev->share_umv_size > 0)
8520 hdev->share_umv_size--;
8521 vport->used_umv_num++;
8522 }
8523}
8524
8525static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8526 const u8 *mac_addr)
8527{
8528 struct hclge_mac_node *mac_node, *tmp;
8529
8530 list_for_each_entry_safe(mac_node, tmp, list, node)
8531 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8532 return mac_node;
8533
8534 return NULL;
8535}
8536
8537static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8538 enum HCLGE_MAC_NODE_STATE state)
8539{
8540 switch (state) {
8541 /* from set_rx_mode or tmp_add_list */
8542 case HCLGE_MAC_TO_ADD:
8543 if (mac_node->state == HCLGE_MAC_TO_DEL)
8544 mac_node->state = HCLGE_MAC_ACTIVE;
8545 break;
8546 /* only from set_rx_mode */
8547 case HCLGE_MAC_TO_DEL:
8548 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8549 list_del(&mac_node->node);
8550 kfree(mac_node);
8551 } else {
8552 mac_node->state = HCLGE_MAC_TO_DEL;
8553 }
8554 break;
8555 /* only from tmp_add_list, the mac_node->state won't be
8556 * ACTIVE
8557 */
8558 case HCLGE_MAC_ACTIVE:
8559 if (mac_node->state == HCLGE_MAC_TO_ADD)
8560 mac_node->state = HCLGE_MAC_ACTIVE;
8561
8562 break;
8563 }
8564}
8565
8566int hclge_update_mac_list(struct hclge_vport *vport,
8567 enum HCLGE_MAC_NODE_STATE state,
8568 enum HCLGE_MAC_ADDR_TYPE mac_type,
8569 const unsigned char *addr)
8570{
8571 struct hclge_dev *hdev = vport->back;
8572 struct hclge_mac_node *mac_node;
8573 struct list_head *list;
8574
8575 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8576 &vport->uc_mac_list : &vport->mc_mac_list;
8577
8578 spin_lock_bh(&vport->mac_list_lock);
8579
8580 /* if the mac addr is already in the mac list, no need to add a new
8581 * one into it, just check the mac addr state, convert it to a new
8582 * state, or just remove it, or do nothing.
8583 */
8584 mac_node = hclge_find_mac_node(list, addr);
8585 if (mac_node) {
8586 hclge_update_mac_node(mac_node, state);
8587 spin_unlock_bh(&vport->mac_list_lock);
8588 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8589 return 0;
8590 }
8591
8592 /* if this address is never added, unnecessary to delete */
8593 if (state == HCLGE_MAC_TO_DEL) {
8594 spin_unlock_bh(&vport->mac_list_lock);
8595 dev_err(&hdev->pdev->dev,
8596 "failed to delete address %pM from mac list\n",
8597 addr);
8598 return -ENOENT;
8599 }
8600
8601 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8602 if (!mac_node) {
8603 spin_unlock_bh(&vport->mac_list_lock);
8604 return -ENOMEM;
8605 }
8606
8607 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8608
8609 mac_node->state = state;
8610 ether_addr_copy(mac_node->mac_addr, addr);
8611 list_add_tail(&mac_node->node, list);
8612
8613 spin_unlock_bh(&vport->mac_list_lock);
8614
8615 return 0;
8616}
8617
8618static int hclge_add_uc_addr(struct hnae3_handle *handle,
8619 const unsigned char *addr)
8620{
8621 struct hclge_vport *vport = hclge_get_vport(handle);
8622
8623 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8624 addr);
8625}
8626
8627int hclge_add_uc_addr_common(struct hclge_vport *vport,
8628 const unsigned char *addr)
8629{
8630 struct hclge_dev *hdev = vport->back;
8631 struct hclge_mac_vlan_tbl_entry_cmd req;
8632 struct hclge_desc desc;
8633 u16 egress_port = 0;
8634 int ret;
8635
8636 /* mac addr check */
8637 if (is_zero_ether_addr(addr) ||
8638 is_broadcast_ether_addr(addr) ||
8639 is_multicast_ether_addr(addr)) {
8640 dev_err(&hdev->pdev->dev,
8641 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8642 addr, is_zero_ether_addr(addr),
8643 is_broadcast_ether_addr(addr),
8644 is_multicast_ether_addr(addr));
8645 return -EINVAL;
8646 }
8647
8648 memset(&req, 0, sizeof(req));
8649
8650 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8651 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8652
8653 req.egress_port = cpu_to_le16(egress_port);
8654
8655 hclge_prepare_mac_addr(&req, addr, false);
8656
8657 /* Lookup the mac address in the mac_vlan table, and add
8658 * it if the entry is inexistent. Repeated unicast entry
8659 * is not allowed in the mac vlan table.
8660 */
8661 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8662 if (ret == -ENOENT) {
8663 mutex_lock(&hdev->vport_lock);
8664 if (!hclge_is_umv_space_full(vport, false)) {
8665 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8666 if (!ret)
8667 hclge_update_umv_space(vport, false);
8668 mutex_unlock(&hdev->vport_lock);
8669 return ret;
8670 }
8671 mutex_unlock(&hdev->vport_lock);
8672
8673 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8674 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8675 hdev->priv_umv_size);
8676
8677 return -ENOSPC;
8678 }
8679
8680 /* check if we just hit the duplicate */
8681 if (!ret) {
8682 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8683 vport->vport_id, addr);
8684 return 0;
8685 }
8686
8687 dev_err(&hdev->pdev->dev,
8688 "PF failed to add unicast entry(%pM) in the MAC table\n",
8689 addr);
8690
8691 return ret;
8692}
8693
8694static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8695 const unsigned char *addr)
8696{
8697 struct hclge_vport *vport = hclge_get_vport(handle);
8698
8699 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8700 addr);
8701}
8702
8703int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8704 const unsigned char *addr)
8705{
8706 struct hclge_dev *hdev = vport->back;
8707 struct hclge_mac_vlan_tbl_entry_cmd req;
8708 int ret;
8709
8710 /* mac addr check */
8711 if (is_zero_ether_addr(addr) ||
8712 is_broadcast_ether_addr(addr) ||
8713 is_multicast_ether_addr(addr)) {
8714 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8715 addr);
8716 return -EINVAL;
8717 }
8718
8719 memset(&req, 0, sizeof(req));
8720 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8721 hclge_prepare_mac_addr(&req, addr, false);
8722 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8723 if (!ret) {
8724 mutex_lock(&hdev->vport_lock);
8725 hclge_update_umv_space(vport, true);
8726 mutex_unlock(&hdev->vport_lock);
8727 } else if (ret == -ENOENT) {
8728 ret = 0;
8729 }
8730
8731 return ret;
8732}
8733
8734static int hclge_add_mc_addr(struct hnae3_handle *handle,
8735 const unsigned char *addr)
8736{
8737 struct hclge_vport *vport = hclge_get_vport(handle);
8738
8739 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8740 addr);
8741}
8742
8743int hclge_add_mc_addr_common(struct hclge_vport *vport,
8744 const unsigned char *addr)
8745{
8746 struct hclge_dev *hdev = vport->back;
8747 struct hclge_mac_vlan_tbl_entry_cmd req;
8748 struct hclge_desc desc[3];
8749 int status;
8750
8751 /* mac addr check */
8752 if (!is_multicast_ether_addr(addr)) {
8753 dev_err(&hdev->pdev->dev,
8754 "Add mc mac err! invalid mac:%pM.\n",
8755 addr);
8756 return -EINVAL;
8757 }
8758 memset(&req, 0, sizeof(req));
8759 hclge_prepare_mac_addr(&req, addr, true);
8760 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8761 if (status) {
8762 /* This mac addr does not exist, add a new entry for it */
8763 memset(desc[0].data, 0, sizeof(desc[0].data));
8764 memset(desc[1].data, 0, sizeof(desc[0].data));
8765 memset(desc[2].data, 0, sizeof(desc[0].data));
8766 }
8767 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8768 if (status)
8769 return status;
8770 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8771
8772 if (status == -ENOSPC &&
8773 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8774 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8775
8776 return status;
8777}
8778
8779static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8780 const unsigned char *addr)
8781{
8782 struct hclge_vport *vport = hclge_get_vport(handle);
8783
8784 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8785 addr);
8786}
8787
8788int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8789 const unsigned char *addr)
8790{
8791 struct hclge_dev *hdev = vport->back;
8792 struct hclge_mac_vlan_tbl_entry_cmd req;
8793 enum hclge_cmd_status status;
8794 struct hclge_desc desc[3];
8795
8796 /* mac addr check */
8797 if (!is_multicast_ether_addr(addr)) {
8798 dev_dbg(&hdev->pdev->dev,
8799 "Remove mc mac err! invalid mac:%pM.\n",
8800 addr);
8801 return -EINVAL;
8802 }
8803
8804 memset(&req, 0, sizeof(req));
8805 hclge_prepare_mac_addr(&req, addr, true);
8806 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8807 if (!status) {
8808 /* This mac addr exists, remove this handle's VFID for it */
8809 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8810 if (status)
8811 return status;
8812
8813 if (hclge_is_all_function_id_zero(desc))
8814 /* All the vfid is zero, so need to delete this entry */
8815 status = hclge_remove_mac_vlan_tbl(vport, &req);
8816 else
8817 /* Not all the vfid is zero, update the vfid */
8818 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8819 } else if (status == -ENOENT) {
8820 status = 0;
8821 }
8822
8823 return status;
8824}
8825
8826static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8827 struct list_head *list,
8828 int (*sync)(struct hclge_vport *,
8829 const unsigned char *))
8830{
8831 struct hclge_mac_node *mac_node, *tmp;
8832 int ret;
8833
8834 list_for_each_entry_safe(mac_node, tmp, list, node) {
8835 ret = sync(vport, mac_node->mac_addr);
8836 if (!ret) {
8837 mac_node->state = HCLGE_MAC_ACTIVE;
8838 } else {
8839 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8840 &vport->state);
8841 break;
8842 }
8843 }
8844}
8845
8846static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8847 struct list_head *list,
8848 int (*unsync)(struct hclge_vport *,
8849 const unsigned char *))
8850{
8851 struct hclge_mac_node *mac_node, *tmp;
8852 int ret;
8853
8854 list_for_each_entry_safe(mac_node, tmp, list, node) {
8855 ret = unsync(vport, mac_node->mac_addr);
8856 if (!ret || ret == -ENOENT) {
8857 list_del(&mac_node->node);
8858 kfree(mac_node);
8859 } else {
8860 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8861 &vport->state);
8862 break;
8863 }
8864 }
8865}
8866
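/* merge the temporary add list back into the mac list after hardware
 * configuration, and report whether every address reached hardware
 */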
8867static bool hclge_sync_from_add_list(struct list_head *add_list,
8868 struct list_head *mac_list)
8869{
8870 struct hclge_mac_node *mac_node, *tmp, *new_node;
8871 bool all_added = true;
8872
8873 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8874 if (mac_node->state == HCLGE_MAC_TO_ADD)
8875 all_added = false;
8876
8877 /* if the mac address from tmp_add_list is not in the
8878 * uc/mc_mac_list, it means a TO_DEL request was received
8879 * during the time window of adding the mac address into
8880 * the mac table. If the mac_node state is ACTIVE, change
8881 * it to TO_DEL so it will be removed at next time; else
8882 * it must be TO_ADD, which means this address hasn't been
8883 * added into the mac table, so just remove the mac node.
8884 */
8885 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8886 if (new_node) {
8887 hclge_update_mac_node(new_node, mac_node->state);
8888 list_del(&mac_node->node);
8889 kfree(mac_node);
8890 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8891 mac_node->state = HCLGE_MAC_TO_DEL;
8892 list_move_tail(&mac_node->node, mac_list);
8893 } else {
8894 list_del(&mac_node->node);
8895 kfree(mac_node);
8896 }
8897 }
8898
8899 return all_added;
8900}
8901
8902static void hclge_sync_from_del_list(struct list_head *del_list,
8903 struct list_head *mac_list)
8904{
8905 struct hclge_mac_node *mac_node, *tmp, *new_node;
8906
8907 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8908 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8909 if (new_node) {
8910 /* If the mac addr exists in the mac list, it means a new
8911 * TO_ADD request was received during the time window of
8912 * configuring the mac address. The mac node state is
8913 * TO_DEL, but the address is still present in the
8914 * hardware (the delete failed), so only the mac node
8915 * state needs to be changed to ACTIVE.
8916 */
8917 new_node->state = HCLGE_MAC_ACTIVE;
8918 list_del(&mac_node->node);
8919 kfree(mac_node);
8920 } else {
8921 list_move_tail(&mac_node->node, mac_list);
8922 }
8923 }
8924}
8925
8926static void hclge_update_overflow_flags(struct hclge_vport *vport,
8927 enum HCLGE_MAC_ADDR_TYPE mac_type,
8928 bool is_all_added)
8929{
8930 if (mac_type == HCLGE_MAC_ADDR_UC) {
8931 if (is_all_added)
8932 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8933 else
8934 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8935 } else {
8936 if (is_all_added)
8937 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8938 else
8939 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8940 }
8941}
8942
8943static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8944 enum HCLGE_MAC_ADDR_TYPE mac_type)
8945{
8946 struct hclge_mac_node *mac_node, *tmp, *new_node;
8947 struct list_head tmp_add_list, tmp_del_list;
8948 struct list_head *list;
8949 bool all_added;
8950
8951 INIT_LIST_HEAD(&tmp_add_list);
8952 INIT_LIST_HEAD(&tmp_del_list);
8953
8954 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8955 * we can add/delete these mac addr outside the spin lock
8956 */
8957 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8958 &vport->uc_mac_list : &vport->mc_mac_list;
8959
8960 spin_lock_bh(&vport->mac_list_lock);
8961
8962 list_for_each_entry_safe(mac_node, tmp, list, node) {
8963 switch (mac_node->state) {
8964 case HCLGE_MAC_TO_DEL:
8965 list_move_tail(&mac_node->node, &tmp_del_list);
8966 break;
8967 case HCLGE_MAC_TO_ADD:
8968 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8969 if (!new_node)
8970 goto stop_traverse;
8971 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8972 new_node->state = mac_node->state;
8973 list_add_tail(&new_node->node, &tmp_add_list);
8974 break;
8975 default:
8976 break;
8977 }
8978 }
8979
8980stop_traverse:
8981 spin_unlock_bh(&vport->mac_list_lock);
8982
8983 /* delete first, in order to get max mac table space for adding */
8984 if (mac_type == HCLGE_MAC_ADDR_UC) {
8985 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8986 hclge_rm_uc_addr_common);
8987 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8988 hclge_add_uc_addr_common);
8989 } else {
8990 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8991 hclge_rm_mc_addr_common);
8992 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8993 hclge_add_mc_addr_common);
8994 }
8995
8996 /* if some mac addresses failed to be added/deleted, move them
8997 * back to the mac_list, and retry at next time.
8998 */
8999 spin_lock_bh(&vport->mac_list_lock);
9000
9001 hclge_sync_from_del_list(&tmp_del_list, list);
9002 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9003
9004 spin_unlock_bh(&vport->mac_list_lock);
9005
9006 hclge_update_overflow_flags(vport, mac_type, all_added);
9007}
9008
9009static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9010{
9011 struct hclge_dev *hdev = vport->back;
9012
9013 if (test_bit(vport->vport_id, hdev->vport_config_block))
9014 return false;
9015
9016 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9017 return true;
9018
9019 return false;
9020}
9021
9022static void hclge_sync_mac_table(struct hclge_dev *hdev)
9023{
9024 int i;
9025
9026 for (i = 0; i < hdev->num_alloc_vport; i++) {
9027 struct hclge_vport *vport = &hdev->vport[i];
9028
9029 if (!hclge_need_sync_mac_table(vport))
9030 continue;
9031
9032 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9033 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9034 }
9035}
9036
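/* collect the mac nodes which should be removed from hardware into
 * tmp_del_list; TO_ADD nodes are simply dropped when is_del_list is set
 */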
9037static void hclge_build_del_list(struct list_head *list,
9038 bool is_del_list,
9039 struct list_head *tmp_del_list)
9040{
9041 struct hclge_mac_node *mac_cfg, *tmp;
9042
9043 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9044 switch (mac_cfg->state) {
9045 case HCLGE_MAC_TO_DEL:
9046 case HCLGE_MAC_ACTIVE:
9047 list_move_tail(&mac_cfg->node, tmp_del_list);
9048 break;
9049 case HCLGE_MAC_TO_ADD:
9050 if (is_del_list) {
9051 list_del(&mac_cfg->node);
9052 kfree(mac_cfg);
9053 }
9054 break;
9055 }
9056 }
9057}
9058
9059static void hclge_unsync_del_list(struct hclge_vport *vport,
9060 int (*unsync)(struct hclge_vport *vport,
9061 const unsigned char *addr),
9062 bool is_del_list,
9063 struct list_head *tmp_del_list)
9064{
9065 struct hclge_mac_node *mac_cfg, *tmp;
9066 int ret;
9067
9068 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9069 ret = unsync(vport, mac_cfg->mac_addr);
9070 if (!ret || ret == -ENOENT) {
9071 /* the mac addr has been removed from hardware. If it is
9072 * not being deleted from the mac list and was ACTIVE
9073 * before, mark it TO_ADD so it is re-added later;
9074 * otherwise free the mac node. */
9075 if (!is_del_list &&
9076 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9077 mac_cfg->state = HCLGE_MAC_TO_ADD;
9078 } else {
9079 list_del(&mac_cfg->node);
9080 kfree(mac_cfg);
9081 }
9082 } else if (is_del_list) {
9083 mac_cfg->state = HCLGE_MAC_TO_DEL;
9084 }
9085 }
9086}
9087
9088void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9089 enum HCLGE_MAC_ADDR_TYPE mac_type)
9090{
9091 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9092 struct hclge_dev *hdev = vport->back;
9093 struct list_head tmp_del_list, *list;
9094
9095 if (mac_type == HCLGE_MAC_ADDR_UC) {
9096 list = &vport->uc_mac_list;
9097 unsync = hclge_rm_uc_addr_common;
9098 } else {
9099 list = &vport->mc_mac_list;
9100 unsync = hclge_rm_mc_addr_common;
9101 }
9102
9103 INIT_LIST_HEAD(&tmp_del_list);
9104
9105 if (!is_del_list)
9106 set_bit(vport->vport_id, hdev->vport_config_block);
9107
9108 spin_lock_bh(&vport->mac_list_lock);
9109
9110 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9111
9112 spin_unlock_bh(&vport->mac_list_lock);
9113
9114 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9115
9116 spin_lock_bh(&vport->mac_list_lock);
9117
9118 hclge_sync_from_del_list(&tmp_del_list, list);
9119
9120 spin_unlock_bh(&vport->mac_list_lock);
9121}
9122
9123 /* remove all mac addresses when uninitializing */
9124static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9125 enum HCLGE_MAC_ADDR_TYPE mac_type)
9126{
9127 struct hclge_mac_node *mac_node, *tmp;
9128 struct hclge_dev *hdev = vport->back;
9129 struct list_head tmp_del_list, *list;
9130
9131 INIT_LIST_HEAD(&tmp_del_list);
9132
9133 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9134 &vport->uc_mac_list : &vport->mc_mac_list;
9135
9136 spin_lock_bh(&vport->mac_list_lock);
9137
9138 list_for_each_entry_safe(mac_node, tmp, list, node) {
9139 switch (mac_node->state) {
9140 case HCLGE_MAC_TO_DEL:
9141 case HCLGE_MAC_ACTIVE:
9142 list_move_tail(&mac_node->node, &tmp_del_list);
9143 break;
9144 case HCLGE_MAC_TO_ADD:
9145 list_del(&mac_node->node);
9146 kfree(mac_node);
9147 break;
9148 }
9149 }
9150
9151 spin_unlock_bh(&vport->mac_list_lock);
9152
9153 if (mac_type == HCLGE_MAC_ADDR_UC)
9154 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9155 hclge_rm_uc_addr_common);
9156 else
9157 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9158 hclge_rm_mc_addr_common);
9159
9160 if (!list_empty(&tmp_del_list))
9161 dev_warn(&hdev->pdev->dev,
9162 "uninit %s mac list for vport %u not completely.\n",
9163 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9164 vport->vport_id);
9165
9166 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9167 list_del(&mac_node->node);
9168 kfree(mac_node);
9169 }
9170}
9171
9172static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9173{
9174 struct hclge_vport *vport;
9175 int i;
9176
9177 for (i = 0; i < hdev->num_alloc_vport; i++) {
9178 vport = &hdev->vport[i];
9179 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9180 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9181 }
9182}
9183
9184static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9185 u16 cmdq_resp, u8 resp_code)
9186{
9187#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9188#define HCLGE_ETHERTYPE_ALREADY_ADD 1
9189#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9190#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9191
9192 int return_status;
9193
9194 if (cmdq_resp) {
9195 dev_err(&hdev->pdev->dev,
9196 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9197 cmdq_resp);
9198 return -EIO;
9199 }
9200
9201 switch (resp_code) {
9202 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9203 case HCLGE_ETHERTYPE_ALREADY_ADD:
9204 return_status = 0;
9205 break;
9206 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9207 dev_err(&hdev->pdev->dev,
9208 "add mac ethertype failed for manager table overflow.\n");
9209 return_status = -EIO;
9210 break;
9211 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9212 dev_err(&hdev->pdev->dev,
9213 "add mac ethertype failed for key conflict.\n");
9214 return_status = -EIO;
9215 break;
9216 default:
9217 dev_err(&hdev->pdev->dev,
9218 "add mac ethertype failed for undefined, code=%u.\n",
9219 resp_code);
9220 return_status = -EIO;
9221 }
9222
9223 return return_status;
9224}
9225
9226static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9227 u8 *mac_addr)
9228{
9229 struct hclge_mac_vlan_tbl_entry_cmd req;
9230 struct hclge_dev *hdev = vport->back;
9231 struct hclge_desc desc;
9232 u16 egress_port = 0;
9233 int i;
9234
9235 if (is_zero_ether_addr(mac_addr))
9236 return false;
9237
9238 memset(&req, 0, sizeof(req));
9239 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9240 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9241 req.egress_port = cpu_to_le16(egress_port);
9242 hclge_prepare_mac_addr(&req, mac_addr, false);
9243
9244 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9245 return true;
9246
9247 vf_idx += HCLGE_VF_VPORT_START_NUM;
9248 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9249 if (i != vf_idx &&
9250 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9251 return true;
9252
9253 return false;
9254}
9255
9256static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9257 u8 *mac_addr)
9258{
9259 struct hclge_vport *vport = hclge_get_vport(handle);
9260 struct hclge_dev *hdev = vport->back;
9261
9262 vport = hclge_get_vf_vport(hdev, vf);
9263 if (!vport)
9264 return -EINVAL;
9265
9266 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9267 dev_info(&hdev->pdev->dev,
9268 "Specified MAC(=%pM) is same as before, no change committed!\n",
9269 mac_addr);
9270 return 0;
9271 }
9272
9273 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9274 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9275 mac_addr);
9276 return -EEXIST;
9277 }
9278
9279 ether_addr_copy(vport->vf_info.mac, mac_addr);
9280
9281 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9282 dev_info(&hdev->pdev->dev,
9283 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9284 vf, mac_addr);
9285 return hclge_inform_reset_assert_to_vf(vport);
9286 }
9287
9288 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9289 vf, mac_addr);
9290 return 0;
9291}
9292
9293static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9294 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9295{
9296 struct hclge_desc desc;
9297 u8 resp_code;
9298 u16 retval;
9299 int ret;
9300
9301 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9302 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9303
9304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9305 if (ret) {
9306 dev_err(&hdev->pdev->dev,
9307 "add mac ethertype failed for cmd_send, ret =%d.\n",
9308 ret);
9309 return ret;
9310 }
9311
9312 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9313 retval = le16_to_cpu(desc.retval);
9314
9315 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9316}
9317
9318static int init_mgr_tbl(struct hclge_dev *hdev)
9319{
9320 int ret;
9321 int i;
9322
9323 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9324 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9325 if (ret) {
9326 dev_err(&hdev->pdev->dev,
9327 "add mac ethertype failed, ret =%d.\n",
9328 ret);
9329 return ret;
9330 }
9331 }
9332
9333 return 0;
9334}
9335
9336static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9337{
9338 struct hclge_vport *vport = hclge_get_vport(handle);
9339 struct hclge_dev *hdev = vport->back;
9340
9341 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9342}
9343
9344int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9345 const u8 *old_addr, const u8 *new_addr)
9346{
9347 struct list_head *list = &vport->uc_mac_list;
9348 struct hclge_mac_node *old_node, *new_node;
9349
9350 new_node = hclge_find_mac_node(list, new_addr);
9351 if (!new_node) {
9352 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9353 if (!new_node)
9354 return -ENOMEM;
9355
9356 new_node->state = HCLGE_MAC_TO_ADD;
9357 ether_addr_copy(new_node->mac_addr, new_addr);
9358 list_add(&new_node->node, list);
9359 } else {
9360 if (new_node->state == HCLGE_MAC_TO_DEL)
9361 new_node->state = HCLGE_MAC_ACTIVE;
9362
9363 /* make sure the new addr is always at the list head, so
9364 * that the dev addr is not skipped when mac entries are
9365 * re-added due to the umv space limitation after a
9366 * global/imp reset, which clears the mac table in
9367 * hardware. */
9368 list_move(&new_node->node, list);
9369 }
9370
9371 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9372 old_node = hclge_find_mac_node(list, old_addr);
9373 if (old_node) {
9374 if (old_node->state == HCLGE_MAC_TO_ADD) {
9375 list_del(&old_node->node);
9376 kfree(old_node);
9377 } else {
9378 old_node->state = HCLGE_MAC_TO_DEL;
9379 }
9380 }
9381 }
9382
9383 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9384
9385 return 0;
9386}
9387
9388static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9389 bool is_first)
9390{
9391 const unsigned char *new_addr = (const unsigned char *)p;
9392 struct hclge_vport *vport = hclge_get_vport(handle);
9393 struct hclge_dev *hdev = vport->back;
9394 unsigned char *old_addr = NULL;
9395 int ret;
9396
9397 /* mac addr check */
9398 if (is_zero_ether_addr(new_addr) ||
9399 is_broadcast_ether_addr(new_addr) ||
9400 is_multicast_ether_addr(new_addr)) {
9401 dev_err(&hdev->pdev->dev,
9402 "change uc mac err! invalid mac: %pM.\n",
9403 new_addr);
9404 return -EINVAL;
9405 }
9406
9407 ret = hclge_pause_addr_cfg(hdev, new_addr);
9408 if (ret) {
9409 dev_err(&hdev->pdev->dev,
9410 "failed to configure mac pause address, ret = %d\n",
9411 ret);
9412 return ret;
9413 }
9414
9415 if (!is_first)
9416 old_addr = hdev->hw.mac.mac_addr;
9417
9418 spin_lock_bh(&vport->mac_list_lock);
9419 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9420 if (ret) {
9421 dev_err(&hdev->pdev->dev,
9422 "failed to change the mac addr:%pM, ret = %d\n",
9423 new_addr, ret);
9424 spin_unlock_bh(&vport->mac_list_lock);
9425
9426 if (!is_first)
9427 hclge_pause_addr_cfg(hdev, old_addr);
9428
9429 return ret;
9430 }
9431
9432 /* we must update dev addr with spin lock, preventing dev
9433 * addr being removed by set_rx_mode path. */
9434 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9435 spin_unlock_bh(&vport->mac_list_lock);
9436
9437 hclge_task_schedule(hdev, 0);
9438
9439 return 0;
9440}
9441
9442static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9443{
9444 struct mii_ioctl_data *data = if_mii(ifr);
9445
9446 if (!hnae3_dev_phy_imp_supported(hdev))
9447 return -EOPNOTSUPP;
9448
9449 switch (cmd) {
9450 case SIOCGMIIPHY:
9451 data->phy_id = hdev->hw.mac.phy_addr;
9452 /* this command reads phy id and register at the same time */
9453 fallthrough;
9454 case SIOCGMIIREG:
9455 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9456 return 0;
9457
9458 case SIOCSMIIREG:
9459 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9460 default:
9461 return -EOPNOTSUPP;
9462 }
9463}
9464
9465static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9466 int cmd)
9467{
9468 struct hclge_vport *vport = hclge_get_vport(handle);
9469 struct hclge_dev *hdev = vport->back;
9470
9471 switch (cmd) {
9472 case SIOCGHWTSTAMP:
9473 return hclge_ptp_get_cfg(hdev, ifr);
9474 case SIOCSHWTSTAMP:
9475 return hclge_ptp_set_cfg(hdev, ifr);
9476 default:
9477 if (!hdev->hw.mac.phydev)
9478 return hclge_mii_ioctl(hdev, ifr, cmd);
9479 }
9480
9481 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9482}
9483
9484static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9485 bool bypass_en)
9486{
9487 struct hclge_port_vlan_filter_bypass_cmd *req;
9488 struct hclge_desc desc;
9489 int ret;
9490
9491 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9492 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9493 req->vf_id = vf_id;
9494 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9495 bypass_en ? 1 : 0);
9496
9497 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9498 if (ret)
9499 dev_err(&hdev->pdev->dev,
9500 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9501 vf_id, ret);
9502
9503 return ret;
9504}
9505
9506static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9507 u8 fe_type, bool filter_en, u8 vf_id)
9508{
9509 struct hclge_vlan_filter_ctrl_cmd *req;
9510 struct hclge_desc desc;
9511 int ret;
9512
9513 /* read current vlan filter parameter */
9514 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9515 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9516 req->vlan_type = vlan_type;
9517 req->vf_id = vf_id;
9518
9519 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9520 if (ret) {
9521 dev_err(&hdev->pdev->dev,
9522 "failed to get vlan filter config, ret = %d.\n", ret);
9523 return ret;
9524 }
9525
9526 /* modify and write new config parameter */
9527 hclge_cmd_reuse_desc(&desc, false);
9528 req->vlan_fe = filter_en ?
9529 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9530
9531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9532 if (ret)
9533 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9534 ret);
9535
9536 return ret;
9537}
9538
9539static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9540{
9541 struct hclge_dev *hdev = vport->back;
9542 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9543 int ret;
9544
9545 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9546 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9547 HCLGE_FILTER_FE_EGRESS_V1_B,
9548 enable, vport->vport_id);
9549
9550 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9551 HCLGE_FILTER_FE_EGRESS, enable,
9552 vport->vport_id);
9553 if (ret)
9554 return ret;
9555
9556 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9557 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9558 !enable);
9559 } else if (!vport->vport_id) {
9560 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9561 enable = false;
9562
9563 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9564 HCLGE_FILTER_FE_INGRESS,
9565 enable, 0);
9566 }
9567
9568 return ret;
9569}
9570
9571static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9572{
9573 struct hnae3_handle *handle = &vport->nic;
9574 struct hclge_vport_vlan_cfg *vlan, *tmp;
9575 struct hclge_dev *hdev = vport->back;
9576
9577 if (vport->vport_id) {
9578 if (vport->port_base_vlan_cfg.state !=
9579 HNAE3_PORT_BASE_VLAN_DISABLE)
9580 return true;
9581
9582 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9583 return false;
9584 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9585 return false;
9586 }
9587
9588 if (!vport->req_vlan_fltr_en)
9589 return false;
9590
9591 /* compatible with former device, always enable vlan filter */
9592 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9593 return true;
9594
9595 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9596 if (vlan->vlan_id != 0)
9597 return true;
9598
9599 return false;
9600}
9601
9602int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9603{
9604 struct hclge_dev *hdev = vport->back;
9605 bool need_en;
9606 int ret;
9607
9608 mutex_lock(&hdev->vport_lock);
9609
9610 vport->req_vlan_fltr_en = request_en;
9611
9612 need_en = hclge_need_enable_vport_vlan_filter(vport);
9613 if (need_en == vport->cur_vlan_fltr_en) {
9614 mutex_unlock(&hdev->vport_lock);
9615 return 0;
9616 }
9617
9618 ret = hclge_set_vport_vlan_filter(vport, need_en);
9619 if (ret) {
9620 mutex_unlock(&hdev->vport_lock);
9621 return ret;
9622 }
9623
9624 vport->cur_vlan_fltr_en = need_en;
9625
9626 mutex_unlock(&hdev->vport_lock);
9627
9628 return 0;
9629}
9630
9631static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9632{
9633 struct hclge_vport *vport = hclge_get_vport(handle);
9634
9635 return hclge_enable_vport_vlan_filter(vport, enable);
9636}
9637
9638static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9639 bool is_kill, u16 vlan,
9640 struct hclge_desc *desc)
9641{
9642 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9643 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9644 u8 vf_byte_val;
9645 u8 vf_byte_off;
9646 int ret;
9647
9648 hclge_cmd_setup_basic_desc(&desc[0],
9649 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9650 hclge_cmd_setup_basic_desc(&desc[1],
9651 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9652
9653 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9654
9655 vf_byte_off = vfid / 8;
9656 vf_byte_val = 1 << (vfid % 8);
9657
9658 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9659 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9660
9661 req0->vlan_id = cpu_to_le16(vlan);
9662 req0->vlan_cfg = is_kill;
9663
9664 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9665 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9666 else
9667 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9668
9669 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9670 if (ret) {
9671 dev_err(&hdev->pdev->dev,
9672 "Send vf vlan command fail, ret =%d.\n",
9673 ret);
9674 return ret;
9675 }
9676
9677 return 0;
9678}
9679
9680static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9681 bool is_kill, struct hclge_desc *desc)
9682{
9683 struct hclge_vlan_filter_vf_cfg_cmd *req;
9684
9685 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9686
9687 if (!is_kill) {
9688#define HCLGE_VF_VLAN_NO_ENTRY 2
9689 if (!req->resp_code || req->resp_code == 1)
9690 return 0;
9691
9692 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9693 set_bit(vfid, hdev->vf_vlan_full);
9694 dev_warn(&hdev->pdev->dev,
9695 "vf vlan table is full, vf vlan filter is disabled\n");
9696 return 0;
9697 }
9698
9699 dev_err(&hdev->pdev->dev,
9700 "Add vf vlan filter fail, ret =%u.\n",
9701 req->resp_code);
9702 } else {
9703#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9704 if (!req->resp_code)
9705 return 0;
9706
9707 /* vf vlan filter is disabled when vf vlan table is full,
9708 * then new vlan id will not be added into vf vlan table.
9709 * Just return 0 without warning, to avoid massive verbose
9710 * print logs when unloading.
9711 */
9712 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9713 return 0;
9714
9715 dev_err(&hdev->pdev->dev,
9716 "Kill vf vlan filter fail, ret =%u.\n",
9717 req->resp_code);
9718 }
9719
9720 return -EIO;
9721}
9722
9723static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9724 bool is_kill, u16 vlan)
9725{
9726 struct hclge_vport *vport = &hdev->vport[vfid];
9727 struct hclge_desc desc[2];
9728 int ret;
9729
9730 /* if vf vlan table is full, firmware will close vf vlan filter, it
9731 * is unable and unnecessary to add new vlan id to vf vlan filter.
9732 * If spoof check is enabled and vf vlan is full, it shouldn't add
9733 * new vlan, because tx packets with these vlan id will be dropped.
9734 */
9735 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9736 if (vport->vf_info.spoofchk && vlan) {
9737 dev_err(&hdev->pdev->dev,
9738 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9739 return -EPERM;
9740 }
9741 return 0;
9742 }
9743
9744 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9745 if (ret)
9746 return ret;
9747
9748 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9749}
9750
9751static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9752 u16 vlan_id, bool is_kill)
9753{
9754 struct hclge_vlan_filter_pf_cfg_cmd *req;
9755 struct hclge_desc desc;
9756 u8 vlan_offset_byte_val;
9757 u8 vlan_offset_byte;
9758 u8 vlan_offset_160;
9759 int ret;
9760
9761 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9762
9763 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9764 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9765 HCLGE_VLAN_BYTE_SIZE;
9766 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9767
9768 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9769 req->vlan_offset = vlan_offset_160;
9770 req->vlan_cfg = is_kill;
9771 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9772
9773 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9774 if (ret)
9775 dev_err(&hdev->pdev->dev,
9776 "port vlan command, send fail, ret =%d.\n", ret);
9777 return ret;
9778}
9779
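/* update the per-function vf vlan filter first, then update the port
 * vlan filter only when the first vport joins or the last vport leaves
 * the vlan, tracked via the vlan_table bitmap
 */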
9780static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9781 u16 vport_id, u16 vlan_id,
9782 bool is_kill)
9783{
9784 u16 vport_idx, vport_num = 0;
9785 int ret;
9786
9787 if (is_kill && !vlan_id)
9788 return 0;
9789
9790 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9791 if (ret) {
9792 dev_err(&hdev->pdev->dev,
9793 "Set %u vport vlan filter config fail, ret =%d.\n",
9794 vport_id, ret);
9795 return ret;
9796 }
9797
9798 /* vlan 0 may be added twice when 8021q module is enabled */
9799 if (!is_kill && !vlan_id &&
9800 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9801 return 0;
9802
9803 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9804 dev_err(&hdev->pdev->dev,
9805 "Add port vlan failed, vport %u is already in vlan %u\n",
9806 vport_id, vlan_id);
9807 return -EINVAL;
9808 }
9809
9810 if (is_kill &&
9811 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9812 dev_err(&hdev->pdev->dev,
9813 "Delete port vlan failed, vport %u is not in vlan %u\n",
9814 vport_id, vlan_id);
9815 return -EINVAL;
9816 }
9817
9818 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9819 vport_num++;
9820
9821 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9822 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9823 is_kill);
9824
9825 return ret;
9826}
9827
9828static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9829{
9830 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9831 struct hclge_vport_vtag_tx_cfg_cmd *req;
9832 struct hclge_dev *hdev = vport->back;
9833 struct hclge_desc desc;
9834 u16 bmap_index;
9835 int status;
9836
9837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9838
9839 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9840 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9841 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9842 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9843 vcfg->accept_tag1 ? 1 : 0);
9844 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9845 vcfg->accept_untag1 ? 1 : 0);
9846 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9847 vcfg->accept_tag2 ? 1 : 0);
9848 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9849 vcfg->accept_untag2 ? 1 : 0);
9850 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9851 vcfg->insert_tag1_en ? 1 : 0);
9852 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9853 vcfg->insert_tag2_en ? 1 : 0);
9854 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9855 vcfg->tag_shift_mode_en ? 1 : 0);
9856 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9857
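 /* The command targets one function out of a group: vf_offset selects
 * a block of HCLGE_VF_NUM_PER_CMD functions, and a single bit is set
 * in the per-byte bitmap for the function within that block.
 */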
9858 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9859 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9860 HCLGE_VF_NUM_PER_BYTE;
9861 req->vf_bitmap[bmap_index] =
9862 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9863
9864 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9865 if (status)
9866 dev_err(&hdev->pdev->dev,
9867 "Send port txvlan cfg command fail, ret =%d\n",
9868 status);
9869
9870 return status;
9871}
9872
9873static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9874{
9875 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9876 struct hclge_vport_vtag_rx_cfg_cmd *req;
9877 struct hclge_dev *hdev = vport->back;
9878 struct hclge_desc desc;
9879 u16 bmap_index;
9880 int status;
9881
9882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9883
9884 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9885 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9886 vcfg->strip_tag1_en ? 1 : 0);
9887 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9888 vcfg->strip_tag2_en ? 1 : 0);
9889 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9890 vcfg->vlan1_vlan_prionly ? 1 : 0);
9891 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9892 vcfg->vlan2_vlan_prionly ? 1 : 0);
9893 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9894 vcfg->strip_tag1_discard_en ? 1 : 0);
9895 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9896 vcfg->strip_tag2_discard_en ? 1 : 0);
9897
9898 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9899 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9900 HCLGE_VF_NUM_PER_BYTE;
9901 req->vf_bitmap[bmap_index] =
9902 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9903
9904 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9905 if (status)
9906 dev_err(&hdev->pdev->dev,
9907 "Send port rxvlan cfg command fail, ret =%d\n",
9908 status);
9909
9910 return status;
9911}
9912
9913static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9914 u16 port_base_vlan_state,
9915 u16 vlan_tag, u8 qos)
9916{
9917 int ret;
9918
9919 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9920 vport->txvlan_cfg.accept_tag1 = true;
9921 vport->txvlan_cfg.insert_tag1_en = false;
9922 vport->txvlan_cfg.default_tag1 = 0;
9923 } else {
9924 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9925
9926 vport->txvlan_cfg.accept_tag1 =
9927 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9928 vport->txvlan_cfg.insert_tag1_en = true;
9929 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9930 vlan_tag;
9931 }
9932
9933 vport->txvlan_cfg.accept_untag1 = true;
9934
 /* accept_tag2 and accept_untag2 are not supported on
 * pdev revision(0x20); newer revisions support them, but
 * these two fields cannot be configured by the user.
 */
9939 vport->txvlan_cfg.accept_tag2 = true;
9940 vport->txvlan_cfg.accept_untag2 = true;
9941 vport->txvlan_cfg.insert_tag2_en = false;
9942 vport->txvlan_cfg.default_tag2 = 0;
9943 vport->txvlan_cfg.tag_shift_mode_en = true;
9944
9945 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9946 vport->rxvlan_cfg.strip_tag1_en = false;
9947 vport->rxvlan_cfg.strip_tag2_en =
9948 vport->rxvlan_cfg.rx_vlan_offload_en;
9949 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9950 } else {
9951 vport->rxvlan_cfg.strip_tag1_en =
9952 vport->rxvlan_cfg.rx_vlan_offload_en;
9953 vport->rxvlan_cfg.strip_tag2_en = true;
9954 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9955 }
9956
9957 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9958 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9959 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9960
9961 ret = hclge_set_vlan_tx_offload_cfg(vport);
9962 if (ret)
9963 return ret;
9964
9965 return hclge_set_vlan_rx_offload_cfg(vport);
9966}
9967
9968static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9969{
9970 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9971 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9972 struct hclge_desc desc;
9973 int status;
9974
9975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9976 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9977 rx_req->ot_fst_vlan_type =
9978 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9979 rx_req->ot_sec_vlan_type =
9980 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9981 rx_req->in_fst_vlan_type =
9982 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9983 rx_req->in_sec_vlan_type =
9984 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9985
9986 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9987 if (status) {
9988 dev_err(&hdev->pdev->dev,
9989 "Send rxvlan protocol type command fail, ret =%d\n",
9990 status);
9991 return status;
9992 }
9993
9994 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9995
9996 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9997 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9998 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9999
10000 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10001 if (status)
10002 dev_err(&hdev->pdev->dev,
10003 "Send txvlan protocol type command fail, ret =%d\n",
10004 status);
10005
10006 return status;
10007}
10008
10009static int hclge_init_vlan_config(struct hclge_dev *hdev)
10010{
#define HCLGE_DEF_VLAN_TYPE 0x8100
10012
10013 struct hnae3_handle *handle = &hdev->vport[0].nic;
10014 struct hclge_vport *vport;
10015 int ret;
10016 int i;
10017
10018 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
 /* for revision 0x21, vf vlan filter is per function */
10020 for (i = 0; i < hdev->num_alloc_vport; i++) {
10021 vport = &hdev->vport[i];
10022 ret = hclge_set_vlan_filter_ctrl(hdev,
10023 HCLGE_FILTER_TYPE_VF,
10024 HCLGE_FILTER_FE_EGRESS,
10025 true,
10026 vport->vport_id);
10027 if (ret)
10028 return ret;
10029 vport->cur_vlan_fltr_en = true;
10030 }
10031
10032 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10033 HCLGE_FILTER_FE_INGRESS, true,
10034 0);
10035 if (ret)
10036 return ret;
10037 } else {
10038 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10039 HCLGE_FILTER_FE_EGRESS_V1_B,
10040 true, 0);
10041 if (ret)
10042 return ret;
10043 }
10044
10045 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10046 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10047 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10048 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10049 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10050 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10051
10052 ret = hclge_set_vlan_protocol_type(hdev);
10053 if (ret)
10054 return ret;
10055
10056 for (i = 0; i < hdev->num_alloc_vport; i++) {
10057 u16 vlan_tag;
10058 u8 qos;
10059
10060 vport = &hdev->vport[i];
10061 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10062 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10063
10064 ret = hclge_vlan_offload_cfg(vport,
10065 vport->port_base_vlan_cfg.state,
10066 vlan_tag, qos);
10067 if (ret)
10068 return ret;
10069 }
10070
10071 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10072}
10073
10074static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10075 bool writen_to_tbl)
10076{
10077 struct hclge_vport_vlan_cfg *vlan, *tmp;
10078
10079 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10080 if (vlan->vlan_id == vlan_id)
10081 return;
10082
10083 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10084 if (!vlan)
10085 return;
10086
10087 vlan->hd_tbl_status = writen_to_tbl;
10088 vlan->vlan_id = vlan_id;
10089
10090 list_add_tail(&vlan->node, &vport->vlan_list);
10091}
10092
10093static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10094{
10095 struct hclge_vport_vlan_cfg *vlan, *tmp;
10096 struct hclge_dev *hdev = vport->back;
10097 int ret;
10098
10099 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10100 if (!vlan->hd_tbl_status) {
10101 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10102 vport->vport_id,
10103 vlan->vlan_id, false);
10104 if (ret) {
10105 dev_err(&hdev->pdev->dev,
10106 "restore vport vlan list failed, ret=%d\n",
10107 ret);
10108 return ret;
10109 }
10110 }
10111 vlan->hd_tbl_status = true;
10112 }
10113
10114 return 0;
10115}
10116
10117static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10118 bool is_write_tbl)
10119{
10120 struct hclge_vport_vlan_cfg *vlan, *tmp;
10121 struct hclge_dev *hdev = vport->back;
10122
10123 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10124 if (vlan->vlan_id == vlan_id) {
10125 if (is_write_tbl && vlan->hd_tbl_status)
10126 hclge_set_vlan_filter_hw(hdev,
10127 htons(ETH_P_8021Q),
10128 vport->vport_id,
10129 vlan_id,
10130 true);
10131
10132 list_del(&vlan->node);
10133 kfree(vlan);
10134 break;
10135 }
10136 }
10137}
10138
10139void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10140{
10141 struct hclge_vport_vlan_cfg *vlan, *tmp;
10142 struct hclge_dev *hdev = vport->back;
10143
10144 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10145 if (vlan->hd_tbl_status)
10146 hclge_set_vlan_filter_hw(hdev,
10147 htons(ETH_P_8021Q),
10148 vport->vport_id,
10149 vlan->vlan_id,
10150 true);
10151
10152 vlan->hd_tbl_status = false;
10153 if (is_del_list) {
10154 list_del(&vlan->node);
10155 kfree(vlan);
10156 }
10157 }
10158 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10159}
10160
10161void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10162{
10163 struct hclge_vport_vlan_cfg *vlan, *tmp;
10164 struct hclge_vport *vport;
10165 int i;
10166
10167 for (i = 0; i < hdev->num_alloc_vport; i++) {
10168 vport = &hdev->vport[i];
10169 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10170 list_del(&vlan->node);
10171 kfree(vlan);
10172 }
10173 }
10174}
10175
10176void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10177{
10178 struct hclge_vport_vlan_cfg *vlan, *tmp;
10179 struct hclge_dev *hdev = vport->back;
10180 u16 vlan_proto;
10181 u16 vlan_id;
10182 u16 state;
10183 int ret;
10184
10185 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10186 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10187 state = vport->port_base_vlan_cfg.state;
10188
10189 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10190 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10191 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10192 vport->vport_id, vlan_id,
10193 false);
10194 return;
10195 }
10196
10197 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10198 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10199 vport->vport_id,
10200 vlan->vlan_id, false);
10201 if (ret)
10202 break;
10203 vlan->hd_tbl_status = true;
10204 }
10205}
10206
/* For a global or imp reset, the hardware clears the mac table, so mac
 * addresses in state ACTIVE are moved to TO_ADD and restored by the
 * service task once the reset completes. Addresses in state TO_DEL need
 * not survive the reset, so their nodes are simply removed from the list.
 */
10213static void hclge_mac_node_convert_for_reset(struct list_head *list)
10214{
10215 struct hclge_mac_node *mac_node, *tmp;
10216
10217 list_for_each_entry_safe(mac_node, tmp, list, node) {
10218 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10219 mac_node->state = HCLGE_MAC_TO_ADD;
10220 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10221 list_del(&mac_node->node);
10222 kfree(mac_node);
10223 }
10224 }
10225}
10226
10227void hclge_restore_mac_table_common(struct hclge_vport *vport)
10228{
10229 spin_lock_bh(&vport->mac_list_lock);
10230
10231 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10232 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10233 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10234
10235 spin_unlock_bh(&vport->mac_list_lock);
10236}
10237
10238static void hclge_restore_hw_table(struct hclge_dev *hdev)
10239{
10240 struct hclge_vport *vport = &hdev->vport[0];
10241 struct hnae3_handle *handle = &vport->nic;
10242
10243 hclge_restore_mac_table_common(vport);
10244 hclge_restore_vport_vlan_table(vport);
10245 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10246 hclge_restore_fd_entries(handle);
10247}
10248
10249int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10250{
10251 struct hclge_vport *vport = hclge_get_vport(handle);
10252
10253 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10254 vport->rxvlan_cfg.strip_tag1_en = false;
10255 vport->rxvlan_cfg.strip_tag2_en = enable;
10256 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10257 } else {
10258 vport->rxvlan_cfg.strip_tag1_en = enable;
10259 vport->rxvlan_cfg.strip_tag2_en = true;
10260 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10261 }
10262
10263 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10264 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10265 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10266 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10267
10268 return hclge_set_vlan_rx_offload_cfg(vport);
10269}
10270
10271static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10272{
10273 struct hclge_dev *hdev = vport->back;
10274
10275 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10276 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10277}
10278
10279static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10280 u16 port_base_vlan_state,
10281 struct hclge_vlan_info *new_info,
10282 struct hclge_vlan_info *old_info)
10283{
10284 struct hclge_dev *hdev = vport->back;
10285 int ret;
10286
10287 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10288 hclge_rm_vport_all_vlan_table(vport, false);

 /* force clear VLAN 0 */
10290 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10291 if (ret)
10292 return ret;
10293 return hclge_set_vlan_filter_hw(hdev,
10294 htons(new_info->vlan_proto),
10295 vport->vport_id,
10296 new_info->vlan_tag,
10297 false);
10298 }
10299
 /* force add VLAN 0 */
10301 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10302 if (ret)
10303 return ret;
10304
10305 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10306 vport->vport_id, old_info->vlan_tag,
10307 true);
10308 if (ret)
10309 return ret;
10310
10311 return hclge_add_vport_all_vlan_table(vport);
10312}
10313
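/* The hardware entry is rewritten when the tag changes, or whenever tag 0 is
 * involved with a zero qos on either side, presumably because a qos-only
 * setting (tag 0, nonzero priority) and "no port based VLAN" share VLAN id 0
 * in the filter table and cannot be told apart there.
 */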
10314static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10315 const struct hclge_vlan_info *old_cfg)
10316{
10317 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10318 return true;
10319
10320 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10321 return true;
10322
10323 return false;
10324}
10325
10326int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10327 struct hclge_vlan_info *vlan_info)
10328{
10329 struct hnae3_handle *nic = &vport->nic;
10330 struct hclge_vlan_info *old_vlan_info;
10331 struct hclge_dev *hdev = vport->back;
10332 int ret;
10333
10334 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10335
10336 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10337 vlan_info->qos);
10338 if (ret)
10339 return ret;
10340
10341 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10342 goto out;
10343
10344 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
 /* add new VLAN tag */
10346 ret = hclge_set_vlan_filter_hw(hdev,
10347 htons(vlan_info->vlan_proto),
10348 vport->vport_id,
10349 vlan_info->vlan_tag,
10350 false);
10351 if (ret)
10352 return ret;
10353
 /* remove old VLAN tag */
10355 if (old_vlan_info->vlan_tag == 0)
10356 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10357 true, 0);
10358 else
10359 ret = hclge_set_vlan_filter_hw(hdev,
10360 htons(ETH_P_8021Q),
10361 vport->vport_id,
10362 old_vlan_info->vlan_tag,
10363 true);
10364 if (ret) {
10365 dev_err(&hdev->pdev->dev,
10366 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10367 vport->vport_id, old_vlan_info->vlan_tag, ret);
10368 return ret;
10369 }
10370
10371 goto out;
10372 }
10373
10374 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10375 old_vlan_info);
10376 if (ret)
10377 return ret;
10378
10379out:
10380 vport->port_base_vlan_cfg.state = state;
10381 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10382 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10383 else
10384 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10385
10386 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10387 hclge_set_vport_vlan_fltr_change(vport);
10388
10389 return 0;
10390}
10391
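/* Port based VLAN state transitions, derived from the current state and the
 * requested (vlan, qos) pair:
 *   DISABLE + (0, 0)                 -> NOCHANGE
 *   DISABLE + anything else          -> ENABLE
 *   ENABLE/MODIFY + (0, 0)           -> DISABLE
 *   ENABLE/MODIFY + same (vlan, qos) -> NOCHANGE
 *   ENABLE/MODIFY + new (vlan, qos)  -> MODIFY
 */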
10392static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10393 enum hnae3_port_base_vlan_state state,
10394 u16 vlan, u8 qos)
10395{
10396 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10397 if (!vlan && !qos)
10398 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10399
10400 return HNAE3_PORT_BASE_VLAN_ENABLE;
10401 }
10402
10403 if (!vlan && !qos)
10404 return HNAE3_PORT_BASE_VLAN_DISABLE;
10405
10406 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10407 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10408 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10409
10410 return HNAE3_PORT_BASE_VLAN_MODIFY;
10411}
10412
10413static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10414 u16 vlan, u8 qos, __be16 proto)
10415{
10416 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10417 struct hclge_vport *vport = hclge_get_vport(handle);
10418 struct hclge_dev *hdev = vport->back;
10419 struct hclge_vlan_info vlan_info;
10420 u16 state;
10421 int ret;
10422
10423 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10424 return -EOPNOTSUPP;
10425
10426 vport = hclge_get_vf_vport(hdev, vfid);
10427 if (!vport)
10428 return -EINVAL;
10429
 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10431 if (vlan > VLAN_N_VID - 1 || qos > 7)
10432 return -EINVAL;
10433 if (proto != htons(ETH_P_8021Q))
10434 return -EPROTONOSUPPORT;
10435
10436 state = hclge_get_port_base_vlan_state(vport,
10437 vport->port_base_vlan_cfg.state,
10438 vlan, qos);
10439 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10440 return 0;
10441
10442 vlan_info.vlan_tag = vlan;
10443 vlan_info.qos = qos;
10444 vlan_info.vlan_proto = ntohs(proto);
10445
10446 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10447 if (ret) {
10448 dev_err(&hdev->pdev->dev,
10449 "failed to update port base vlan for vf %d, ret = %d\n",
10450 vfid, ret);
10451 return ret;
10452 }
10453
 /* for DEVICE_VERSION_V3 the vf doesn't need to be told about the
 * port based VLAN state, so there is nothing to push to it.
 */
10457 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10458 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10459 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10460 vport->vport_id, state,
10461 &vlan_info);
10462
10463 return 0;
10464}
10465
10466static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10467{
10468 struct hclge_vlan_info *vlan_info;
10469 struct hclge_vport *vport;
10470 int ret;
10471 int vf;
10472
 /* clear the port base vlan for all vfs */
10474 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10475 vport = &hdev->vport[vf];
10476 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10477
10478 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10479 vport->vport_id,
10480 vlan_info->vlan_tag, true);
10481 if (ret)
10482 dev_err(&hdev->pdev->dev,
10483 "failed to clear vf vlan for vf%d, ret = %d\n",
10484 vf - HCLGE_VF_VPORT_START_NUM, ret);
10485 }
10486}
10487
10488int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10489 u16 vlan_id, bool is_kill)
10490{
10491 struct hclge_vport *vport = hclge_get_vport(handle);
10492 struct hclge_dev *hdev = vport->back;
10493 bool writen_to_tbl = false;
10494 int ret = 0;
10495
 /* When the device is resetting or reset has failed, the firmware
 * cannot handle mailbox messages. Just record the vlan id and
 * remove it once the reset finishes.
 */
10500 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10501 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10502 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10503 return -EBUSY;
10504 }
10505
 /* When port base vlan is enabled, the port base vlan is used as the
 * vlan filter entry. In that case the vlan filter table is not
 * updated when the user adds or removes a vlan; only the vport vlan
 * list is updated. The vlan ids on that list are written to the
 * vlan filter table once port base vlan is disabled again.
 */
10512 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10513 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10514 vlan_id, is_kill);
10515 writen_to_tbl = true;
10516 }
10517
10518 if (!ret) {
10519 if (is_kill)
10520 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10521 else
10522 hclge_add_vport_vlan_table(vport, vlan_id,
10523 writen_to_tbl);
10524 } else if (is_kill) {
 /* When removing the hw vlan filter failed, record the vlan
 * id, and try to remove it from hw later, to stay consistent
 * with the stack.
 */
10529 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10530 }
10531
10532 hclge_set_vport_vlan_fltr_change(vport);
10533
10534 return ret;
10535}
10536
10537static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10538{
10539 struct hclge_vport *vport;
10540 int ret;
10541 u16 i;
10542
10543 for (i = 0; i < hdev->num_alloc_vport; i++) {
10544 vport = &hdev->vport[i];
10545 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10546 &vport->state))
10547 continue;
10548
10549 ret = hclge_enable_vport_vlan_filter(vport,
10550 vport->req_vlan_fltr_en);
10551 if (ret) {
10552 dev_err(&hdev->pdev->dev,
10553 "failed to sync vlan filter state for vport%u, ret = %d\n",
10554 vport->vport_id, ret);
10555 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10556 &vport->state);
10557 return;
10558 }
10559 }
10560}
10561
10562static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10563{
#define HCLGE_MAX_SYNC_COUNT 60
10565
10566 int i, ret, sync_cnt = 0;
10567 u16 vlan_id;
10568
 /* retry the vlan ids which previously failed to be deleted from hw */
10570 for (i = 0; i < hdev->num_alloc_vport; i++) {
10571 struct hclge_vport *vport = &hdev->vport[i];
10572
10573 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10574 VLAN_N_VID);
10575 while (vlan_id != VLAN_N_VID) {
10576 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10577 vport->vport_id, vlan_id,
10578 true);
10579 if (ret && ret != -EINVAL)
10580 return;
10581
10582 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10583 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10584 hclge_set_vport_vlan_fltr_change(vport);
10585
10586 sync_cnt++;
10587 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10588 return;
10589
10590 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10591 VLAN_N_VID);
10592 }
10593 }
10594
10595 hclge_sync_vlan_fltr_state(hdev);
10596}
10597
10598static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10599{
10600 struct hclge_config_max_frm_size_cmd *req;
10601 struct hclge_desc desc;
10602
10603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10604
10605 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10606 req->max_frm_size = cpu_to_le16(new_mps);
10607 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10608
10609 return hclge_cmd_send(&hdev->hw, &desc, 1);
10610}
10611
10612static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10613{
10614 struct hclge_vport *vport = hclge_get_vport(handle);
10615
10616 return hclge_set_vport_mtu(vport, new_mtu);
10617}
10618
10619int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10620{
10621 struct hclge_dev *hdev = vport->back;
10622 int i, max_frm_size, ret;
10623
 /* the HW supports two layers of vlan tags */
10625 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
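 /* e.g. a standard 1500 byte MTU gives 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 8 (two VLAN tags) = 1526 bytes on the wire.
 */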
10626 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10627 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10628 return -EINVAL;
10629
10630 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10631 mutex_lock(&hdev->vport_lock);
10632
10633 if (vport->vport_id && max_frm_size > hdev->mps) {
10634 mutex_unlock(&hdev->vport_lock);
10635 return -EINVAL;
10636 } else if (vport->vport_id) {
10637 vport->mps = max_frm_size;
10638 mutex_unlock(&hdev->vport_lock);
10639 return 0;
10640 }
10641
 /* the PF's mps must be greater than or equal to all VFs' mps */
10643 for (i = 1; i < hdev->num_alloc_vport; i++)
10644 if (max_frm_size < hdev->vport[i].mps) {
10645 mutex_unlock(&hdev->vport_lock);
10646 return -EINVAL;
10647 }
10648
10649 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10650
10651 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10652 if (ret) {
10653 dev_err(&hdev->pdev->dev,
10654 "Change mtu fail, ret =%d\n", ret);
10655 goto out;
10656 }
10657
10658 hdev->mps = max_frm_size;
10659 vport->mps = max_frm_size;
10660
10661 ret = hclge_buffer_alloc(hdev);
10662 if (ret)
10663 dev_err(&hdev->pdev->dev,
10664 "Allocate buffer fail, ret =%d\n", ret);
10665
10666out:
10667 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10668 mutex_unlock(&hdev->vport_lock);
10669 return ret;
10670}
10671
10672static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10673 bool enable)
10674{
10675 struct hclge_reset_tqp_queue_cmd *req;
10676 struct hclge_desc desc;
10677 int ret;
10678
10679 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10680
10681 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10682 req->tqp_id = cpu_to_le16(queue_id);
10683 if (enable)
10684 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10685
10686 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10687 if (ret) {
10688 dev_err(&hdev->pdev->dev,
10689 "Send tqp reset cmd error, status =%d\n", ret);
10690 return ret;
10691 }
10692
10693 return 0;
10694}
10695
10696static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10697{
10698 struct hclge_reset_tqp_queue_cmd *req;
10699 struct hclge_desc desc;
10700 int ret;
10701
10702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10703
10704 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10705 req->tqp_id = cpu_to_le16(queue_id);
10706
10707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10708 if (ret) {
10709 dev_err(&hdev->pdev->dev,
10710 "Get reset status error, status =%d\n", ret);
10711 return ret;
10712 }
10713
10714 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10715}
10716
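/* Translate a queue id that is local to this handle into the global TQP
 * index expected by the reset commands.
 */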
10717u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10718{
10719 struct hnae3_queue *queue;
10720 struct hclge_tqp *tqp;
10721
10722 queue = handle->kinfo.tqp[queue_id];
10723 tqp = container_of(queue, struct hclge_tqp, q);
10724
10725 return tqp->index;
10726}
10727
10728static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10729{
10730 struct hclge_vport *vport = hclge_get_vport(handle);
10731 struct hclge_dev *hdev = vport->back;
10732 u16 reset_try_times = 0;
10733 int reset_status;
10734 u16 queue_gid;
10735 int ret;
10736 u16 i;
10737
10738 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10739 queue_gid = hclge_covert_handle_qid_global(handle, i);
10740 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10741 if (ret) {
10742 dev_err(&hdev->pdev->dev,
10743 "failed to send reset tqp cmd, ret = %d\n",
10744 ret);
10745 return ret;
10746 }
10747
10748 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10749 reset_status = hclge_get_reset_status(hdev, queue_gid);
10750 if (reset_status)
10751 break;
10752
 /* Wait for tqp hw reset */
10754 usleep_range(1000, 1200);
10755 }
10756
 /* reset_status stays zero only if every poll above timed out */
 if (!reset_status) {
10758 dev_err(&hdev->pdev->dev,
10759 "wait for tqp hw reset timeout\n");
10760 return -ETIME;
10761 }
10762
10763 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10764 if (ret) {
10765 dev_err(&hdev->pdev->dev,
10766 "failed to deassert soft reset, ret = %d\n",
10767 ret);
10768 return ret;
10769 }
10770 reset_try_times = 0;
10771 }
10772 return 0;
10773}
10774
10775static int hclge_reset_rcb(struct hnae3_handle *handle)
10776{
#define HCLGE_RESET_RCB_NOT_SUPPORT 0U
#define HCLGE_RESET_RCB_SUCCESS 1U
10779
10780 struct hclge_vport *vport = hclge_get_vport(handle);
10781 struct hclge_dev *hdev = vport->back;
10782 struct hclge_reset_cmd *req;
10783 struct hclge_desc desc;
10784 u8 return_status;
10785 u16 queue_gid;
10786 int ret;
10787
10788 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10789
10790 req = (struct hclge_reset_cmd *)desc.data;
10791 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10792 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10793 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10794 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10795
10796 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10797 if (ret) {
10798 dev_err(&hdev->pdev->dev,
10799 "failed to send rcb reset cmd, ret = %d\n", ret);
10800 return ret;
10801 }
10802
10803 return_status = req->fun_reset_rcb_return_status;
10804 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10805 return 0;
10806
10807 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10808 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10809 return_status);
10810 return -EIO;
10811 }
10812
 /* if the reset rcb command is unsupported, we need to send the reset
 * tqp command again to reset all tqps.
 */
10816 return hclge_reset_tqp_cmd(handle);
10817}
10818
10819int hclge_reset_tqp(struct hnae3_handle *handle)
10820{
10821 struct hclge_vport *vport = hclge_get_vport(handle);
10822 struct hclge_dev *hdev = vport->back;
10823 int ret;
10824
 /* only need to disable the PF's tqps */
10826 if (!vport->vport_id) {
10827 ret = hclge_tqp_enable(handle, false);
10828 if (ret) {
10829 dev_err(&hdev->pdev->dev,
10830 "failed to disable tqp, ret = %d\n", ret);
10831 return ret;
10832 }
10833 }
10834
10835 return hclge_reset_rcb(handle);
10836}
10837
10838static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10839{
10840 struct hclge_vport *vport = hclge_get_vport(handle);
10841 struct hclge_dev *hdev = vport->back;
10842
10843 return hdev->fw_version;
10844}
10845
10846static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10847{
10848 struct phy_device *phydev = hdev->hw.mac.phydev;
10849
10850 if (!phydev)
10851 return;
10852
10853 phy_set_asym_pause(phydev, rx_en, tx_en);
10854}
10855
10856static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10857{
10858 int ret;
10859
10860 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10861 return 0;
10862
10863 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10864 if (ret)
10865 dev_err(&hdev->pdev->dev,
10866 "configure pauseparam error, ret = %d.\n", ret);
10867
10868 return ret;
10869}
10870
10871int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10872{
10873 struct phy_device *phydev = hdev->hw.mac.phydev;
10874 u16 remote_advertising = 0;
10875 u16 local_advertising;
10876 u32 rx_pause, tx_pause;
10877 u8 flowctl;
10878
10879 if (!phydev->link || !phydev->autoneg)
10880 return 0;
10881
10882 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10883
10884 if (phydev->pause)
10885 remote_advertising = LPA_PAUSE_CAP;
10886
10887 if (phydev->asym_pause)
10888 remote_advertising |= LPA_PAUSE_ASYM;
10889
10890 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10891 remote_advertising);
10892 tx_pause = flowctl & FLOW_CTRL_TX;
10893 rx_pause = flowctl & FLOW_CTRL_RX;
10894
10895 if (phydev->duplex == HCLGE_MAC_HALF) {
10896 tx_pause = 0;
10897 rx_pause = 0;
10898 }
10899
10900 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10901}
10902
10903static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10904 u32 *rx_en, u32 *tx_en)
10905{
10906 struct hclge_vport *vport = hclge_get_vport(handle);
10907 struct hclge_dev *hdev = vport->back;
10908 u8 media_type = hdev->hw.mac.media_type;
10909
10910 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10911 hclge_get_autoneg(handle) : 0;
10912
10913 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10914 *rx_en = 0;
10915 *tx_en = 0;
10916 return;
10917 }
10918
10919 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10920 *rx_en = 1;
10921 *tx_en = 0;
10922 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10923 *tx_en = 1;
10924 *rx_en = 0;
10925 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10926 *rx_en = 1;
10927 *tx_en = 1;
10928 } else {
10929 *rx_en = 0;
10930 *tx_en = 0;
10931 }
10932}
10933
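/* Map the requested (rx_en, tx_en) pair onto the driver's flow control
 * modes: both -> FC_FULL, rx only -> FC_RX_PAUSE, tx only -> FC_TX_PAUSE,
 * neither -> FC_NONE; the result is also recorded in fc_mode_last_time so
 * that it can be restored later.
 */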
10934static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10935 u32 rx_en, u32 tx_en)
10936{
10937 if (rx_en && tx_en)
10938 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10939 else if (rx_en && !tx_en)
10940 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10941 else if (!rx_en && tx_en)
10942 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10943 else
10944 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10945
10946 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10947}
10948
10949static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10950 u32 rx_en, u32 tx_en)
10951{
10952 struct hclge_vport *vport = hclge_get_vport(handle);
10953 struct hclge_dev *hdev = vport->back;
10954 struct phy_device *phydev = hdev->hw.mac.phydev;
10955 u32 fc_autoneg;
10956
10957 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10958 fc_autoneg = hclge_get_autoneg(handle);
10959 if (auto_neg != fc_autoneg) {
10960 dev_info(&hdev->pdev->dev,
10961 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10962 return -EOPNOTSUPP;
10963 }
10964 }
10965
10966 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10967 dev_info(&hdev->pdev->dev,
10968 "Priority flow control enabled. Cannot set link flow control.\n");
10969 return -EOPNOTSUPP;
10970 }
10971
10972 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10973
10974 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10975
10976 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10977 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10978
10979 if (phydev)
10980 return phy_start_aneg(phydev);
10981
10982 return -EOPNOTSUPP;
10983}
10984
10985static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10986 u8 *auto_neg, u32 *speed, u8 *duplex)
10987{
10988 struct hclge_vport *vport = hclge_get_vport(handle);
10989 struct hclge_dev *hdev = vport->back;
10990
10991 if (speed)
10992 *speed = hdev->hw.mac.speed;
10993 if (duplex)
10994 *duplex = hdev->hw.mac.duplex;
10995 if (auto_neg)
10996 *auto_neg = hdev->hw.mac.autoneg;
10997}
10998
10999static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11000 u8 *module_type)
11001{
11002 struct hclge_vport *vport = hclge_get_vport(handle);
11003 struct hclge_dev *hdev = vport->back;
11004
 /* When the nic is down, the service task is not running and the
 * port information is not refreshed periodically. Query the port
 * info here first, so the returned media information is current.
 */
11009 hclge_update_port_info(hdev);
11010
11011 if (media_type)
11012 *media_type = hdev->hw.mac.media_type;
11013
11014 if (module_type)
11015 *module_type = hdev->hw.mac.module_type;
11016}
11017
11018static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11019 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11020{
11021 struct hclge_vport *vport = hclge_get_vport(handle);
11022 struct hclge_dev *hdev = vport->back;
11023 struct phy_device *phydev = hdev->hw.mac.phydev;
11024 int mdix_ctrl, mdix, is_resolved;
11025 unsigned int retval;
11026
11027 if (!phydev) {
11028 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11029 *tp_mdix = ETH_TP_MDI_INVALID;
11030 return;
11031 }
11032
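 /* The MDI(-X) bits live on a separate PHY register page: switch to
 * the MDIX page, read the control and status registers, then switch
 * back to the copper page.
 */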
11033 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11034
11035 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11036 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11037 HCLGE_PHY_MDIX_CTRL_S);
11038
11039 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11040 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11041 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11042
11043 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11044
11045 switch (mdix_ctrl) {
11046 case 0x0:
11047 *tp_mdix_ctrl = ETH_TP_MDI;
11048 break;
11049 case 0x1:
11050 *tp_mdix_ctrl = ETH_TP_MDI_X;
11051 break;
11052 case 0x3:
11053 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11054 break;
11055 default:
11056 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11057 break;
11058 }
11059
11060 if (!is_resolved)
11061 *tp_mdix = ETH_TP_MDI_INVALID;
11062 else if (mdix)
11063 *tp_mdix = ETH_TP_MDI_X;
11064 else
11065 *tp_mdix = ETH_TP_MDI;
11066}
11067
11068static void hclge_info_show(struct hclge_dev *hdev)
11069{
11070 struct device *dev = &hdev->pdev->dev;
11071
11072 dev_info(dev, "PF info begin:\n");
11073
 dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
 dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
 dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
11079 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11080 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11081 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11082 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11083 dev_info(dev, "This is %s PF\n",
11084 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11085 dev_info(dev, "DCB %s\n",
11086 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11087 dev_info(dev, "MQPRIO %s\n",
11088 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11089 dev_info(dev, "Default tx spare buffer size: %u\n",
11090 hdev->tx_spare_buf_size);
11091
11092 dev_info(dev, "PF info end.\n");
11093}
11094
11095static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11096 struct hclge_vport *vport)
11097{
11098 struct hnae3_client *client = vport->nic.client;
11099 struct hclge_dev *hdev = ae_dev->priv;
11100 int rst_cnt = hdev->rst_stats.reset_cnt;
11101 int ret;
11102
11103 ret = client->ops->init_instance(&vport->nic);
11104 if (ret)
11105 return ret;
11106
11107 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11108 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11109 rst_cnt != hdev->rst_stats.reset_cnt) {
11110 ret = -EBUSY;
11111 goto init_nic_err;
11112 }
11113
 /* Enable nic hw error interrupts */
11115 ret = hclge_config_nic_hw_error(hdev, true);
11116 if (ret) {
11117 dev_err(&ae_dev->pdev->dev,
11118 "fail(%d) to enable hw error interrupts\n", ret);
11119 goto init_nic_err;
11120 }
11121
11122 hnae3_set_client_init_flag(client, ae_dev, 1);
11123
11124 if (netif_msg_drv(&hdev->vport->nic))
11125 hclge_info_show(hdev);
11126
11127 return ret;
11128
11129init_nic_err:
11130 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11131 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11132 msleep(HCLGE_WAIT_RESET_DONE);
11133
11134 client->ops->uninit_instance(&vport->nic, 0);
11135
11136 return ret;
11137}
11138
11139static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11140 struct hclge_vport *vport)
11141{
11142 struct hclge_dev *hdev = ae_dev->priv;
11143 struct hnae3_client *client;
11144 int rst_cnt;
11145 int ret;
11146
11147 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11148 !hdev->nic_client)
11149 return 0;
11150
11151 client = hdev->roce_client;
11152 ret = hclge_init_roce_base_info(vport);
11153 if (ret)
11154 return ret;
11155
11156 rst_cnt = hdev->rst_stats.reset_cnt;
11157 ret = client->ops->init_instance(&vport->roce);
11158 if (ret)
11159 return ret;
11160
11161 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11162 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11163 rst_cnt != hdev->rst_stats.reset_cnt) {
11164 ret = -EBUSY;
11165 goto init_roce_err;
11166 }
11167
 /* Enable roce ras interrupts */
11169 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11170 if (ret) {
11171 dev_err(&ae_dev->pdev->dev,
11172 "fail(%d) to enable roce ras interrupts\n", ret);
11173 goto init_roce_err;
11174 }
11175
11176 hnae3_set_client_init_flag(client, ae_dev, 1);
11177
11178 return 0;
11179
11180init_roce_err:
11181 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11182 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11183 msleep(HCLGE_WAIT_RESET_DONE);
11184
11185 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11186
11187 return ret;
11188}
11189
11190static int hclge_init_client_instance(struct hnae3_client *client,
11191 struct hnae3_ae_dev *ae_dev)
11192{
11193 struct hclge_dev *hdev = ae_dev->priv;
11194 struct hclge_vport *vport = &hdev->vport[0];
11195 int ret;
11196
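 /* A KNIC client also triggers the RoCE client init, since the RoCE
 * instance can only be set up once the NIC client is registered.
 */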
11197 switch (client->type) {
11198 case HNAE3_CLIENT_KNIC:
11199 hdev->nic_client = client;
11200 vport->nic.client = client;
11201 ret = hclge_init_nic_client_instance(ae_dev, vport);
11202 if (ret)
11203 goto clear_nic;
11204
11205 ret = hclge_init_roce_client_instance(ae_dev, vport);
11206 if (ret)
11207 goto clear_roce;
11208
11209 break;
11210 case HNAE3_CLIENT_ROCE:
11211 if (hnae3_dev_roce_supported(hdev)) {
11212 hdev->roce_client = client;
11213 vport->roce.client = client;
11214 }
11215
11216 ret = hclge_init_roce_client_instance(ae_dev, vport);
11217 if (ret)
11218 goto clear_roce;
11219
11220 break;
11221 default:
11222 return -EINVAL;
11223 }
11224
11225 return 0;
11226
11227clear_nic:
11228 hdev->nic_client = NULL;
11229 vport->nic.client = NULL;
11230 return ret;
11231clear_roce:
11232 hdev->roce_client = NULL;
11233 vport->roce.client = NULL;
11234 return ret;
11235}
11236
11237static void hclge_uninit_client_instance(struct hnae3_client *client,
11238 struct hnae3_ae_dev *ae_dev)
11239{
11240 struct hclge_dev *hdev = ae_dev->priv;
11241 struct hclge_vport *vport = &hdev->vport[0];
11242
11243 if (hdev->roce_client) {
11244 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11245 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11246 msleep(HCLGE_WAIT_RESET_DONE);
11247
11248 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11249 hdev->roce_client = NULL;
11250 vport->roce.client = NULL;
11251 }
11252 if (client->type == HNAE3_CLIENT_ROCE)
11253 return;
11254 if (hdev->nic_client && client->ops->uninit_instance) {
11255 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11256 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11257 msleep(HCLGE_WAIT_RESET_DONE);
11258
11259 client->ops->uninit_instance(&vport->nic, 0);
11260 hdev->nic_client = NULL;
11261 vport->nic.client = NULL;
11262 }
11263}
11264
11265static int hclge_dev_mem_map(struct hclge_dev *hdev)
11266{
#define HCLGE_MEM_BAR 4
11268
11269 struct pci_dev *pdev = hdev->pdev;
11270 struct hclge_hw *hw = &hdev->hw;
11271
 /* if the device does not have device memory, return directly */
11273 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11274 return 0;
11275
11276 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11277 pci_resource_start(pdev, HCLGE_MEM_BAR),
11278 pci_resource_len(pdev, HCLGE_MEM_BAR));
11279 if (!hw->mem_base) {
11280 dev_err(&pdev->dev, "failed to map device memory\n");
11281 return -EFAULT;
11282 }
11283
11284 return 0;
11285}
11286
11287static int hclge_pci_init(struct hclge_dev *hdev)
11288{
11289 struct pci_dev *pdev = hdev->pdev;
11290 struct hclge_hw *hw;
11291 int ret;
11292
11293 ret = pci_enable_device(pdev);
11294 if (ret) {
11295 dev_err(&pdev->dev, "failed to enable PCI device\n");
11296 return ret;
11297 }
11298
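 /* Prefer 64-bit DMA addressing; fall back to a 32-bit mask when the
 * platform cannot provide the former.
 */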
11299 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11300 if (ret) {
11301 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11302 if (ret) {
11303 dev_err(&pdev->dev,
11304 "can't set consistent PCI DMA");
11305 goto err_disable_device;
11306 }
11307 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11308 }
11309
11310 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11311 if (ret) {
11312 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11313 goto err_disable_device;
11314 }
11315
11316 pci_set_master(pdev);
11317 hw = &hdev->hw;
11318 hw->io_base = pcim_iomap(pdev, 2, 0);
11319 if (!hw->io_base) {
11320 dev_err(&pdev->dev, "Can't map configuration register space\n");
11321 ret = -ENOMEM;
11322 goto err_clr_master;
11323 }
11324
11325 ret = hclge_dev_mem_map(hdev);
11326 if (ret)
11327 goto err_unmap_io_base;
11328
11329 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11330
11331 return 0;
11332
11333err_unmap_io_base:
11334 pcim_iounmap(pdev, hdev->hw.io_base);
11335err_clr_master:
11336 pci_clear_master(pdev);
11337 pci_release_regions(pdev);
11338err_disable_device:
11339 pci_disable_device(pdev);
11340
11341 return ret;
11342}
11343
11344static void hclge_pci_uninit(struct hclge_dev *hdev)
11345{
11346 struct pci_dev *pdev = hdev->pdev;
11347
11348 if (hdev->hw.mem_base)
11349 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11350
11351 pcim_iounmap(pdev, hdev->hw.io_base);
11352 pci_free_irq_vectors(pdev);
11353 pci_clear_master(pdev);
11354 pci_release_mem_regions(pdev);
11355 pci_disable_device(pdev);
11356}
11357
11358static void hclge_state_init(struct hclge_dev *hdev)
11359{
11360 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11361 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11362 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11363 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11364 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11365 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11366 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11367}
11368
11369static void hclge_state_uninit(struct hclge_dev *hdev)
11370{
11371 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11372 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11373
11374 if (hdev->reset_timer.function)
11375 del_timer_sync(&hdev->reset_timer);
11376 if (hdev->service_task.work.func)
11377 cancel_delayed_work_sync(&hdev->service_task);
11378}
11379
11380static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11381 enum hnae3_reset_type rst_type)
11382{
#define HCLGE_RESET_RETRY_WAIT_MS 500
#define HCLGE_RESET_RETRY_CNT 5
11385
11386 struct hclge_dev *hdev = ae_dev->priv;
11387 int retry_cnt = 0;
11388 int ret;
11389
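 /* Preparation is retried while a reset is still pending, or up to
 * HCLGE_RESET_RETRY_CNT times otherwise, waiting
 * HCLGE_RESET_RETRY_WAIT_MS between attempts with the reset
 * semaphore released so a pending reset can run in between.
 */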
11390retry:
11391 down(&hdev->reset_sem);
11392 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11393 hdev->reset_type = rst_type;
11394 ret = hclge_reset_prepare(hdev);
11395 if (ret || hdev->reset_pending) {
11396 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11397 ret);
11398 if (hdev->reset_pending ||
11399 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11400 dev_err(&hdev->pdev->dev,
11401 "reset_pending:0x%lx, retry_cnt:%d\n",
11402 hdev->reset_pending, retry_cnt);
11403 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11404 up(&hdev->reset_sem);
11405 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11406 goto retry;
11407 }
11408 }
11409
 /* disable misc vector before reset done */
11411 hclge_enable_vector(&hdev->misc_vector, false);
11412 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11413
11414 if (hdev->reset_type == HNAE3_FLR_RESET)
11415 hdev->rst_stats.flr_rst_cnt++;
11416}
11417
11418static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11419{
11420 struct hclge_dev *hdev = ae_dev->priv;
11421 int ret;
11422
11423 hclge_enable_vector(&hdev->misc_vector, true);
11424
11425 ret = hclge_reset_rebuild(hdev);
11426 if (ret)
11427 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11428
11429 hdev->reset_type = HNAE3_NONE_RESET;
11430 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11431 up(&hdev->reset_sem);
11432}
11433
11434static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11435{
11436 u16 i;
11437
11438 for (i = 0; i < hdev->num_alloc_vport; i++) {
11439 struct hclge_vport *vport = &hdev->vport[i];
11440 int ret;
11441
 /* Send a command to clear the VF's FUNC_RST_ING bit */
11443 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11444 if (ret)
11445 dev_warn(&hdev->pdev->dev,
11446 "clear vf(%u) rst failed %d!\n",
11447 vport->vport_id, ret);
11448 }
11449}
11450
11451static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11452{
11453 struct hclge_desc desc;
11454 int ret;
11455
11456 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11457
11458 ret = hclge_cmd_send(&hdev->hw, &desc, 1);

 /* This command is only supported by new firmware and fails with old
 * firmware. -EOPNOTSUPP can only be returned by old firmware running
 * this command, so to stay backward compatible that value is
 * overridden and success is returned.
 */
11465 if (ret && ret != -EOPNOTSUPP) {
11466 dev_err(&hdev->pdev->dev,
11467 "failed to clear hw resource, ret = %d\n", ret);
11468 return ret;
11469 }
11470 return 0;
11471}
11472
11473static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11474{
11475 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11476 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11477}
11478
11479static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11480{
11481 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11482 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11483}
11484
11485static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11486{
11487 struct pci_dev *pdev = ae_dev->pdev;
11488 struct hclge_dev *hdev;
11489 int ret;
11490
11491 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11492 if (!hdev)
11493 return -ENOMEM;
11494
11495 hdev->pdev = pdev;
11496 hdev->ae_dev = ae_dev;
11497 hdev->reset_type = HNAE3_NONE_RESET;
11498 hdev->reset_level = HNAE3_FUNC_RESET;
11499 ae_dev->priv = hdev;
11500
 /* default MPS: a standard frame plus FCS and two vlan tags */
11502 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11503
11504 mutex_init(&hdev->vport_lock);
11505 spin_lock_init(&hdev->fd_rule_lock);
11506 sema_init(&hdev->reset_sem, 1);
11507
11508 ret = hclge_pci_init(hdev);
11509 if (ret)
11510 goto out;
11511
 /* Firmware command queue initialize */
11513 ret = hclge_cmd_queue_init(hdev);
11514 if (ret)
11515 goto err_pci_uninit;
11516
 /* Firmware command initialize */
11518 ret = hclge_cmd_init(hdev);
11519 if (ret)
11520 goto err_cmd_uninit;
11521
11522 ret = hclge_clear_hw_resource(hdev);
11523 if (ret)
11524 goto err_cmd_uninit;
11525
11526 ret = hclge_get_cap(hdev);
11527 if (ret)
11528 goto err_cmd_uninit;
11529
11530 ret = hclge_query_dev_specs(hdev);
11531 if (ret) {
11532 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11533 ret);
11534 goto err_cmd_uninit;
11535 }
11536
11537 ret = hclge_configure(hdev);
11538 if (ret) {
11539 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11540 goto err_cmd_uninit;
11541 }
11542
11543 ret = hclge_init_msi(hdev);
11544 if (ret) {
11545 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11546 goto err_cmd_uninit;
11547 }
11548
11549 ret = hclge_misc_irq_init(hdev);
11550 if (ret)
11551 goto err_msi_uninit;
11552
11553 ret = hclge_alloc_tqps(hdev);
11554 if (ret) {
11555 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11556 goto err_msi_irq_uninit;
11557 }
11558
11559 ret = hclge_alloc_vport(hdev);
11560 if (ret)
11561 goto err_msi_irq_uninit;
11562
11563 ret = hclge_map_tqp(hdev);
11564 if (ret)
11565 goto err_msi_irq_uninit;
11566
11567 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11568 !hnae3_dev_phy_imp_supported(hdev)) {
11569 ret = hclge_mac_mdio_config(hdev);
11570 if (ret)
11571 goto err_msi_irq_uninit;
11572 }
11573
11574 ret = hclge_init_umv_space(hdev);
11575 if (ret)
11576 goto err_mdiobus_unreg;
11577
11578 ret = hclge_mac_init(hdev);
11579 if (ret) {
11580 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11581 goto err_mdiobus_unreg;
11582 }
11583
11584 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11585 if (ret) {
11586 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11587 goto err_mdiobus_unreg;
11588 }
11589
11590 ret = hclge_config_gro(hdev);
11591 if (ret)
11592 goto err_mdiobus_unreg;
11593
11594 ret = hclge_init_vlan_config(hdev);
11595 if (ret) {
11596 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11597 goto err_mdiobus_unreg;
11598 }
11599
11600 ret = hclge_tm_schd_init(hdev);
11601 if (ret) {
11602 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11603 goto err_mdiobus_unreg;
11604 }
11605
11606 ret = hclge_rss_init_cfg(hdev);
11607 if (ret) {
11608 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11609 goto err_mdiobus_unreg;
11610 }
11611
11612 ret = hclge_rss_init_hw(hdev);
11613 if (ret) {
11614 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11615 goto err_mdiobus_unreg;
11616 }
11617
11618 ret = init_mgr_tbl(hdev);
11619 if (ret) {
11620 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11621 goto err_mdiobus_unreg;
11622 }
11623
11624 ret = hclge_init_fd_config(hdev);
11625 if (ret) {
11626 dev_err(&pdev->dev,
11627 "fd table init fail, ret=%d\n", ret);
11628 goto err_mdiobus_unreg;
11629 }
11630
11631 ret = hclge_ptp_init(hdev);
11632 if (ret)
11633 goto err_mdiobus_unreg;
11634
11635 INIT_KFIFO(hdev->mac_tnl_log);
11636
11637 hclge_dcb_ops_set(hdev);
11638
11639 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11640 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11641
 /* Set up the affinity after the service timer setup, because
 * add_timer_on is called from the affinity notify callback.
 */
11645 hclge_misc_affinity_setup(hdev);
11646
11647 hclge_clear_all_event_cause(hdev);
11648 hclge_clear_resetting_state(hdev);
11649
 /* Log and clear the hw errors that already occurred */
11651 if (hnae3_dev_ras_imp_supported(hdev))
11652 hclge_handle_occurred_error(hdev);
11653 else
11654 hclge_handle_all_hns_hw_errors(ae_dev);
11655

 /* Request a delayed reset for error recovery: an immediate global
 * reset on this PF could disturb the pending initialization of
 * other PFs.
 */
11659 if (ae_dev->hw_err_reset_req) {
11660 enum hnae3_reset_type reset_level;
11661
11662 reset_level = hclge_get_reset_level(ae_dev,
11663 &ae_dev->hw_err_reset_req);
11664 hclge_set_def_reset_request(ae_dev, reset_level);
11665 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11666 }
11667
11668 hclge_init_rxd_adv_layout(hdev);
11669
 /* Enable the MISC vector (vector0) */
11671 hclge_enable_vector(&hdev->misc_vector, true);
11672
11673 hclge_state_init(hdev);
11674 hdev->last_reset_time = jiffies;
11675
11676 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11677 HCLGE_DRIVER_NAME);
11678
11679 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11680
11681 return 0;
11682
11683err_mdiobus_unreg:
11684 if (hdev->hw.mac.phydev)
11685 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11686err_msi_irq_uninit:
11687 hclge_misc_irq_uninit(hdev);
11688err_msi_uninit:
11689 pci_free_irq_vectors(pdev);
11690err_cmd_uninit:
11691 hclge_cmd_uninit(hdev);
11692err_pci_uninit:
11693 pcim_iounmap(pdev, hdev->hw.io_base);
11694 pci_clear_master(pdev);
11695 pci_release_regions(pdev);
11696 pci_disable_device(pdev);
11697out:
11698 mutex_destroy(&hdev->vport_lock);
11699 return ret;
11700}
11701
11702static void hclge_stats_clear(struct hclge_dev *hdev)
11703{
11704 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11705}
11706
11707static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11708{
11709 return hclge_config_switch_param(hdev, vf, enable,
11710 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11711}
11712
11713static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11714{
11715 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11716 HCLGE_FILTER_FE_NIC_INGRESS_B,
11717 enable, vf);
11718}
11719
11720static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11721{
11722 int ret;
11723
11724 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11725 if (ret) {
11726 dev_err(&hdev->pdev->dev,
11727 "Set vf %d mac spoof check %s failed, ret=%d\n",
11728 vf, enable ? "on" : "off", ret);
11729 return ret;
11730 }
11731
11732 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11733 if (ret)
11734 dev_err(&hdev->pdev->dev,
11735 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11736 vf, enable ? "on" : "off", ret);
11737
11738 return ret;
11739}
11740
11741static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11742 bool enable)
11743{
11744 struct hclge_vport *vport = hclge_get_vport(handle);
11745 struct hclge_dev *hdev = vport->back;
11746 u32 new_spoofchk = enable ? 1 : 0;
11747 int ret;
11748
11749 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11750 return -EOPNOTSUPP;
11751
11752 vport = hclge_get_vf_vport(hdev, vf);
11753 if (!vport)
11754 return -EINVAL;
11755
11756 if (vport->vf_info.spoofchk == new_spoofchk)
11757 return 0;
11758
 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
 dev_warn(&hdev->pdev->dev,
 "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
 vf);
 else if (enable && hclge_is_umv_space_full(vport, true))
 dev_warn(&hdev->pdev->dev,
 "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
 vf);
11767
11768 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11769 if (ret)
11770 return ret;
11771
11772 vport->vf_info.spoofchk = new_spoofchk;
11773 return 0;
11774}
11775
11776static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11777{
11778 struct hclge_vport *vport = hdev->vport;
11779 int ret;
11780 int i;
11781
11782 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11783 return 0;
11784
 /* resume the vf spoof check state after reset */
11786 for (i = 0; i < hdev->num_alloc_vport; i++) {
11787 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11788 vport->vf_info.spoofchk);
11789 if (ret)
11790 return ret;
11791
11792 vport++;
11793 }
11794
11795 return 0;
11796}
11797
11798static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11799{
11800 struct hclge_vport *vport = hclge_get_vport(handle);
11801 struct hclge_dev *hdev = vport->back;
11802 u32 new_trusted = enable ? 1 : 0;
11803
11804 vport = hclge_get_vf_vport(hdev, vf);
11805 if (!vport)
11806 return -EINVAL;
11807
11808 if (vport->vf_info.trusted == new_trusted)
11809 return 0;
11810
11811 vport->vf_info.trusted = new_trusted;
11812 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11813 hclge_task_schedule(hdev, 0);
11814
11815 return 0;
11816}
11817
11818static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11819{
11820 int ret;
11821 int vf;

	/* reset vf rate to the default value */
11824 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11825 struct hclge_vport *vport = &hdev->vport[vf];
11826
11827 vport->vf_info.max_tx_rate = 0;
11828 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11829 if (ret)
11830 dev_err(&hdev->pdev->dev,
11831 "vf%d failed to reset to default, ret=%d\n",
11832 vf - HCLGE_VF_VPORT_START_NUM, ret);
11833 }
11834}
11835
11836static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11837 int min_tx_rate, int max_tx_rate)
11838{
11839 if (min_tx_rate != 0 ||
11840 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11841 dev_err(&hdev->pdev->dev,
11842 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11843 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11844 return -EINVAL;
11845 }
11846
11847 return 0;
11848}
11849
11850static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11851 int min_tx_rate, int max_tx_rate, bool force)
11852{
11853 struct hclge_vport *vport = hclge_get_vport(handle);
11854 struct hclge_dev *hdev = vport->back;
11855 int ret;
11856
11857 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11858 if (ret)
11859 return ret;
11860
11861 vport = hclge_get_vf_vport(hdev, vf);
11862 if (!vport)
11863 return -EINVAL;
11864
11865 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11866 return 0;
11867
11868 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11869 if (ret)
11870 return ret;
11871
11872 vport->vf_info.max_tx_rate = max_tx_rate;
11873
11874 return 0;
11875}
11876
11877static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11878{
11879 struct hnae3_handle *handle = &hdev->vport->nic;
11880 struct hclge_vport *vport;
11881 int ret;
11882 int vf;

	/* resume the vf max_tx_rate after reset */
11885 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11886 vport = hclge_get_vf_vport(hdev, vf);
11887 if (!vport)
11888 return -EINVAL;
11889
		/* zero means max rate; after reset the firmware has already
		 * restored the max rate, so just continue
		 */
11893 if (!vport->vf_info.max_tx_rate)
11894 continue;
11895
11896 ret = hclge_set_vf_rate(handle, vf, 0,
11897 vport->vf_info.max_tx_rate, true);
11898 if (ret) {
11899 dev_err(&hdev->pdev->dev,
11900 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11901 vf, vport->vf_info.max_tx_rate, ret);
11902 return ret;
11903 }
11904 }
11905
11906 return 0;
11907}
11908
11909static void hclge_reset_vport_state(struct hclge_dev *hdev)
11910{
11911 struct hclge_vport *vport = hdev->vport;
11912 int i;
11913
11914 for (i = 0; i < hdev->num_alloc_vport; i++) {
11915 hclge_vport_stop(vport);
11916 vport++;
11917 }
11918}
11919
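/* rebuild the hardware state after a reset: bring the command queue back up
 * first, then re-program MAC, TSO/GRO, VLAN, TM, RSS, the manager table,
 * flow director and PTP, and finally restore the per-VF settings.
 */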
11920static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11921{
11922 struct hclge_dev *hdev = ae_dev->priv;
11923 struct pci_dev *pdev = ae_dev->pdev;
11924 int ret;
11925
11926 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11927
11928 hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not clear the hardware MAC/VLAN tables, so
	 * the driver's shadow tables are only cleaned for IMP and global
	 * resets.
	 */
11932 if (hdev->reset_type == HNAE3_IMP_RESET ||
11933 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11934 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11935 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11936 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11937 hclge_reset_umv_space(hdev);
11938 }
11939
11940 ret = hclge_cmd_init(hdev);
11941 if (ret) {
11942 dev_err(&pdev->dev, "Cmd queue init failed\n");
11943 return ret;
11944 }
11945
11946 ret = hclge_map_tqp(hdev);
11947 if (ret) {
11948 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11949 return ret;
11950 }
11951
11952 ret = hclge_mac_init(hdev);
11953 if (ret) {
11954 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11955 return ret;
11956 }
11957
11958 ret = hclge_tp_port_init(hdev);
11959 if (ret) {
11960 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11961 ret);
11962 return ret;
11963 }
11964
11965 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11966 if (ret) {
11967 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11968 return ret;
11969 }
11970
11971 ret = hclge_config_gro(hdev);
11972 if (ret)
11973 return ret;
11974
11975 ret = hclge_init_vlan_config(hdev);
11976 if (ret) {
11977 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11978 return ret;
11979 }
11980
11981 ret = hclge_tm_init_hw(hdev, true);
11982 if (ret) {
11983 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11984 return ret;
11985 }
11986
11987 ret = hclge_rss_init_hw(hdev);
11988 if (ret) {
11989 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11990 return ret;
11991 }
11992
11993 ret = init_mgr_tbl(hdev);
11994 if (ret) {
11995 dev_err(&pdev->dev,
11996 "failed to reinit manager table, ret = %d\n", ret);
11997 return ret;
11998 }
11999
12000 ret = hclge_init_fd_config(hdev);
12001 if (ret) {
12002 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12003 return ret;
12004 }
12005
12006 ret = hclge_ptp_init(hdev);
12007 if (ret)
12008 return ret;

	/* log and clear the hw errors that have already occurred */
12011 if (hnae3_dev_ras_imp_supported(hdev))
12012 hclge_handle_occurred_error(hdev);
12013 else
12014 hclge_handle_all_hns_hw_errors(ae_dev);
12015
	/* re-enable the NIC hardware error interrupts, which get disabled
	 * as part of the reset process
	 */
12019 ret = hclge_config_nic_hw_error(hdev, true);
12020 if (ret) {
12021 dev_err(&pdev->dev,
12022 "fail(%d) to re-enable NIC hw error interrupts\n",
12023 ret);
12024 return ret;
12025 }
12026
12027 if (hdev->roce_client) {
12028 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12029 if (ret) {
12030 dev_err(&pdev->dev,
12031 "fail(%d) to re-enable roce ras interrupts\n",
12032 ret);
12033 return ret;
12034 }
12035 }
12036
12037 hclge_reset_vport_state(hdev);
12038 ret = hclge_reset_vport_spoofchk(hdev);
12039 if (ret)
12040 return ret;
12041
12042 ret = hclge_resume_vf_rate(hdev);
12043 if (ret)
12044 return ret;
12045
12046 hclge_init_rxd_adv_layout(hdev);
12047
12048 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12049 HCLGE_DRIVER_NAME);
12050
12051 return 0;
12052}
12053
12054static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12055{
12056 struct hclge_dev *hdev = ae_dev->priv;
12057 struct hclge_mac *mac = &hdev->hw.mac;
12058
12059 hclge_reset_vf_rate(hdev);
12060 hclge_clear_vf_vlan(hdev);
12061 hclge_misc_affinity_teardown(hdev);
12062 hclge_state_uninit(hdev);
12063 hclge_ptp_uninit(hdev);
12064 hclge_uninit_rxd_adv_layout(hdev);
12065 hclge_uninit_mac_table(hdev);
12066 hclge_del_all_fd_entries(hdev);
12067
12068 if (mac->phydev)
12069 mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
12072 hclge_enable_vector(&hdev->misc_vector, false);
12073 synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
12076 hclge_config_mac_tnl_int(hdev, false);
12077 hclge_config_nic_hw_error(hdev, false);
12078 hclge_config_rocee_ras_interrupt(hdev, false);
12079
12080 hclge_cmd_uninit(hdev);
12081 hclge_misc_irq_uninit(hdev);
12082 hclge_pci_uninit(hdev);
12083 mutex_destroy(&hdev->vport_lock);
12084 hclge_uninit_vport_vlan_table(hdev);
12085 ae_dev->priv = NULL;
12086}
12087
12088static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12089{
12090 struct hclge_vport *vport = hclge_get_vport(handle);
12091 struct hclge_dev *hdev = vport->back;
12092
12093 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12094}
12095
12096static void hclge_get_channels(struct hnae3_handle *handle,
12097 struct ethtool_channels *ch)
12098{
12099 ch->max_combined = hclge_get_max_channels(handle);
12100 ch->other_count = 1;
12101 ch->max_other = 1;
12102 ch->combined_count = handle->kinfo.rss_size;
12103}
12104
12105static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12106 u16 *alloc_tqps, u16 *max_rss_size)
12107{
12108 struct hclge_vport *vport = hclge_get_vport(handle);
12109 struct hclge_dev *hdev = vport->back;
12110
12111 *alloc_tqps = vport->alloc_tqps;
12112 *max_rss_size = hdev->pf_rss_size_max;
12113}
12114
12115static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12116 bool rxfh_configured)
12117{
12118 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12119 struct hclge_vport *vport = hclge_get_vport(handle);
12120 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12121 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12122 struct hclge_dev *hdev = vport->back;
12123 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12124 u16 cur_rss_size = kinfo->rss_size;
12125 u16 cur_tqps = kinfo->num_tqps;
12126 u16 tc_valid[HCLGE_MAX_TC_NUM];
12127 u16 roundup_size;
12128 u32 *rss_indir;
12129 unsigned int i;
12130 int ret;
12131
12132 kinfo->req_rss_size = new_tqps_num;
12133
12134 ret = hclge_tm_vport_map_update(hdev);
12135 if (ret) {
12136 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12137 return ret;
12138 }
12139
12140 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12141 roundup_size = ilog2(roundup_size);
12142
12143 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12144 tc_valid[i] = 0;
12145
12146 if (!(hdev->hw_tc_map & BIT(i)))
12147 continue;
12148
12149 tc_valid[i] = 1;
12150 tc_size[i] = roundup_size;
12151 tc_offset[i] = kinfo->rss_size * i;
12152 }
12153 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12154 if (ret)
12155 return ret;

	/* RSS indirection table has been configured by user */
12158 if (rxfh_configured)
12159 goto out;

	/* reinitialize the RSS indirection table for the new RSS size */
12162 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12163 GFP_KERNEL);
12164 if (!rss_indir)
12165 return -ENOMEM;
12166
12167 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12168 rss_indir[i] = i % kinfo->rss_size;
12169
12170 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12171 if (ret)
12172 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12173 ret);
12174
12175 kfree(rss_indir);
12176
12177out:
12178 if (!ret)
12179 dev_info(&hdev->pdev->dev,
12180 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12181 cur_rss_size, kinfo->rss_size,
12182 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12183
12184 return ret;
12185}
12186
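/* ask the firmware how many 32-bit and 64-bit registers are available for
 * dumping; both counts come back in a single query descriptor.
 */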
12187static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12188 u32 *regs_num_64_bit)
12189{
12190 struct hclge_desc desc;
12191 u32 total_num;
12192 int ret;
12193
12194 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12195 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12196 if (ret) {
12197 dev_err(&hdev->pdev->dev,
12198 "Query register number cmd failed, ret = %d.\n", ret);
12199 return ret;
12200 }
12201
12202 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12203 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12204
12205 total_num = *regs_num_32_bit + *regs_num_64_bit;
12206 if (!total_num)
12207 return -EINVAL;
12208
12209 return 0;
12210}
12211
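/* read all 32-bit registers with one chained command: the first BD yields
 * HCLGE_32_BIT_REG_RTN_DATANUM - HCLGE_32_BIT_DESC_NODATA_LEN values from
 * its data area, while continuation BDs are consumed whole.
 */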
12212static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12213 void *data)
12214{
12215#define HCLGE_32_BIT_REG_RTN_DATANUM 8
12216#define HCLGE_32_BIT_DESC_NODATA_LEN 2
12217
12218 struct hclge_desc *desc;
12219 u32 *reg_val = data;
12220 __le32 *desc_data;
12221 int nodata_num;
12222 int cmd_num;
12223 int i, k, n;
12224 int ret;
12225
12226 if (regs_num == 0)
12227 return 0;
12228
12229 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12230 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12231 HCLGE_32_BIT_REG_RTN_DATANUM);
12232 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12233 if (!desc)
12234 return -ENOMEM;
12235
12236 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12237 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12238 if (ret) {
12239 dev_err(&hdev->pdev->dev,
12240 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12241 kfree(desc);
12242 return ret;
12243 }
12244
12245 for (i = 0; i < cmd_num; i++) {
12246 if (i == 0) {
12247 desc_data = (__le32 *)(&desc[i].data[0]);
12248 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12249 } else {
12250 desc_data = (__le32 *)(&desc[i]);
12251 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12252 }
12253 for (k = 0; k < n; k++) {
12254 *reg_val++ = le32_to_cpu(*desc_data++);
12255
12256 regs_num--;
12257 if (!regs_num)
12258 break;
12259 }
12260 }
12261
12262 kfree(desc);
12263 return 0;
12264}
12265
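/* 64-bit counterpart of hclge_get_32_bit_regs(), with one 64-bit word
 * reserved at the head of the first BD.
 */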
12266static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12267 void *data)
12268{
12269#define HCLGE_64_BIT_REG_RTN_DATANUM 4
12270#define HCLGE_64_BIT_DESC_NODATA_LEN 1
12271
12272 struct hclge_desc *desc;
12273 u64 *reg_val = data;
12274 __le64 *desc_data;
12275 int nodata_len;
12276 int cmd_num;
12277 int i, k, n;
12278 int ret;
12279
12280 if (regs_num == 0)
12281 return 0;
12282
12283 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12284 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12285 HCLGE_64_BIT_REG_RTN_DATANUM);
12286 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12287 if (!desc)
12288 return -ENOMEM;
12289
12290 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12291 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12292 if (ret) {
12293 dev_err(&hdev->pdev->dev,
12294 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12295 kfree(desc);
12296 return ret;
12297 }
12298
12299 for (i = 0; i < cmd_num; i++) {
12300 if (i == 0) {
12301 desc_data = (__le64 *)(&desc[i].data[0]);
12302 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12303 } else {
12304 desc_data = (__le64 *)(&desc[i]);
12305 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12306 }
12307 for (k = 0; k < n; k++) {
12308 *reg_val++ = le64_to_cpu(*desc_data++);
12309
12310 regs_num--;
12311 if (!regs_num)
12312 break;
12313 }
12314 }
12315
12316 kfree(desc);
12317 return 0;
12318}
12319
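/* layout of the register dump: values are grouped in lines of
 * REG_NUM_PER_LINE u32s, and every block is terminated by one to four
 * SEPARATOR_VALUE words so the blocks stay aligned and easy to delimit.
 */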
12320#define MAX_SEPARATE_NUM 4
12321#define SEPARATOR_VALUE 0xFDFCFBFA
12322#define REG_NUM_PER_LINE 4
12323#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12324#define REG_SEPARATOR_LINE 1
12325#define REG_NUM_REMAIN_MASK 3
12326
12327int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12328{
12329 int i;

	/* initialize every command BD except the last one */
12332 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12333 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12334 true);
12335 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12336 }

	/* initialize the last command BD */
12339 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12340
12341 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12342}
12343
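/* read how many BDs each DFX register type needs; the counts are packed
 * across the BDs returned by hclge_query_bd_num_cmd_send() and located
 * via hclge_dfx_bd_offset_list.
 */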
12344static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12345 int *bd_num_list,
12346 u32 type_num)
12347{
12348 u32 entries_per_desc, desc_index, index, offset, i;
12349 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12350 int ret;
12351
12352 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12353 if (ret) {
12354 dev_err(&hdev->pdev->dev,
12355 "Get dfx bd num fail, status is %d.\n", ret);
12356 return ret;
12357 }
12358
12359 entries_per_desc = ARRAY_SIZE(desc[0].data);
12360 for (i = 0; i < type_num; i++) {
12361 offset = hclge_dfx_bd_offset_list[i];
12362 index = offset % entries_per_desc;
12363 desc_index = offset / entries_per_desc;
12364 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12365 }
12366
12367 return ret;
12368}
12369
12370static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12371 struct hclge_desc *desc_src, int bd_num,
12372 enum hclge_opcode_type cmd)
12373{
12374 struct hclge_desc *desc = desc_src;
12375 int i, ret;
12376
12377 hclge_cmd_setup_basic_desc(desc, cmd, true);
12378 for (i = 0; i < bd_num - 1; i++) {
12379 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12380 desc++;
12381 hclge_cmd_setup_basic_desc(desc, cmd, true);
12382 }
12383
12384 desc = desc_src;
12385 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12386 if (ret)
12387 dev_err(&hdev->pdev->dev,
12388 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12389 cmd, ret);
12390
12391 return ret;
12392}
12393
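/* copy the register values out of the DFX descriptors and pad the block
 * with separator words; returns the number of u32s written.
 */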
12394static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12395 void *data)
12396{
12397 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12398 struct hclge_desc *desc = desc_src;
12399 u32 *reg = data;
12400
12401 entries_per_desc = ARRAY_SIZE(desc->data);
12402 reg_num = entries_per_desc * bd_num;
12403 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12404 for (i = 0; i < reg_num; i++) {
12405 index = i % entries_per_desc;
12406 desc_index = i / entries_per_desc;
12407 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12408 }
12409 for (i = 0; i < separator_num; i++)
12410 *reg++ = SEPARATOR_VALUE;
12411
12412 return reg_num + separator_num;
12413}
12414
12415static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12416{
12417 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12418 int data_len_per_desc, bd_num, i;
12419 int *bd_num_list;
12420 u32 data_len;
12421 int ret;
12422
12423 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12424 if (!bd_num_list)
12425 return -ENOMEM;
12426
12427 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12428 if (ret) {
12429 dev_err(&hdev->pdev->dev,
12430 "Get dfx reg bd num fail, status is %d.\n", ret);
12431 goto out;
12432 }
12433
12434 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12435 *len = 0;
12436 for (i = 0; i < dfx_reg_type_num; i++) {
12437 bd_num = bd_num_list[i];
12438 data_len = data_len_per_desc * bd_num;
12439 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12440 }
12441
12442out:
12443 kfree(bd_num_list);
12444 return ret;
12445}
12446
12447static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12448{
12449 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12450 int bd_num, bd_num_max, buf_len, i;
12451 struct hclge_desc *desc_src;
12452 int *bd_num_list;
12453 u32 *reg = data;
12454 int ret;
12455
12456 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12457 if (!bd_num_list)
12458 return -ENOMEM;
12459
12460 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12461 if (ret) {
12462 dev_err(&hdev->pdev->dev,
12463 "Get dfx reg bd num fail, status is %d.\n", ret);
12464 goto out;
12465 }
12466
12467 bd_num_max = bd_num_list[0];
12468 for (i = 1; i < dfx_reg_type_num; i++)
12469 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12470
12471 buf_len = sizeof(*desc_src) * bd_num_max;
12472 desc_src = kzalloc(buf_len, GFP_KERNEL);
12473 if (!desc_src) {
12474 ret = -ENOMEM;
12475 goto out;
12476 }
12477
12478 for (i = 0; i < dfx_reg_type_num; i++) {
12479 bd_num = bd_num_list[i];
12480 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12481 hclge_dfx_reg_opcode_list[i]);
12482 if (ret) {
12483 dev_err(&hdev->pdev->dev,
12484 "Get dfx reg fail, status is %d.\n", ret);
12485 break;
12486 }
12487
12488 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12489 }
12490
12491 kfree(desc_src);
12492out:
12493 kfree(bd_num_list);
12494 return ret;
12495}
12496
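/* dump the registers read directly from BAR space: command-queue and
 * common registers once, ring registers per TQP and interrupt registers
 * per in-use vector; returns the number of u32s written.
 */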
12497static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12498 struct hnae3_knic_private_info *kinfo)
12499{
12500#define HCLGE_RING_REG_OFFSET 0x200
12501#define HCLGE_RING_INT_REG_OFFSET 0x4
12502
12503 int i, j, reg_num, separator_num;
12504 int data_num_sum;
12505 u32 *reg = data;

	/* fetch the per-PF registers from the PF PCIe register space */
12508 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12509 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12510 for (i = 0; i < reg_num; i++)
12511 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12512 for (i = 0; i < separator_num; i++)
12513 *reg++ = SEPARATOR_VALUE;
12514 data_num_sum = reg_num + separator_num;
12515
12516 reg_num = ARRAY_SIZE(common_reg_addr_list);
12517 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12518 for (i = 0; i < reg_num; i++)
12519 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12520 for (i = 0; i < separator_num; i++)
12521 *reg++ = SEPARATOR_VALUE;
12522 data_num_sum += reg_num + separator_num;
12523
12524 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12525 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12526 for (j = 0; j < kinfo->num_tqps; j++) {
12527 for (i = 0; i < reg_num; i++)
12528 *reg++ = hclge_read_dev(&hdev->hw,
12529 ring_reg_addr_list[i] +
12530 HCLGE_RING_REG_OFFSET * j);
12531 for (i = 0; i < separator_num; i++)
12532 *reg++ = SEPARATOR_VALUE;
12533 }
12534 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12535
12536 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12537 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12538 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12539 for (i = 0; i < reg_num; i++)
12540 *reg++ = hclge_read_dev(&hdev->hw,
12541 tqp_intr_reg_addr_list[i] +
12542 HCLGE_RING_INT_REG_OFFSET * j);
12543 for (i = 0; i < separator_num; i++)
12544 *reg++ = SEPARATOR_VALUE;
12545 }
12546 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12547
12548 return data_num_sum;
12549}
12550
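/* ethtool .get_regs_len: total dump size in bytes, covering the direct
 * BAR-space blocks, the firmware-queried 32/64-bit registers and the DFX
 * registers, each rounded up to whole separator-terminated lines.
 */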
12551static int hclge_get_regs_len(struct hnae3_handle *handle)
12552{
12553 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12554 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12555 struct hclge_vport *vport = hclge_get_vport(handle);
12556 struct hclge_dev *hdev = vport->back;
12557 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12558 int regs_lines_32_bit, regs_lines_64_bit;
12559 int ret;
12560
	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12562 if (ret) {
12563 dev_err(&hdev->pdev->dev,
12564 "Get register number failed, ret = %d.\n", ret);
12565 return ret;
12566 }
12567
12568 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12569 if (ret) {
12570 dev_err(&hdev->pdev->dev,
12571 "Get dfx reg len failed, ret = %d.\n", ret);
12572 return ret;
12573 }
12574
12575 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12576 REG_SEPARATOR_LINE;
12577 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12578 REG_SEPARATOR_LINE;
12579 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12580 REG_SEPARATOR_LINE;
12581 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12582 REG_SEPARATOR_LINE;
12583 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12584 REG_SEPARATOR_LINE;
12585 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12586 REG_SEPARATOR_LINE;
12587
12588 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12589 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12590 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12591}
12592
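/* ethtool .get_regs: fill the dump buffer in exactly the order the length
 * was computed in hclge_get_regs_len().
 */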
12593static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12594 void *data)
12595{
12596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12597 struct hclge_vport *vport = hclge_get_vport(handle);
12598 struct hclge_dev *hdev = vport->back;
12599 u32 regs_num_32_bit, regs_num_64_bit;
12600 int i, reg_num, separator_num, ret;
12601 u32 *reg = data;
12602
12603 *version = hdev->fw_version;
12604
	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12606 if (ret) {
12607 dev_err(&hdev->pdev->dev,
12608 "Get register number failed, ret = %d.\n", ret);
12609 return;
12610 }
12611
12612 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12613
12614 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12615 if (ret) {
12616 dev_err(&hdev->pdev->dev,
12617 "Get 32 bit register failed, ret = %d.\n", ret);
12618 return;
12619 }
12620 reg_num = regs_num_32_bit;
12621 reg += reg_num;
12622 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12623 for (i = 0; i < separator_num; i++)
12624 *reg++ = SEPARATOR_VALUE;
12625
12626 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12627 if (ret) {
12628 dev_err(&hdev->pdev->dev,
12629 "Get 64 bit register failed, ret = %d.\n", ret);
12630 return;
12631 }
12632 reg_num = regs_num_64_bit * 2;
12633 reg += reg_num;
12634 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12635 for (i = 0; i < separator_num; i++)
12636 *reg++ = SEPARATOR_VALUE;
12637
12638 ret = hclge_get_dfx_reg(hdev, reg);
12639 if (ret)
12640 dev_err(&hdev->pdev->dev,
12641 "Get dfx register failed, ret = %d.\n", ret);
12642}
12643
12644static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12645{
12646 struct hclge_set_led_state_cmd *req;
12647 struct hclge_desc desc;
12648 int ret;
12649
12650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12651
12652 req = (struct hclge_set_led_state_cmd *)desc.data;
12653 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12654 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12655
12656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12657 if (ret)
12658 dev_err(&hdev->pdev->dev,
12659 "Send set led state cmd error, ret =%d\n", ret);
12660
12661 return ret;
12662}
12663
12664enum hclge_led_status {
12665 HCLGE_LED_OFF,
12666 HCLGE_LED_ON,
12667 HCLGE_LED_NO_CHANGE = 0xFF,
12668};
12669
12670static int hclge_set_led_id(struct hnae3_handle *handle,
12671 enum ethtool_phys_id_state status)
12672{
12673 struct hclge_vport *vport = hclge_get_vport(handle);
12674 struct hclge_dev *hdev = vport->back;
12675
12676 switch (status) {
12677 case ETHTOOL_ID_ACTIVE:
12678 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12679 case ETHTOOL_ID_INACTIVE:
12680 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12681 default:
12682 return -EINVAL;
12683 }
12684}
12685
12686static void hclge_get_link_mode(struct hnae3_handle *handle,
12687 unsigned long *supported,
12688 unsigned long *advertising)
12689{
12690 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12691 struct hclge_vport *vport = hclge_get_vport(handle);
12692 struct hclge_dev *hdev = vport->back;
12693 unsigned int idx = 0;
12694
12695 for (; idx < size; idx++) {
12696 supported[idx] = hdev->hw.mac.supported[idx];
12697 advertising[idx] = hdev->hw.mac.advertising[idx];
12698 }
12699}
12700
12701static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12702{
12703 struct hclge_vport *vport = hclge_get_vport(handle);
12704 struct hclge_dev *hdev = vport->back;
12705 bool gro_en_old = hdev->gro_en;
12706 int ret;
12707
12708 hdev->gro_en = enable;
12709 ret = hclge_config_gro(hdev);
12710 if (ret)
12711 hdev->gro_en = gro_en_old;
12712
12713 return ret;
12714}
12715
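/* service-task worker: re-apply any pending promiscuous-mode changes for
 * the PF and every VF; unicast/multicast promiscuous requests are honoured
 * only for trusted VFs, while broadcast can always be configured.
 */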
12716static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12717{
12718 struct hclge_vport *vport = &hdev->vport[0];
12719 struct hnae3_handle *handle = &vport->nic;
12720 u8 tmp_flags;
12721 int ret;
12722 u16 i;
12723
12724 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12725 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12726 vport->last_promisc_flags = vport->overflow_promisc_flags;
12727 }
12728
12729 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12730 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12731 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12732 tmp_flags & HNAE3_MPE);
12733 if (!ret) {
12734 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12735 &vport->state);
12736 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12737 &vport->state);
12738 }
12739 }
12740
12741 for (i = 1; i < hdev->num_alloc_vport; i++) {
12742 bool uc_en = false;
12743 bool mc_en = false;
12744 bool bc_en;
12745
12746 vport = &hdev->vport[i];
12747
12748 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12749 &vport->state))
12750 continue;
12751
12752 if (vport->vf_info.trusted) {
12753 uc_en = vport->vf_info.request_uc_en > 0;
12754 mc_en = vport->vf_info.request_mc_en > 0;
12755 }
12756 bc_en = vport->vf_info.request_bc_en > 0;
12757
12758 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12759 mc_en, bc_en);
12760 if (ret) {
12761 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12762 &vport->state);
12763 return;
12764 }
12765 hclge_set_vport_vlan_fltr_change(vport);
12766 }
12767}
12768
12769static bool hclge_module_existed(struct hclge_dev *hdev)
12770{
12771 struct hclge_desc desc;
12772 u32 existed;
12773 int ret;
12774
12775 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12776 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12777 if (ret) {
12778 dev_err(&hdev->pdev->dev,
12779 "failed to get SFP exist state, ret = %d\n", ret);
12780 return false;
12781 }
12782
12783 existed = le32_to_cpu(desc.data[0]);
12784
12785 return existed != 0;
12786}

/* a whole SFP eeprom read takes HCLGE_SFP_INFO_CMD_NUM chained BDs and
 * returns at most HCLGE_SFP_INFO_MAX_LEN bytes per command, so callers
 * loop until they have the requested length.
 */
12791static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12792 u32 len, u8 *data)
12793{
12794 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12795 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12796 u16 read_len;
12797 u16 copy_len;
12798 int ret;
12799 int i;

	/* set up all sfp BDs */
12802 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12803 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12804 true);

		/* bd0~bd4 need the next flag */
12807 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12808 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12809 }

	/* set up bd0, which carries the read offset and read length */
12812 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12813 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12814 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12815 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12816
12817 ret = hclge_cmd_send(&hdev->hw, desc, i);
12818 if (ret) {
12819 dev_err(&hdev->pdev->dev,
12820 "failed to get SFP eeprom info, ret = %d\n", ret);
12821 return 0;
12822 }

	/* copy sfp info from bd0 to the output buffer */
12825 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12826 memcpy(data, sfp_info_bd0->data, copy_len);
12827 read_len = copy_len;

	/* copy sfp info from bd1~bd5 to the output buffer if needed */
12830 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12831 if (read_len >= len)
12832 return read_len;
12833
12834 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12835 memcpy(data + read_len, desc[i].data, copy_len);
12836 read_len += copy_len;
12837 }
12838
12839 return read_len;
12840}
12841
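/* ethtool module EEPROM read: only fibre ports are supported; keep
 * fetching until the requested length is read, and fail with -EIO if the
 * firmware returns no data.
 */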
12842static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12843 u32 len, u8 *data)
12844{
12845 struct hclge_vport *vport = hclge_get_vport(handle);
12846 struct hclge_dev *hdev = vport->back;
12847 u32 read_len = 0;
12848 u16 data_len;
12849
12850 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12851 return -EOPNOTSUPP;
12852
12853 if (!hclge_module_existed(hdev))
12854 return -ENXIO;
12855
12856 while (read_len < len) {
12857 data_len = hclge_get_sfp_eeprom_info(hdev,
12858 offset + read_len,
12859 len - read_len,
12860 data + read_len);
12861 if (!data_len)
12862 return -EIO;
12863
12864 read_len += data_len;
12865 }
12866
12867 return 0;
12868}
12869
12870static const struct hnae3_ae_ops hclge_ops = {
12871 .init_ae_dev = hclge_init_ae_dev,
12872 .uninit_ae_dev = hclge_uninit_ae_dev,
12873 .reset_prepare = hclge_reset_prepare_general,
12874 .reset_done = hclge_reset_done,
12875 .init_client_instance = hclge_init_client_instance,
12876 .uninit_client_instance = hclge_uninit_client_instance,
12877 .map_ring_to_vector = hclge_map_ring_to_vector,
12878 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12879 .get_vector = hclge_get_vector,
12880 .put_vector = hclge_put_vector,
12881 .set_promisc_mode = hclge_set_promisc_mode,
12882 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12883 .set_loopback = hclge_set_loopback,
12884 .start = hclge_ae_start,
12885 .stop = hclge_ae_stop,
12886 .client_start = hclge_client_start,
12887 .client_stop = hclge_client_stop,
12888 .get_status = hclge_get_status,
12889 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12890 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12891 .get_media_type = hclge_get_media_type,
12892 .check_port_speed = hclge_check_port_speed,
12893 .get_fec = hclge_get_fec,
12894 .set_fec = hclge_set_fec,
12895 .get_rss_key_size = hclge_get_rss_key_size,
12896 .get_rss = hclge_get_rss,
12897 .set_rss = hclge_set_rss,
12898 .set_rss_tuple = hclge_set_rss_tuple,
12899 .get_rss_tuple = hclge_get_rss_tuple,
12900 .get_tc_size = hclge_get_tc_size,
12901 .get_mac_addr = hclge_get_mac_addr,
12902 .set_mac_addr = hclge_set_mac_addr,
12903 .do_ioctl = hclge_do_ioctl,
12904 .add_uc_addr = hclge_add_uc_addr,
12905 .rm_uc_addr = hclge_rm_uc_addr,
12906 .add_mc_addr = hclge_add_mc_addr,
12907 .rm_mc_addr = hclge_rm_mc_addr,
12908 .set_autoneg = hclge_set_autoneg,
12909 .get_autoneg = hclge_get_autoneg,
12910 .restart_autoneg = hclge_restart_autoneg,
12911 .halt_autoneg = hclge_halt_autoneg,
12912 .get_pauseparam = hclge_get_pauseparam,
12913 .set_pauseparam = hclge_set_pauseparam,
12914 .set_mtu = hclge_set_mtu,
12915 .reset_queue = hclge_reset_tqp,
12916 .get_stats = hclge_get_stats,
12917 .get_mac_stats = hclge_get_mac_stat,
12918 .update_stats = hclge_update_stats,
12919 .get_strings = hclge_get_strings,
12920 .get_sset_count = hclge_get_sset_count,
12921 .get_fw_version = hclge_get_fw_version,
12922 .get_mdix_mode = hclge_get_mdix_mode,
12923 .enable_vlan_filter = hclge_enable_vlan_filter,
12924 .set_vlan_filter = hclge_set_vlan_filter,
12925 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12926 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12927 .reset_event = hclge_reset_event,
12928 .get_reset_level = hclge_get_reset_level,
12929 .set_default_reset_request = hclge_set_def_reset_request,
12930 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12931 .set_channels = hclge_set_channels,
12932 .get_channels = hclge_get_channels,
12933 .get_regs_len = hclge_get_regs_len,
12934 .get_regs = hclge_get_regs,
12935 .set_led_id = hclge_set_led_id,
12936 .get_link_mode = hclge_get_link_mode,
12937 .add_fd_entry = hclge_add_fd_entry,
12938 .del_fd_entry = hclge_del_fd_entry,
12939 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12940 .get_fd_rule_info = hclge_get_fd_rule_info,
12941 .get_fd_all_rules = hclge_get_all_rules,
12942 .enable_fd = hclge_enable_fd,
12943 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12944 .dbg_read_cmd = hclge_dbg_read_cmd,
12945 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12946 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12947 .ae_dev_resetting = hclge_ae_dev_resetting,
12948 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12949 .set_gro_en = hclge_gro_en,
12950 .get_global_queue_id = hclge_covert_handle_qid_global,
12951 .set_timer_task = hclge_set_timer_task,
12952 .mac_connect_phy = hclge_mac_connect_phy,
12953 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12954 .get_vf_config = hclge_get_vf_config,
12955 .set_vf_link_state = hclge_set_vf_link_state,
12956 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12957 .set_vf_trust = hclge_set_vf_trust,
12958 .set_vf_rate = hclge_set_vf_rate,
12959 .set_vf_mac = hclge_set_vf_mac,
12960 .get_module_eeprom = hclge_get_module_eeprom,
12961 .get_cmdq_stat = hclge_get_cmdq_stat,
12962 .add_cls_flower = hclge_add_cls_flower,
12963 .del_cls_flower = hclge_del_cls_flower,
12964 .cls_flower_active = hclge_is_cls_flower_active,
12965 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12966 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12967 .set_tx_hwts_info = hclge_ptp_set_tx_info,
12968 .get_rx_hwts = hclge_ptp_get_rx_hwts,
12969 .get_ts_info = hclge_ptp_get_ts_info,
12970};
12971
12972static struct hnae3_ae_algo ae_algo = {
12973 .ops = &hclge_ops,
12974 .pdev_id_table = ae_algo_pci_tbl,
12975};
12976
12977static int hclge_init(void)
12978{
12979 pr_info("%s is initializing\n", HCLGE_NAME);
12980
12981 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12982 if (!hclge_wq) {
12983 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12984 return -ENOMEM;
12985 }
12986
12987 hnae3_register_ae_algo(&ae_algo);
12988
12989 return 0;
12990}
12991
12992static void hclge_exit(void)
12993{
12994 hnae3_unregister_ae_algo(&ae_algo);
12995 destroy_workqueue(hclge_wq);
12996}
12997module_init(hclge_init);
12998module_exit(hclge_exit);
12999
13000MODULE_LICENSE("GPL");
13001MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13002MODULE_DESCRIPTION("HCLGE Driver");
13003MODULE_VERSION(HCLGE_MOD_VERSION);
13004