// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

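/* ethtool statistics strings for the MAC. Each entry pairs the string shown
 * by "ethtool -S" with the byte offset of the matching counter inside
 * struct hclge_mac_stats, read back via HCLGE_STATS_READ().
 */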
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

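/* Default 40-byte RSS hash key. This appears to be the widely used default
 * Toeplitz key shared by several NIC drivers.
 */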
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

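/* Flow director key layout tables: meta_data_key_info gives the bit width of
 * each meta data field, tuple_key_info gives each tuple field's bit width,
 * key building option and, where applicable, the offsets of its value and
 * mask inside struct hclge_fd_rule (-1 when the field has no rule storage).
 */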
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

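/* Read the MAC statistics on firmware that only supports the fixed-size
 * query (opcode HCLGE_OPC_STATS_MAC): the counters come back in a chain of
 * HCLGE_MAC_CMD_NUM descriptors and are accumulated into hdev->mac_stats,
 * whose u64 fields are laid out in command order.
 */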
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

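/* Query how many descriptors the full MAC statistics dump needs. Judging by
 * the pointer walk above and the math below, the first descriptor carries
 * three u64 counters beside the command head and every later descriptor
 * carries four, so desc_num = 1 + roundup(reg_num - 3, 4) / 4.
 */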
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

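/* Accumulate the per-queue RX and then TX packet counters into the soft
 * tqp_stats copy; each queue is queried with its own one-descriptor command.
 */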
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);

		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);

		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

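/* Copy the accumulated per-queue counters into the ethtool buffer, TX
 * queues first, then RX queues, matching the string order produced by
 * hclge_tqps_get_strings() below.
 */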
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

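/* The HCLGE_STATE_STATISTICS_UPDATING bit makes concurrent callers back off
 * instead of issuing overlapping firmware queries.
 */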
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
				   HNAE3_SUPPORT_PHY_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

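/* Translate the firmware speed code from the configuration into a speed
 * value. The case labels are the raw codes used by the firmware, which is
 * why they are not in numerical order.
 */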
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

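/* FEC capability tracks the link speed: BaseR for 10G/40G, BaseR or RS for
 * 25G/50G, RS only for 100G/200G; other speeds advertise no FEC.
 */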
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

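/* Pick the highest speed advertised in speed_ability, falling back to 1G
 * when no bit is set.
 */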
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* the two-step shift combines the high 16 bits into bits 47:32 of
	 * the 48-bit MAC address (equivalent to shifting left by 32)
	 */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
	 * power of 2, instead of reading out directly. This would
	 * be more flexible for future changes and expansions.
	 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
	 * it does not make sense if PF's field is 0. In this case, PF and VF
	 * have the same max rss size field.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when send to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

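/* Older firmware may leave some spec fields as zero; fall back to the
 * driver defaults for any field the firmware did not report.
 */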
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

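/* Allocate the software queue-pair table and point each queue at its
 * register window; queues beyond HCLGE_TQP_MAX_SIZE_DEV_V2 live in the
 * extended register region.
 */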
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

1826static int hclge_alloc_vport(struct hclge_dev *hdev)
1827{
1828 struct pci_dev *pdev = hdev->pdev;
1829 struct hclge_vport *vport;
1830 u32 tqp_main_vport;
1831 u32 tqp_per_vport;
1832 int num_vport, i;
1833 int ret;
1834
1835
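	/* we need one vport for the main NIC of the PF, plus one per VF */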
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

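	/* alloc the same number of TQPs for every vport; the main vport
	 * also takes any remainder
	 */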
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)

	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

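/* Get the number of PFC-enabled TCs that have a private buffer */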
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

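/* Get the number of PFC-disabled TCs that still have a private buffer */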
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
				 hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
				  / BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

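	/* alloc tx buffer for each enabled TC */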
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

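	/* let the last TC be cleared first */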
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* clear the private buffer of a TC without PFC */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

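	/* let the last TC be cleared first */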
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* reduce the number of PFC TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP	0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

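/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successfully, negative: fail
 */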
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
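	/* when DCB is not supported, rx private buffer is not allocated */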
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

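	/* try to decrease the buffer size */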
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

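	/* alloc private buffer TCs */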
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

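		/* the first descriptor set the NEXT bit to 1 */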
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

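	/* send 2 descriptors at one time */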
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

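		/* the first descriptor set the NEXT bit to 1 */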
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

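	/* send 2 descriptors at one time */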
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

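/* hclge_buffer_alloc: allocate tx/rx packet buffers and configure the
 * waterlines and thresholds for them
 */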
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->num_nic_msi;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	case HCLGE_MAC_SPEED_200G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 8);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (!mac->support_autoneg && mac->speed == speed &&
	    mac->duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}

static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}

static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret)
		return ret;

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret)
			return ret;
	}

	mac->link = 0;

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret)
			return ret;
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_loopback(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task, 0);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task, 0);
}

static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task, 0);
}

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task,
				    delay_time);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	return 0;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*link_status = HCLGE_LINK_STATUS_DOWN;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
		return 0;

	return hclge_get_mac_link_status(hdev, link_status);
}

static void hclge_push_link_status(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
			continue;

		ret = hclge_push_vf_link_status(vport);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to push link status to vf%u, ret = %d\n",
				i, ret);
		}
	}
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	int state;
	int ret;

	if (!client)
		return;

	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
		return;

	ret = hclge_get_mac_phy_link(hdev, &state);
	if (ret) {
		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
		return;
	}

	if (state != hdev->hw.mac.link) {
		hdev->hw.mac.link = state;
		client->ops->link_status_change(handle, state);
		hclge_config_mac_tnl_int(hdev, state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, state);

		hclge_push_link_status(hdev);
	}

	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}

static void hclge_update_port_capability(struct hclge_dev *hdev,
					 struct hclge_mac *mac)
{
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

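	/* firmware can not identify backplane type, the media type read
	 * from configuration can help deal with it
	 */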
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		linkmode_zero(mac->advertising);
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

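	/* In some cases, the mac speed got from IMP may be 0; it should not
	 * be set to mac->speed.
	 */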
	if (!le32_to_cpu(resp->speed))
		return 0;

	mac->speed = le32_to_cpu(resp->speed);

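	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */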
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		mac->speed_type = QUERY_ACTIVE_SPEED;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}

static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
					struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	u32 supported, advertising, lp_advertising;
	struct hclge_dev *hdev = vport->back;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get phy link ksetting, ret = %d.\n", ret);
		return ret;
	}

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	cmd->base.autoneg = req0->autoneg;
	cmd->base.speed = le32_to_cpu(req0->speed);
	cmd->base.duplex = req0->duplex;
	cmd->base.port = req0->port;
	cmd->base.transceiver = req0->transceiver;
	cmd->base.phy_address = req0->phy_address;
	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
	supported = le32_to_cpu(req0->supported);
	advertising = le32_to_cpu(req0->advertising);
	lp_advertising = le32_to_cpu(req0->lp_advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	cmd->base.master_slave_cfg = req1->master_slave_cfg;
	cmd->base.master_slave_state = req1->master_slave_state;

	return 0;
}

static int
hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
			     const struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	struct hclge_dev *hdev = vport->back;
	u32 advertising;
	int ret;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	req0->autoneg = cmd->base.autoneg;
	req0->speed = cpu_to_le32(cmd->base.speed);
	req0->duplex = cmd->base.duplex;
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	req0->advertising = cpu_to_le32(advertising);
	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	req1->master_slave_cfg = cmd->base.master_slave_cfg;

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set phy link ksettings, ret = %d.\n", ret);
		return ret;
	}

	hdev->hw.mac.autoneg = cmd->base.autoneg;
	hdev->hw.mac.speed = cmd->base.speed;
	hdev->hw.mac.duplex = cmd->base.duplex;
	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);

	return 0;
}

static int hclge_update_tp_port_info(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;
	int ret;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
	if (ret)
		return ret;

	hdev->hw.mac.autoneg = cmd.base.autoneg;
	hdev->hw.mac.speed = cmd.base.speed;
	hdev->hw.mac.duplex = cmd.base.duplex;

	return 0;
}

static int hclge_tp_port_init(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	cmd.base.autoneg = hdev->hw.mac.autoneg;
	cmd.base.speed = hdev->hw.mac.speed;
	cmd.base.duplex = hdev->hw.mac.duplex;
	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);

	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
}

static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed = HCLGE_MAC_SPEED_UNKNOWN;
	int ret;

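	/* get the port info from SFP cmd if it is not a copper port */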
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return hclge_update_tp_port_info(hdev);

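	/* if IMP does not support get SFP/qSFP info, return directly */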
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		ret = hclge_get_sfp_info(hdev, mac);
	else
		ret = hclge_get_sfp_speed(hdev, &speed);

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(hdev, mac);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL);
	} else {
		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
			return 0;

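		/* must config full duplex for SFP */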
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
	}
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
	if (!pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
		return NULL;
	}

	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
			vf, pci_num_vf(hdev->pdev));
		return NULL;
	}

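	/* VF index starts from 1 in the vport array */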
	vf += HCLGE_VF_VPORT_START_NUM;
	return &hdev->vport[vf];
}

static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
			       struct ifla_vf_info *ivf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	ivf->vf = vf;
	ivf->linkstate = vport->vf_info.link_state;
	ivf->spoofchk = vport->vf_info.spoofchk;
	ivf->trusted = vport->vf_info.trusted;
	ivf->min_tx_rate = 0;
	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
	ether_addr_copy(ivf->mac, vport->vf_info.mac);

	return 0;
}

static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
				   int link_state)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int link_state_old;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	link_state_old = vport->vf_info.link_state;
	vport->vf_info.link_state = link_state;

	ret = hclge_push_vf_link_status(vport);
	if (ret) {
		vport->vf_info.link_state = link_state_old;
		dev_err(&hdev->pdev->dev,
			"failed to push vf%d link status, ret = %d\n", vf, ret);
	}

	return ret;
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;

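	/* fetch the events from their corresponding regs */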
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	hw_err_src_reg = hclge_read_dev(&hdev->hw,
					HCLGE_RAS_PF_OTHER_INT_STS_REG);

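	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */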
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

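	/* check for vector0 msix event and hardware error event source */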
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

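	/* check for vector0 ptp event source */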
	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_PTP;
	}

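	/* check for vector0 mailbox(=CMDQ RX) event source */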
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

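	/* print other vector0 event source */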
	dev_info(&hdev->pdev->dev,
		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_PTP:
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	unsigned long flags;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

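	/* vector 0 interrupt is shared with reset and mailbox source events */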
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		hclge_errhand_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_PTP:
		spin_lock_irqsave(&hdev->ptp->lock, flags);
		hclge_ptp_clean_tx_hwts(hdev);
		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
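		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */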
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	hclge_clear_event_cause(hdev, event_cause, clearval);

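	/* re-enable the misc vector only for events that are fully handled
	 * here; for reset events it stays masked until the reset task
	 * finishes and re-enables it
	 */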
	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
		hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static void