/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* The driver can use up to 4 FIFO segments (iovecs) for a single TX packet. */
#define GVE_TX_MAX_IOVEC 4

/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	5
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
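
/* Illustrative sketch (not driver code): because the mask clears the low
 * PAGE_SHIFT bits, a data slot address can be split into its page-aligned
 * base and in-page offset:
 *
 *	u64 addr = be64_to_cpu(data_slot->addr);
 *	u64 page_base = addr & GVE_DATA_SLOT_ADDR_PAGE_MASK;
 *	u32 offset = addr & ~GVE_DATA_SLOT_ADDR_PAGE_MASK;
 */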

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO (2048)

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip; /* page can be flipped to the second half */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * unposted buffers.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
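
/* Illustrative sketch (not driver code): these lists chain ring entries by
 * array index through each entry's `next` field. Popping the head over a
 * hypothetical `states` array looks roughly like:
 *
 *	s16 idx = list->head;
 *
 *	if (idx != -1) {
 *		list->head = states[idx].next;
 *		if (list->head == -1)
 *			list->tail = -1;
 *	}
 */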

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free running bytes received */
	u64 rpackets; /* free running packets received */
	u32 cnt; /* free running total number of completed packets */
	u32 fill_cnt; /* free running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free running count of copybreak packets */
	u64 rx_copied_pkt; /* free running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

struct gve_tx_dma_buf {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct gve_tx_dma_buf buf;
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
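
/* Illustrative sketch (not driver code): per the comment on `bufs` above,
 * completion handling unmaps the linear part and the frags differently,
 * with `pkt` a hypothetical pending packet and `dev` its DMA device:
 *
 *	dma_unmap_single(dev, dma_unmap_addr(&pkt->bufs[0], dma),
 *			 dma_unmap_len(&pkt->bufs[0], len), DMA_TO_DEVICE);
 *	for (i = 1; i < pkt->num_bufs; i++)
 *		dma_unmap_page(dev, dma_unmap_addr(&pkt->bufs[i], dma),
 *			       dma_unmap_len(&pkt->bufs[i], len),
 *			       DMA_TO_DEVICE);
 */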

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail;

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* NIC tail pointer */
			__be32 last_nic_done;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read position in compl_ring */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) is exhausted, this list
			 * is stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion, but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 ntfy_id; /* notification block index */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of each distinct AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}
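
/* Illustrative sketch (an assumption based on the fields above, not a
 * definitive implementation): reset scheduling pairs these accessors with
 * the service workqueue, roughly:
 *
 *	gve_set_do_reset(priv);
 *	queue_work(priv->gve_wq, &priv->service_task);
 *
 * and the service task tests and clears the bit before acting on it.
 */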

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}
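
/* Illustrative sketch (an assumption, not upstream code): an interrupt
 * handler would silence the vector through this doorbell before scheduling
 * NAPI, along the lines of:
 *
 *	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
 *	napi_schedule_irqoff(&block->napi);
 *
 * GVE_IRQ_MASK stands in for whatever mask value the register layout
 * defines; it is not declared in this header.
 */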

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == priv->qpl_cfg.qpl_map_size)
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
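
/* Illustrative sketch (not driver code): assignment and release are paired
 * over a queue's lifetime, e.g.:
 *
 *	struct gve_queue_page_list *qpl = gve_assign_tx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;
 *	...
 *	gve_unassign_qpl(priv, qpl->id);
 */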

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
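
/* Illustrative sketch (not driver code): the returned direction is what a
 * caller passes when DMA-mapping a QPL's pages, e.g.:
 *
 *	enum dma_data_direction dir = gve_qpl_dma_dir(priv, qpl->id);
 *	dma_addr_t dma = dma_map_page(&priv->pdev->dev, page, 0,
 *				      PAGE_SIZE, dir);
 *
 *	if (dma_mapping_error(&priv->pdev->dev, dma))
 *		return -ENOMEM;
 */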

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */