linux/drivers/net/ethernet/google/gve/gve.h
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	5
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref;
			   * see the sketch below this struct
			   */
	u8 can_flip;
};
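
/* A minimal sketch, not part of the driver API, of the ownership test that
 * `pagecnt_bias` enables: if the page's refcount has dropped back to the
 * bias the driver set, no one outside the driver still holds a reference
 * and the buffer can be recycled. Assumes page_count() from <linux/mm.h>;
 * the driver's real check lives in the RX datapath (gve_rx.c).
 */
static inline bool gve_page_is_driver_owned_sketch(
		const struct gve_rx_slot_page_info *page_info)
{
	return page_count(page_info->page) == page_info->pagecnt_bias;
}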

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW. See the sketch below
	 * this struct.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
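
/* A minimal sketch of the generation-bit test described above; the
 * `generation` field name on the completion descriptor is assumed from
 * gve_desc_dqo.h. The descriptor at `head` is ready once HW has written a
 * generation bit that differs from the bit SW currently expects;
 * cur_gen_bit flips each time head wraps around the ring.
 */
static inline bool gve_rx_compl_desc_ready_sketch(
		const struct gve_rx_compl_queue_dqo *complq)
{
	const struct gve_rx_compl_desc_dqo *desc =
		&complq->desc_ring[complq->head & complq->mask];

	return desc->generation != complq->cur_gen_bit;
}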

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
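
/* A minimal sketch (hypothetical helper) of the list convention above:
 * nodes live in an array and link through their `next` index, with -1 as
 * the terminator. Shown here popping the head from a list of
 * gve_rx_buf_state_dqo entries.
 */
static inline s16 gve_index_list_pop_sketch(struct gve_index_list *list,
					    struct gve_rx_buf_state_dqo *states)
{
	s16 idx = list->head;

	if (idx == -1)
		return -1;

	list->head = states[idx].next;
	if (list->head == -1)
		list->tail = -1;
	return idx;
}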

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indices into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indices into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs; see the
				       * sketch below this struct
				       */

	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};
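
/* A sketch of how the free-running u64 counters above are meant to be
 * bumped on the datapath: writers wrap updates in the standard
 * u64_stats_sync begin/end pair (see <linux/u64_stats_sync.h>) so 32-bit
 * readers observe consistent values. The RX path in gve_rx.c does the
 * equivalent of this; the helper name here is illustrative only.
 */
static inline void gve_rx_count_packet_sketch(struct gve_rx_ring *rx, u32 len)
{
	u64_stats_update_begin(&rx->statss);
	rx->rpackets++;
	rx->rbytes += len;
	u64_stats_update_end(&rx->statss);
}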

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

struct gve_tx_dma_buf {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct gve_tx_dma_buf buf;
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available; see the
			     * sketch below this struct
			     */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
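
/* A minimal sketch, under simplifying assumptions, of how `available` gates
 * FIFO writers: the TX path claims bytes at `head` only when enough space
 * remains, and the completion path adds the space back. The real
 * reservation logic in gve_tx.c also handles wrap-around and padding, which
 * this illustrative helper ignores.
 */
static inline bool gve_tx_fifo_has_room_sketch(struct gve_tx_fifo *fifo,
					       int bytes)
{
	return atomic_read(&fifo->available) >= bytes;
}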

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`. See the sketch below this struct.
	 */
	struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
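
/* An illustrative sketch of the unmap rule documented above: buffer 0 was
 * mapped with dma_map_single() from the skb's linear area, every later
 * buffer with dma_map_page() from a frag, so each must be unmapped with the
 * matching helper via the dma_unmap_addr()/dma_unmap_len() accessors. The
 * real cleanup lives in gve_tx_dqo.c; this helper name is hypothetical.
 */
static inline void gve_unmap_pending_packet_sketch(
		struct device *dev, struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		struct gve_tx_dma_buf *buf = &pkt->bufs[i];

		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(buf, dma),
					 dma_unmap_len(buf, len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(buf, dma),
				       dma_unmap_len(buf, len),
				       DMA_TO_DEVICE);
	}
}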

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets); see the sketch
			 * below this struct.
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* NIC tail pointer */
			__be32 last_nic_done;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 ntfy_id; /* notification block index */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
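
/* A minimal sketch of the "stealing" described above: when the TX path's
 * consumer free list is empty, it atomically takes the entire producer list
 * built up by the completion path, leaving -1 (empty) behind. The driver's
 * actual refill logic lives in gve_tx_dqo.c; this helper is illustrative.
 */
static inline s16 gve_tx_refill_free_list_sketch(struct gve_tx_ring *tx)
{
	s16 head = tx->dqo_tx.free_pending_packets;

	if (head == -1)
		head = (s16)atomic_xchg(&tx->dqo_compl.free_pending_packets,
					-1);

	return head;
}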

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the given notify block's irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
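
/* Worked example of the mapping above: with num_ntfy_blks == 8, TX queues
 * 0..3 map to notify blocks 0..3 and RX queues 0..3 map to notify blocks
 * 4..7, which is why num_ntfy_blks must be even.
 */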

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == priv->qpl_cfg.qpl_map_size)
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */