/* SPDX-License-Identifier: GPL-2.0-only */ /* * JaguarMicro virt device driver for virtio dataplane offloading * * Copyright (C) 2020 JaguarMicro Corporation. */ #ifndef _CRETE_NIC_H_ #define _CRETE_NIC_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../crete-core/crete_cmd.h" #include "../crete-core/crete_cmd_if.h" /* Max 8 data queue pairs(16 queues) and one control vq for now. */ #define CRETE_VNIC_MAX_QUEUES 17 #define CRETE_VNIC_QUEUES_NOCTRL 16 #define CRETE_VNIC_QUEUE_ALIGNMENT PAGE_SIZE /*attention pls ,it's the depth of queue*/ #define CRETE_VNIC_QUEUE_SIZE_MAX 512 #define CRETE_VNIC_QUEUE_SIZE_MIN 64 #define CRETE_VNIC_MSI_CONFIG_OFF 0 #define CRETE_VNIC_MSI_QUEUE_OFF 1 #define CRETE_VNIC_PCI_MAX_RESOURCE 6 #define VHOST_F_LOG_ALL 26 #define VHOST_ACCESS_WO 0x2 #define DEBUG #define VIRTIO_NET_CONFIG_OFFSET_MAC offsetof(struct virtio_net_config, mac) #define VIRTIO_NET_CONFIG_OFFSET_MTU offsetof(struct virtio_net_config, mtu) /**********************************vnic add******************************************/ #define CRETE_VNIC_MQ_MAX 17 #define CRETE_VNIC_AUX_DEV_NAME "crete_core.nic" #define CRETE_NIC_DRV_NAME "crete_pnic" /* FIXME: MTU in config. */ #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ #define VIRTIO_XDP_HEADROOM 256 /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- * term, transient changes in packet size. 
 */
DECLARE_EWMA(pkt_len, 0, 64)

/* Guest offload feature bits that are toggled together as one group for
 * hardware GRO.
 */
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO))

/* Maps an ethtool statistics string to the byte offset of the corresponding
 * counter inside one of the per-queue stats structures below.
 */
struct crete_vnic_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

/* Per-TX-queue counters; @syncp guards consistent 64-bit reads. */
struct crete_vnic_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
};

/* Per-RX-queue counters; @syncp guards consistent 64-bit reads. */
struct crete_vnic_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

/* Offset helpers for building crete_vnic_stat_desc tables. */
#define CRETE_VNIC_SQ_STAT(m)	offsetof(struct crete_vnic_sq_stats, m)
#define CRETE_VNIC_RQ_STAT(m)	offsetof(struct crete_vnic_rq_stats, m)

/* Internal representation of a send virtqueue. */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct crete_vnic_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* XDP program attached to this queue, if any (RCU-managed pointer). */
	struct bpf_prog __rcu *xdp_prog;

	struct crete_vnic_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];
};

/* Per-vector interrupt bookkeeping for one virtqueue. */
struct crete_vnic_msix_info {
	struct virtqueue *vq;
	int qid;
	/* MSI-X vector (or none) */
	unsigned int msix_vector;
	/* Linux IRQ number for the vector */
	unsigned int irq;
	bool ready;
	/* irq name buffer */
	char msix_name[256];
};

/* Cached network configuration: RX filter state plus the link/RSS
 * parameters exposed through the virtio-net config space. Fields using
 * __le* types appear to mirror device (little-endian) layout — TODO confirm
 * against the command interface definitions.
 */
struct crete_net_common_cfg {
	u32 rx_mask;
	u16 uc_filter_count;
	u8 *uc_list;
	u8 *mc_list;
	int mc_list_size;
	int mc_list_count;
	__u16 vportid;
	__u8 mac[ETH_ALEN];
	__u16 mtu;
	__le32 speed;
	/*
	 * 0x00 - half duplex
	 * 0x01 - full duplex
	 * Any other value stands for unknown.
	 */
	__u8 duplex;
	/* maximum size of RSS key */
	__u8 rss_max_key_size;
	/* maximum number of indirection table entries */
	__le16 rss_max_indirection_table_length;
	/* bitmask of supported VIRTIO_NET_RSS_HASH_ types */
	__le32 supported_hash_types;
	__le16 vlanid;
};

/* Per-device driver state. Embeds the virtio_device as its first member
 * and owns the TX/RX queue arrays, interrupt bookkeeping, RX refill work
 * and the cached network configuration.
 */
struct crete_vnic_priv {
	struct virtio_device vdev;
	struct crete_core_dev *coredev;
	struct net_device *netdev;
	/* array of max_queue_pairs send queues */
	struct send_queue *sq;
	/* array of max_queue_pairs receive queues */
	struct receive_queue *rq;
	unsigned int status;
	u16 max_queue_pairs;
	u16 curr_queue_pairs;
	bool big_packets;
	bool mergeable_rx_bufs;
	bool any_header_sg;
	u8 hdr_len;
	/* deferred RX buffer refill; gated by refill_enabled/refill_lock */
	struct delayed_work refill;
	bool refill_enabled;
	spinlock_t refill_lock;
	bool affinity_hint_set;
	struct hlist_node node;
	struct hlist_node node_dead;
	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;
	spinlock_t lock;
	struct crete_vnic_msix_info msix_info[CRETE_VNIC_MQ_MAX];
	struct crete_net_common_cfg net_cfg;
	struct crete_nb cnb;
	/* rx-mode updates are deferred to @wq via this work item */
	struct work_struct set_rx_mode_work;
	struct workqueue_struct *wq;
};

/* Device lifecycle states. */
enum crete_net_status {
	CRETE_NET_DEV_STARTUP,
	CRETE_NET_DEV_FEATURE_OK,
	CRETE_NET_DEV_DEV_OK,
	CRETE_NET_DEV_STOPPED
};

/* Direction indices (RX = 0, TX = 1). */
#define CRETE_RX_SEQ	0
#define CRETE_TX_SEQ	1

/* Write the device status byte to hardware. Returns 0 or a negative errno —
 * presumably; confirm against the implementation in the .c file.
 */
int crete_nic_set_hwstatus(struct crete_core_dev *core_dev, u8 status);

/* Select the device personality/type for this core device. */
int crete_nic_set_device_type(struct crete_core_dev *core_dev,
			      enum crete_dev_type op);

int crete_trim_rings(struct crete_core_dev *core_dev);

/* Allocate and set up the net_device; NULL/ERR_PTR semantics on failure —
 * TODO confirm with the implementation.
 */
struct net_device *crete_vnic_create_netdev(struct crete_core_dev *core_dev);

void crete_build_common_netdev(struct net_device *netdev);

void crete_vnic_destroy_netdev(struct net_device *netdev);

/* Release resources held by the private state (counterpart of create). */
void crete_vnic_priv_cleanup(struct crete_vnic_priv *priv);

u64 crete_net_get_max_supported_vqs(struct crete_core_dev *core_dev);

u64 crete_net_get_supported_features(struct crete_core_dev *core_dev,
				     enum crete_feature_opcode op);

#endif /* _CRETE_NIC_H_ */