// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include "crete_nic.h"

#define CRETE_VNIC_XDP_HEADROOM 256
#define MRG_CTX_HEADER_SHIFT 22

static const struct crete_vnic_stat_desc crete_vnic_sq_stats_desc[] = {
	{"packets", CRETE_VNIC_SQ_STAT(packets)},
	{"bytes", CRETE_VNIC_SQ_STAT(bytes)},
	{"xdp_tx", CRETE_VNIC_SQ_STAT(xdp_tx)},
	{"xdp_tx_drops", CRETE_VNIC_SQ_STAT(xdp_tx_drops)},
	{"kicks", CRETE_VNIC_SQ_STAT(kicks)},
};

static const struct crete_vnic_stat_desc crete_vnic_rq_stats_desc[] = {
	{"packets", CRETE_VNIC_RQ_STAT(packets)},
	{"bytes", CRETE_VNIC_RQ_STAT(bytes)},
	{"drops", CRETE_VNIC_RQ_STAT(drops)},
	{"xdp_packets", CRETE_VNIC_RQ_STAT(xdp_packets)},
	{"xdp_tx", CRETE_VNIC_RQ_STAT(xdp_tx)},
	{"xdp_redirects", CRETE_VNIC_RQ_STAT(xdp_redirects)},
	{"xdp_drops", CRETE_VNIC_RQ_STAT(xdp_drops)},
	{"kicks", CRETE_VNIC_RQ_STAT(kicks)},
};

static const char crete_vnic_phy_stat_desc[][ETH_GSTRING_LEN] = {
	"rx_bytes_phy",
	"rx_packets_phy",
	"tx_bytes_phy",
	"tx_packets_phy"
};

#define CRETE_VNIC_SQ_STATS_LEN ARRAY_SIZE(crete_vnic_sq_stats_desc)
#define CRETE_VNIC_RQ_STATS_LEN ARRAY_SIZE(crete_vnic_rq_stats_desc)

#define CRETE_VNIC_PHY_STATS_LEN ARRAY_SIZE(crete_vnic_phy_stat_desc)

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

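/*
 * Virtqueues are laid out as rx0, tx0, rx1, tx1, ...: receive queues sit at
 * even indices and transmit queues at odd ones, which is what the two index
 * helpers below rely on.
 */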
static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

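/*
 * For mergeable RX buffers the per-buffer "ctx" is not a pointer at all:
 * crete_vnic_mergeable_len_to_ctx() packs the buffer truesize into the low
 * MRG_CTX_HEADER_SHIFT bits and the headroom into the bits above it, e.g.
 * truesize 1536 with headroom 0 is simply (void *)1536UL.  The two helpers
 * below unpack that encoding.
 */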
static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

static inline struct virtio_net_hdr_mrg_rxbuf *
skb_to_crete_vnic_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

static void crete_vnic_give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vnic_priv->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private)
		;
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

/* Called from bottom half context */
static struct sk_buff *crete_vnic_page_to_skb(struct crete_vnic_priv *vnic_priv,
					      struct receive_queue *rq,
					      struct page *page,
					      unsigned int offset,
					      unsigned int len,
					      unsigned int truesize,
					      bool hdr_valid,
					      unsigned int metasize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_to_crete_vnic_hdr(skb);

	hdr_len = vnic_priv->hdr_len;
	if (vnic_priv->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);
	if (hdr_valid)
		memcpy(hdr, p, hdr_len);
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN + metasize;
	skb_put_data(skb, p, copy);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	len -= copy;
	offset += copy;

	if (vnic_priv->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned int)PAGE_SIZE - offset, len);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		crete_vnic_give_pages(rq, page);

	return skb;
}

/****************net rx************************************/

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void crete_vnic_napi_enable(struct virtqueue *vq,
				   struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void crete_vnic_napi_tx_enable(struct crete_vnic_priv *vnic_priv,
				      struct virtqueue *vq,
				      struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vnic_priv->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return crete_vnic_napi_enable(vq, napi);
}

static void crete_vnic_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static unsigned int crete_vnic_get_headroom(struct crete_vnic_priv *vnic_priv)
{
	return 0;
}

static void *crete_vnic_mergeable_len_to_ctx(unsigned int truesize,
					     unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) |
				       truesize);
}

static unsigned int crete_vnic_get_mergeable_buf_len(struct receive_queue *rq,
						     struct ewma_pkt_len *avg_pkt_len,
						     unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

static int crete_vnic_add_recvbuf_mergeable(struct crete_vnic_priv *vnic_priv,
					    struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = crete_vnic_get_headroom(vnic_priv);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = crete_vnic_get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = crete_vnic_mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

static void crete_vnic_enable_delayed_refill(struct crete_vnic_priv *vnic_priv)
{
	spin_lock_bh(&vnic_priv->refill_lock);
	vnic_priv->refill_enabled = true;
	spin_unlock_bh(&vnic_priv->refill_lock);
}

static void crete_vnic_disable_delayed_refill(struct crete_vnic_priv *vnic_priv)
{
	spin_lock_bh(&vnic_priv->refill_lock);
	vnic_priv->refill_enabled = false;
	spin_unlock_bh(&vnic_priv->refill_lock);
}

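/*
 * Fill the RX virtqueue with as many mergeable buffers as it will take and
 * kick the device if anything was added.  Returns false if an allocation
 * failed with -ENOMEM, so the caller can reschedule the delayed refill work
 * instead of spinning.
 */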
static bool crete_vnic_try_fill_recv(struct crete_vnic_priv *vnic_priv,
				     struct receive_queue *rq, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		err = crete_vnic_add_recvbuf_mergeable(vnic_priv, rq, gfp);
		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}
	return !oom;
}

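/*
 * Delayed-work handler behind vnic_priv->refill: retry filling every active
 * RX queue with that queue's NAPI temporarily disabled, and reschedule
 * itself in half a second if a queue still could not be replenished.
 */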
void crete_vnic_alloc_recv_buf(struct work_struct *work)
{
	struct crete_vnic_priv *vnic_priv =
		container_of(work, struct crete_vnic_priv, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vnic_priv->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vnic_priv->rq[i];

		napi_disable(&rq->napi);
		still_empty = !crete_vnic_try_fill_recv(vnic_priv, rq,
							GFP_KERNEL);
		crete_vnic_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vnic_priv->refill, HZ / 2);
	}
}

/***************rx end*****************************************/

/**********************tx xmit************************************/

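/*
 * Translate the skb's GSO and checksum state into a struct virtio_net_hdr
 * for the device, byte-swapped as needed.  Only TCPv4/TCPv6 GSO (plus the
 * ECN flag) is representable; any other GSO type is rejected with -EINVAL
 * and the caller ends up dropping the packet.
 */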
static inline int crete_vnic_hdr_from_skb(const struct sk_buff *skb,
					  struct virtio_net_hdr *hdr,
					  bool little_endian,
					  bool has_data_valid, int vlan_hlen)
{
	memset(hdr, 0, sizeof(*hdr)); /* no info leak */

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		hdr->hdr_len = __cpu_to_virtio16(little_endian,
						 skb_headlen(skb));
		hdr->gso_size = __cpu_to_virtio16(little_endian,
						  sinfo->gso_size);
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else
			return -EINVAL;
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = __cpu_to_virtio16(little_endian,
				skb_checksum_start_offset(skb) + vlan_hlen);
		hdr->csum_offset = __cpu_to_virtio16(little_endian,
						     skb->csum_offset);
	} else if (has_data_valid && skb->ip_summed == CHECKSUM_UNNECESSARY) {
		hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	}
	/* else everything is zero */
	return 0;
}

static int crete_vnic_send_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct crete_vnic_priv *vnic_priv = sq->vq->vdev->priv;
	int num_sg;
	unsigned int hdr_len = vnic_priv->hdr_len;
	bool can_push;

	pr_debug("xmit %p %pM\n", skb, dest);

	can_push = vnic_priv->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below.
	 */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_to_crete_vnic_hdr(skb);

	if (crete_vnic_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(&vnic_priv->vdev),
				    false, 0))
		return -EPROTO;

	if (vnic_priv->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

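/*
 * Reclaim completed TX buffers from the virtqueue and fold the totals into
 * the per-queue stats.  Callers serialize on the netdev TX queue lock;
 * in_napi selects napi_consume_skb() batching.
 */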
static void crete_vnic_free_old_tx_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		struct sk_buff *skb = ptr;

		pr_debug("Sent skb %p\n", skb);
		bytes += skb->len;
		napi_consume_skb(skb, in_napi);
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

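/*
 * .ndo_start_xmit: reclaim completed buffers, post the skb to the TX
 * virtqueue and kick the device unless more packets are queued behind us
 * (netdev_xmit_more()).  On ring-add failure the packet is dropped and
 * counted rather than requeued.
 */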
static netdev_tx_t crete_vnic_dev_queue_xmit(struct sk_buff *skb,
					     struct net_device *dev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vnic_priv->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	crete_vnic_free_old_tx_skbs(sq, false);

	if (use_napi && kick)
		virtqueue_enable_cb_delayed(sq->vq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = crete_vnic_send_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			crete_vnic_free_old_tx_skbs(sq, false);
			if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}

/*************************tx end**************************************/

static int crete_vnic_open(struct net_device *dev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev;
	int i;

	if (!vnic_priv) {
		netdev_err(dev, "vnic info is NULL\n");
		return -ENOSPC;
	}
	core_dev = vnic_priv->coredev;

	crete_vnic_enable_delayed_refill(vnic_priv);

	for (i = 0; i < core_dev->cap.qpcap.max_qp_num; i++) {
		if (i < vnic_priv->curr_queue_pairs)
			if (!crete_vnic_try_fill_recv(vnic_priv,
						      &vnic_priv->rq[i],
						      GFP_KERNEL))
				schedule_delayed_work(&vnic_priv->refill, 0);

		crete_vnic_napi_enable(vnic_priv->rq[i].vq,
				       &vnic_priv->rq[i].napi);
		crete_vnic_napi_tx_enable(vnic_priv, vnic_priv->sq[i].vq,
					  &vnic_priv->sq[i].napi);
	}

	return 0;
}

static int crete_vnic_close(struct net_device *dev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	int i;
	int max_queue_pairs = core_dev->cap.qpcap.max_qp_num;

	/* Make sure NAPI doesn't schedule refill work */
	crete_vnic_disable_delayed_refill(vnic_priv);
	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vnic_priv->refill);

	for (i = 0; i < max_queue_pairs; i++) {
		napi_disable(&vnic_priv->rq[i].napi);
		crete_vnic_napi_tx_disable(&vnic_priv->sq[i].napi);
	}

	return 0;
}

//////////////////////////////cmd begin////////////////////////////////////////
static int crete_vnic_set_guest_offloads(struct crete_vnic_priv *vnic_priv,
					 u64 offloads)
{
	u64 virtio64_offloads = 0;
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int err;

	virtio64_offloads = cpu_to_virtio64(&vnic_priv->vdev, offloads);
	err = crete_cmd_set_features(core_dev, CRETE_OFFLOAD_FEAT,
				     virtio64_offloads);
	if (err)
		dev_err(&pdev->dev, "set offload features failed %d\n", err);

	return err;
}

static int crete_vnic_set_features(struct net_device *dev,
				   netdev_features_t features)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	u64 offloads;
	int err;

	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
		if (features & NETIF_F_GRO_HW)
			offloads = vnic_priv->guest_offloads_capable;
		else
			offloads = vnic_priv->guest_offloads_capable &
				   ~GUEST_OFFLOAD_GRO_HW_MASK;

		err = crete_vnic_set_guest_offloads(vnic_priv, offloads);
		if (err)
			return err;
		vnic_priv->guest_offloads = offloads;
	}

	return 0;
}

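/*
 * Adminq helper used by the VLAN rx_add_vid/rx_kill_vid hooks: build a
 * set_rx_mode_in command with a DW-aligned vlan_range array appended after
 * the fixed header.  A single VID is expressed as a range whose start and
 * end are equal; on delete the trailing range list is omitted and the VLAN
 * filter is simply switched off.
 */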
static int crete_vnic_cmd_set_vport_vlan(struct crete_core_dev *core_dev,
					 int vport, u16 vlan, bool del)
{
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_rx_mode_in);
	int out_len = CRETE_ST_SZ_BYTES(set_rx_mode_out);
	void *in, *out, *vlan_list_in_base;
	int vlan_range_num = 1;
	int vlan_range_list_size =
		vlan_range_num * CRETE_ST_SZ_BYTES(vlan_range);

	in_len += ALIGN_TO_DW(vlan_range_list_size);

	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	vlan_list_in_base = in + in_len - ALIGN_TO_DW(vlan_range_list_size);

	CRETE_SET(set_rx_mode_in, in, cmd_op, 1);
	CRETE_SET(set_rx_mode_in, in, cmd_id, CRETE_CMD_SET_VPORT_RX_MODE);
	CRETE_SET(set_rx_mode_in, in, vport_id, vport);
	if (del) {
		CRETE_SET(set_rx_mode_in, in, vlan_fliter, 0);
		CRETE_SET(set_rx_mode_in, in, vlan_filter_mode, 0);
		in_len -= ALIGN_TO_DW(vlan_range_list_size);
	} else {
		CRETE_SET(set_rx_mode_in, in, vlan_fliter, 1);
		CRETE_SET(set_rx_mode_in, in, vlan_tpid, 1);
		CRETE_SET(set_rx_mode_in, in, vlan_filter_mode, 2);
		CRETE_SET(set_rx_mode_in, in, vlan_range_num, vlan_range_num);
		CRETE_SET(vlan_range, vlan_list_in_base, start_vlan, vlan);
		CRETE_SET(vlan_range, vlan_list_in_base, end_vlan, vlan);
	}
	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_rx_mode_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_rx_mode_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set vport_vlan failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	dev_warn(&pdev->dev, "%s esw_vport_vlan ret:0x%x\n", __func__, ret);
	return ret;
}

static int crete_vnic_vlan_rx_add_vid(struct net_device *dev,
				      __be16 proto, u16 vid)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	__virtio16 local_vid;
	int ret, vportid;
	struct pci_dev *pdev = core_dev->pdev;

	vportid = netcfg->vportid;
	local_vid = cpu_to_virtio16(&vnic_priv->vdev, vid);
	ret = crete_vnic_cmd_set_vport_vlan(core_dev, vportid, local_vid, false);
	if (ret)
		dev_err(&pdev->dev, "failed to set vlan id %d\n", vid);
	netcfg->vlanid = local_vid;
	return ret;
}

static int crete_vnic_vlan_rx_kill_vid(struct net_device *dev,
				       __be16 proto, u16 vid)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	__virtio16 local_vid;
	int ret, vportid;

	vportid = netcfg->vportid;
	local_vid = cpu_to_virtio16(&vnic_priv->vdev, vid);
	ret = crete_vnic_cmd_set_vport_vlan(core_dev, vportid, local_vid, true);
	if (ret)
		dev_err(&pdev->dev, "failed to delete vlan id %d\n", vid);
	netcfg->vlanid = 0;
	return ret;
}

static void crete_vnic_get_stats(struct net_device *dev,
				 struct rtnl_link_stats64 *tot)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vnic_priv->max_queue_pairs; i++) {
		u64 tpackets, tbytes, rpackets, rbytes, rdrops;
		struct receive_queue *rq = &vnic_priv->rq[i];
		struct send_queue *sq = &vnic_priv->sq[i];

		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes = sq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes = rq->stats.bytes;
			rdrops = rq->stats.drops;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
		tot->rx_dropped += rdrops;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

static int crete_vnic_set_trust_vf(struct net_device *dev, int vf_id, bool trust_state)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	int ret = 0;
	u16 vf_sfi = 0;

	ret = crete_get_dev_sfi(core_dev, CRETE_GET_SFI_VF, vf_id, &vf_sfi);
	if (ret) {
		pr_err("failed to get sfi for vf %d\n", vf_id);
		return ret;
	}

	ret = crete_set_trust_vf(core_dev, vf_sfi, trust_state);
	return ret;
}

static int crete_vnic_change_mtu(struct net_device *dev, int new_mtu)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	int ret = 0;

	if (core_dev->coredev_type != CRETE_COREDEV_PF) {
		pr_warn("port %u is not a PF, cannot set mtu\n", core_dev->sfi_id);
		return 0;
	}

	if (new_mtu > dev->max_mtu) {
		pr_err("mtu %d exceeds max mtu\n", new_mtu);
		return -EINVAL;
	}

	ret = crete_set_port_mtu(core_dev, CRETE_SET_MTU_MACPORT, MAC_MTU(new_mtu));
	if (ret) {
		pr_err("failed to set port mtu\n");
		return ret;
	}

	WRITE_ONCE(dev->mtu, new_mtu);

	return ret;
}

static int crete_vnic_adminq_set_mac(struct net_device *dev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0; //status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_uc_mac_in);
	int out_len = CRETE_ST_SZ_BYTES(set_uc_mac_out);
	void *in, *out, *uc_mac_list_in_base;
	int uc_mac_list_size = 0;

	uc_mac_list_size =
		netcfg->uc_filter_count * CRETE_ST_SZ_BYTES(uc_mac_list);

	in_len += ALIGN_TO_DW(uc_mac_list_size);
	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	uc_mac_list_in_base = in + in_len - ALIGN_TO_DW(uc_mac_list_size);
	CRETE_SET(set_uc_mac_in, in, cmd_op, 0);
	CRETE_SET(set_uc_mac_in, in, cmd_id, CRETE_CMD_SET_UC_MAC);
	CRETE_SET(set_uc_mac_in, in, uc_list_num, netcfg->uc_filter_count);
	memcpy(uc_mac_list_in_base, netcfg->uc_list, uc_mac_list_size);
	/*
	 * ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	 * if (ret < 0)
	 *	goto err_out;
	 * status = CRETE_GET(set_uc_mac_out, out, status);
	 * if (status != SUCCESS) {
	 *	err_type = CRETE_GET(set_uc_mac_out, out, err_type);
	 *	crete_err(&pdev->dev,
	 *		  "crete set uc mac failed, err type:0x%x status:0x%x\n",
	 *		  err_type, status);
	 *	ret = -EINVAL;
	 * }
	 */
err_out:
	kvfree(in);
	kvfree(out);
	crete_err(&pdev->dev, "%s return:%x\n", __func__, ret);
	return ret;
}

static int crete_vnic_cmd_set_mc_filter(struct net_device *netdev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(netdev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_rx_mode_new_in);
	int out_len = CRETE_ST_SZ_BYTES(set_rx_mode_out);
	void *in, *out, *mc_mac_list_in_base;
	int vlan_range_list_size;
	int mc_mac_list_size;
	int vlan_range_num = 0;
	int mc_list_num = netcfg->mc_list_count;
	int mc_list_size = mc_list_num * ETH_ALEN;

	vlan_range_list_size = vlan_range_num * CRETE_ST_SZ_BYTES(vlan_range);
	in_len += vlan_range_list_size;

	mc_mac_list_size = mc_list_num * CRETE_ST_SZ_BYTES(mc_mac_list);
	in_len += ALIGN_TO_DW(mc_mac_list_size);

	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	mc_mac_list_in_base = in + in_len - ALIGN_TO_DW(mc_mac_list_size);

	CRETE_SET(set_rx_mode_new_in, in, cmd_op, BIT(CRETE_RX_MODE_MC_MAC_LIST_ADD));
	CRETE_SET(set_rx_mode_new_in, in, cmd_id, CRETE_CMD_SET_VPORT_RX_MODE);
	CRETE_SET(set_rx_mode_new_in, in, svport_id, core_dev->sfi_id);
	CRETE_SET(set_rx_mode_new_in, in, dvport_id, core_dev->sfi_id);

	CRETE_SET(set_rx_mode_new_in, in, mc_list_num, netcfg->mc_list_count);
	memcpy(mc_mac_list_in_base, netcfg->mc_list, mc_list_size);

	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_rx_mode_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_rx_mode_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set mc filter failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	return ret;
}

static int __maybe_unused crete_vnic_set_uc_filter(struct net_device *netdev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(netdev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct netdev_hw_addr *ha;
	int off = 0;

	netif_addr_lock_bh(netdev);
	if (netdev_uc_count(netdev) > (CRETE_MAX_UC_ADDRS - 1)) {
		netcfg->rx_mask |= L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			memcpy(netcfg->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			netcfg->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(netdev);

	return crete_vnic_adminq_set_mac(netdev);
}

static int crete_vnic_cmd_set_rx_mask(struct net_device *netdev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(netdev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_rx_mode_new_in);
	int out_len = CRETE_ST_SZ_BYTES(set_rx_mode_out);
	void *in, *out;

	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	CRETE_SET(set_rx_mode_new_in, in, cmd_op, 1);
	CRETE_SET(set_rx_mode_new_in, in, cmd_id, CRETE_CMD_SET_VPORT_RX_MODE);
	CRETE_SET(set_rx_mode_new_in, in, svport_id, core_dev->sfi_id);
	CRETE_SET(set_rx_mode_new_in, in, dvport_id, core_dev->sfi_id);

	if (netcfg->rx_mask & L2_SET_RX_MASK_REQ_MASK_BCAST)
		CRETE_SET(set_rx_mode_new_in, in, broadcast, 1);

	if (netcfg->rx_mask & L2_SET_RX_MASK_REQ_MASK_ALL_MCAST)
		CRETE_SET(set_rx_mode_new_in, in, all_mmulticast, 1);

	if (netcfg->rx_mask & L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS)
		CRETE_SET(set_rx_mode_new_in, in, promiscuous, 1);

	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_rx_mode_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_rx_mode_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set rx mask failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	return ret;
}

void crete_vnic_do_set_rx_mode(struct crete_vnic_priv *vnic_priv)
{
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	struct net_device *netdev = vnic_priv->netdev;
	int rc;
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;

	if (netcfg->rx_mask & L2_SET_RX_MASK_REQ_MASK_MCAST) {
		rc = crete_vnic_cmd_set_mc_filter(netdev);
		if (rc) {
			dev_err(&pdev->dev,
				"Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
				rc);
			netcfg->rx_mask &= ~L2_SET_RX_MASK_REQ_MASK_MCAST;
			netcfg->rx_mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			netcfg->mc_list_count = 0;
		}
	}
	/* promisc check */
	//if ((netcfg->rx_mask & L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	//    !crete_promisc_ok(core_dev))
	//	priv->rx_mask &= ~L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	rc = crete_vnic_cmd_set_rx_mask(netdev);
	if (rc)
		dev_err(&pdev->dev, "HWRM l2 rx mask failure rc: %d\n", rc);
}

void crete_vnic_set_rx_mode_work(struct work_struct *work)
{
	struct crete_vnic_priv *vnic_priv = container_of(work, struct crete_vnic_priv,
							 set_rx_mode_work);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	struct net_device *netdev = vnic_priv->netdev;
	int rc;
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;

	if (netcfg->rx_mask & L2_SET_RX_MASK_REQ_MASK_MCAST) {
		rc = crete_vnic_cmd_set_mc_filter(netdev);
		if (rc) {
			dev_err(&pdev->dev,
				"Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
				rc);
			netcfg->rx_mask &= ~L2_SET_RX_MASK_REQ_MASK_MCAST;
			netcfg->rx_mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			netcfg->mc_list_count = 0;
		}
	}

	rc = crete_vnic_cmd_set_rx_mask(netdev);
	if (rc)
		dev_err(&pdev->dev, "HWRM l2 rx mask failure rc: %d\n", rc);
}
EXPORT_SYMBOL(crete_vnic_set_rx_mode_work);

static bool __maybe_unused crete_vnic_uc_list_updated(struct net_device *dev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	struct netdev_hw_addr *ha;
	int off = 0;

	if (netdev_uc_count(dev) != (netcfg->uc_filter_count))
		return true;

	netdev_for_each_uc_addr(ha, dev) {
		if (!ether_addr_equal(ha->addr, netcfg->uc_list + off))
			return true;
		off += ETH_ALEN;
	}
	return false;
}

static bool __maybe_unused crete_vnic_mc_list_updated(struct net_device *dev, u32 *rx_mask)
{
	int mc_count = 0;
	bool update = false;
	int off = 0;
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;

	netdev_for_each_mc_addr(ha, dev) {
		if (mc_count >= CRETE_MAX_MC_ADDRS) {
			*rx_mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			netcfg->mc_list_count = 0;
			return false;
		}
		if (!ether_addr_equal(ha->addr, netcfg->mc_list + off)) {
			memcpy(netcfg->mc_list + off, ha->addr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != netcfg->mc_list_count) {
		netcfg->mc_list_count = mc_count;
		update = true;
	}
	return update;
}

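/*
 * .ndo_set_rx_mode: recompute the promiscuous/broadcast/all-multicast mask
 * from dev->flags and, if it changed, hand the update off to
 * set_rx_mode_work, since the adminq command path sleeps and this hook
 * must not.
 */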
static void crete_vnic_set_rx_mode(struct net_device *dev)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	u32 mask;
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	bool mc_update = false;

	pr_debug("crete: entering rx mode set\n");
	mask = netcfg->rx_mask;
	mask &= ~(L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
		  L2_SET_RX_MASK_REQ_MASK_MCAST |
		  L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
		  L2_SET_RX_MASK_REQ_MASK_BCAST);

	if (dev->flags & IFF_PROMISC)
		mask |= L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	if (dev->flags & IFF_BROADCAST)
		mask |= L2_SET_RX_MASK_REQ_MASK_BCAST;
	if (dev->flags & IFF_ALLMULTI) {
		mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		netcfg->mc_list_count = 0;
	}
	/*
	 * The snic does not support per-address MC MAC add and del.
	 */
#if 0
	} else if (dev->flags & IFF_MULTICAST) {
		mc_update = crete_vnic_mc_list_updated(dev, &mask);
	}

	bool uc_update = crete_vnic_uc_list_updated(dev);

	if (uc_update) {
		rc = crete_vnic_set_uc_filter(dev);
		if (rc)
			dev_err(&pdev->dev,
				"HWRM l2 uc filter failure rc: %d\n", rc);
	}
#endif
	dev_dbg(&pdev->dev,
		"update rxmode config new mask %x old cfg mask %x\n",
		mask, netcfg->rx_mask);
	/* TODO: the PF currently does not set vdev */
	if (mask != netcfg->rx_mask || mc_update) {
		netcfg->rx_mask = mask;
		queue_work(vnic_priv->wq, &vnic_priv->set_rx_mode_work);
	}
}

static int crete_cmd_set_uc_mac_new(struct net_device *netdev)
{
	struct crete_vnic_priv *priv = netdev_priv(netdev);
	struct crete_net_common_cfg *netcfg = &priv->net_cfg;
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_rx_mode_new_in);
	int out_len = CRETE_ST_SZ_BYTES(set_rx_mode_out);
	void *in, *out, *uc_mac_list_in_base;
	int uc_list_num = netcfg->uc_filter_count;
	int uc_mac_list_size = uc_list_num * CRETE_ST_SZ_BYTES(uc_mac_list);

	in_len += ALIGN_TO_DW(uc_mac_list_size);
	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	uc_mac_list_in_base = in + in_len - ALIGN_TO_DW(uc_mac_list_size);
	CRETE_SET(set_rx_mode_new_in, in, cmd_op, BIT(CRETE_RX_MODE_UC_MAC_LIST_ADD));
	CRETE_SET(set_rx_mode_new_in, in, cmd_id, CRETE_CMD_SET_VPORT_RX_MODE);

	CRETE_SET(set_rx_mode_new_in, in, svport_id, core_dev->sfi_id);
	CRETE_SET(set_rx_mode_new_in, in, dvport_id, core_dev->sfi_id);

	CRETE_SET(set_rx_mode_new_in, in, uc_list_num, netcfg->uc_filter_count);
	memcpy(uc_mac_list_in_base, netcfg->uc_list, uc_mac_list_size);

	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_rx_mode_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_rx_mode_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set uc mac failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	crete_err(&pdev->dev, "%s return:%d\n", __func__, ret);
	return ret;
}

int crete_vnic_set_mac_address(struct net_device *dev, void *p)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	struct crete_net_common_cfg *netcfg = &vnic_priv->net_cfg;
	int ret, i;
	struct sockaddr *addr;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;
	for (i = 0; i < ETH_ALEN; i++)
		netcfg->uc_list[i] = addr->sa_data[ETH_ALEN - i - 1];

	netcfg->uc_filter_count = 1;
	ret = crete_cmd_set_uc_mac_new(dev);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to set mac address ret %d\n", ret);
		netcfg->uc_filter_count = 0;
		goto out;
	}
	eth_commit_mac_addr_change(dev, p);
out:
	kfree(addr);
	return ret;
}

/////////////////////////////cmd end/////////////////////////////////////////

/************************interrupts ctx********************************/
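/*
 * TX NAPI handler: reclaim completed skbs under the TX queue lock, re-arm
 * the virtqueue callback, and wake the queue once enough descriptors are
 * free for a worst-case (2 + MAX_SKB_FRAGS) packet.
 */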
int crete_vnic_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct crete_vnic_priv *vnic_priv = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;
	int opaque;
	bool done;

	txq = netdev_get_tx_queue(vnic_priv->netdev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
	crete_vnic_free_old_tx_skbs(sq, true);

	opaque = virtqueue_enable_cb_prepare(sq->vq);
	done = napi_complete_done(napi, 0);
	if (!done)
		virtqueue_disable_cb(sq->vq);

	__netif_tx_unlock(txq);

	if (done) {
		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
			if (napi_schedule_prep(napi)) {
				__netif_tx_lock(txq, raw_smp_processor_id());
				virtqueue_disable_cb(sq->vq);
				__netif_tx_unlock(txq);
				__napi_schedule(napi);
			}
		}
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}

void crete_vnic_xmit_done(struct virtqueue *vq)
{
	struct crete_vnic_priv *vnic_priv = vq->vdev->priv;
	struct napi_struct *napi = &vnic_priv->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vnic_priv->netdev, vq2txq(vq));
}

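/*
 * Reassemble one packet that the device scattered across num_buffers
 * mergeable RX buffers: the first buffer becomes the head skb, the rest are
 * attached as page fragments (or chained skbs once MAX_SKB_FRAGS is hit).
 * On any error the remaining buffers of the packet are consumed and dropped
 * so the ring stays consistent.
 */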
static struct sk_buff *crete_vnic_receive_mergeable(struct net_device *dev,
						    struct crete_vnic_priv *vnic_priv,
						    struct receive_queue *rq,
						    void *buf, void *ctx,
						    unsigned int len,
						    struct crete_vnic_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(&vnic_priv->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = 0;
	unsigned int metasize = 0;

	headroom = mergeable_ctx_to_headroom(ctx);
	head_skb = NULL;
	stats->bytes += len - vnic_priv->hdr_len;
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = crete_vnic_page_to_skb(vnic_priv, rq, page, offset, len,
					  truesize, true, metasize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(&vnic_priv->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
	return NULL;
}

static void crete_vnic_receive_buf(struct crete_vnic_priv *vnic_priv,
				   struct receive_queue *rq, void *buf,
				   unsigned int len, void **ctx,
				   struct crete_vnic_rq_stats *stats)
{
	struct net_device *dev = vnic_priv->netdev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vnic_priv->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		put_page(virt_to_head_page(buf));
		return;
	}

	skb = crete_vnic_receive_mergeable(dev, vnic_priv, rq, buf, ctx, len,
					   stats);
	if (unlikely(!skb))
		return;

	hdr = skb_to_crete_vnic_hdr(skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(&vnic_priv->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int crete_vnic_receive(struct receive_queue *rq, int budget)
{
	struct crete_vnic_priv *vnic_priv = rq->vq->vdev->priv;
	struct crete_vnic_rq_stats stats = { };
	unsigned int len;
	void *buf;
	int i;

	if (!vnic_priv->big_packets || vnic_priv->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			crete_vnic_receive_buf(vnic_priv, rq, buf, len, ctx,
					       &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			crete_vnic_receive_buf(vnic_priv, rq, buf, len, NULL,
					       &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > min_t(unsigned int,
				     (unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!crete_vnic_try_fill_recv(vnic_priv, rq, GFP_ATOMIC)) {
			spin_lock(&vnic_priv->refill_lock);
			if (vnic_priv->refill_enabled)
				schedule_delayed_work(&vnic_priv->refill, 0);
			spin_unlock(&vnic_priv->refill_lock);
		}
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < CRETE_VNIC_RQ_STATS_LEN; i++) {
		size_t offset = crete_vnic_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

static void crete_vnic_poll_cleantx(struct receive_queue *rq)
{
	struct crete_vnic_priv *vnic_priv = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vnic_priv->sq[index];
	struct netdev_queue *txq =
		netdev_get_tx_queue(vnic_priv->netdev, index);

	if (!sq->napi.weight)
		return;

	if (__netif_tx_trylock(txq)) {
		crete_vnic_free_old_tx_skbs(sq, true);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}

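/*
 * RX NAPI handler: opportunistically reclaim TX completions for the paired
 * send queue, then receive up to @budget packets.  Completing NAPI only
 * when fewer than @budget packets were processed keeps callbacks disabled
 * while the ring is busy.
 */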
int crete_vnic_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	unsigned int received;

	crete_vnic_poll_cleantx(rq);
	received = crete_vnic_receive(rq, budget);
	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);
	return received;
}

void crete_vnic_recv_done(struct virtqueue *rvq)
{
	struct crete_vnic_priv *vnic_priv = rvq->vdev->priv;
	struct receive_queue *rq = &vnic_priv->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

/************************interrupts ctx********************************/

void free_unused_bufs(struct crete_vnic_priv *vnic_priv)
{
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void _free_receive_bufs(struct crete_vnic_priv *vnic_priv)
{
	int i;

	for (i = 0; i < vnic_priv->max_queue_pairs; i++) {
		while (vnic_priv->rq[i].pages)
			__free_pages(get_a_page(&vnic_priv->rq[i], GFP_KERNEL),
				     0);
	}
}

void free_receive_bufs(struct crete_vnic_priv *vnic_priv)
{
	rtnl_lock();
	_free_receive_bufs(vnic_priv);
	rtnl_unlock();
}

static const struct net_device_ops crete_vnic_netdev_ops = {
	.ndo_open = crete_vnic_open,
	.ndo_stop = crete_vnic_close,
	.ndo_start_xmit = crete_vnic_dev_queue_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = crete_vnic_set_mac_address,
	.ndo_set_rx_mode = crete_vnic_set_rx_mode,
	.ndo_get_stats64 = crete_vnic_get_stats,
	.ndo_vlan_rx_add_vid = crete_vnic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = crete_vnic_vlan_rx_kill_vid,
	.ndo_set_features = crete_vnic_set_features,
	.ndo_set_vf_trust = crete_vnic_set_trust_vf,
	.ndo_change_mtu = crete_vnic_change_mtu,
};

void crete_set_netdev_ops(struct net_device *netdev)
{
	netdev->netdev_ops = &crete_vnic_netdev_ops;
}

void free_receive_page_frags(struct crete_vnic_priv *vnic_priv)
{
	int i;

	for (i = 0; i < vnic_priv->max_queue_pairs; i++)
		if (vnic_priv->rq[i].alloc_frag.page)
			put_page(vnic_priv->rq[i].alloc_frag.page);
}

int __crete_vnic_set_queues(struct net_device *netdev, u16 queue_pairs)
{
	struct crete_vnic_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0;

	if (crete_set_qp_num(core_dev, queue_pairs) != 0) {
		dev_warn(&pdev->dev, "Failed to set number of queue pairs to %d\n",
			 queue_pairs);
		ret = -EINVAL;
	} else {
		priv->curr_queue_pairs = queue_pairs;
		/* snic_open() will refill when the device is brought up. */
		if (netdev->flags & IFF_UP)
			schedule_delayed_work(&priv->refill, 0);
	}

	return ret;
}

int crete_vnic_set_queues(struct net_device *netdev, u16 queue_pairs)
{
	int err = 0;

	rtnl_lock();
	err = __crete_vnic_set_queues(netdev, queue_pairs);
	rtnl_unlock();
	return err;
}

void crete_vnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	char *p = (char *)data;
	unsigned int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vnic_priv->curr_queue_pairs; i++) {
			for (j = 0; j < CRETE_VNIC_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
					 i, crete_vnic_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < vnic_priv->curr_queue_pairs; i++) {
			for (j = 0; j < CRETE_VNIC_SQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
					 i, crete_vnic_sq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}

		for (j = 0; j < CRETE_VNIC_PHY_STATS_LEN; j++) {
			snprintf(p, ETH_GSTRING_LEN, "%s", crete_vnic_phy_stat_desc[j]);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

int crete_vnic_get_sset_count(struct net_device *dev, int sset)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vnic_priv->curr_queue_pairs * (CRETE_VNIC_RQ_STATS_LEN +
			CRETE_VNIC_SQ_STATS_LEN) + CRETE_VNIC_PHY_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

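/*
 * ethtool -S callback: per-queue RX/TX counters are snapshotted under the
 * u64_stats seqcount retry loop, then the four *_phy counters are appended
 * from crete_get_vport_pkt_statistics(); they are left untouched if that
 * query fails.
 */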
void crete_vnic_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct crete_vnic_priv *vnic_priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = vnic_priv->coredev;
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;
	int ret = 0;
	struct crete_port_statistics port_statistics;

	for (i = 0; i < vnic_priv->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vnic_priv->rq[i];

		stats_base = (u8 *)&rq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < CRETE_VNIC_RQ_STATS_LEN; j++) {
				offset = crete_vnic_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		idx += CRETE_VNIC_RQ_STATS_LEN;
	}

	for (i = 0; i < vnic_priv->curr_queue_pairs; i++) {
		struct send_queue *sq = &vnic_priv->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			for (j = 0; j < CRETE_VNIC_SQ_STATS_LEN; j++) {
				offset = crete_vnic_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
		idx += CRETE_VNIC_SQ_STATS_LEN;
	}

	ret = crete_get_vport_pkt_statistics(core_dev, CRETE_GET_PKT_STATIS_MAC, &port_statistics);
	if (ret == 0) {
		data[idx + 0] = port_statistics.rx_bytes;
		data[idx + 1] = port_statistics.rx_pkts;
		data[idx + 2] = port_statistics.tx_bytes;
		data[idx + 3] = port_statistics.tx_pkts;
	}
}