Jintao 50c46e6857 Add LingYao
Change-Id: Iae6634ce565940904ee320c678d0f77473bebb90
2025-01-03 16:08:55 +08:00

1342 lines
35 KiB
C

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023, Jaguar Micro. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define DEBUG
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include "crete_nic.h"
#include "crete_nic_io.h"
#include "crete_nic_ethtool.h"
/* NAPI poll budget; read-only module parameter. */
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
/* Feature toggles: hardware checksum, GSO, and NAPI-driven TX completion. */
static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);
#define CRETE_NIC_DRV_DESCRIPTION "JaguarMicro/CRETE NIC Device Driver"
#define CRETE_VNIC_DRV_NAME "crete_vnic"
/* Guest offload feature bits mirrored into priv->guest_offloads. */
static const unsigned long crete_vnic_guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_GUEST_CSUM
};
/* Full set of virtio-net feature bits this driver is willing to negotiate. */
#define CRETE_VNIC_FEATURES \
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
        VIRTIO_NET_F_MAC, \
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, \
        VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
        VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
        VIRTIO_NET_F_CTRL_VLAN, \
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
        VIRTIO_NET_F_CTRL_MAC_ADDR, \
        VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
        VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_MRG_RXBUF
static unsigned int crete_vnic_features[] = {
        CRETE_VNIC_FEATURES,
};
#define CRETE_VIRTIO_DEVICE_ID 0x1C20 /* virtio net */
#define CRETE_VIRTIO_VENDOR_ID 0x1f53 /* virtio net */
/* One-shot work item used to defer carrier updates out of atomic context. */
struct crete_vnic_wq_ent {
        struct work_struct work;       /* queued on vnic_priv->wq */
        struct crete_vnic_priv *vnic_priv;
};
/*
 * Query the device for the current physical link state.
 * Returns false both for "link down" and for a failed query.
 */
static bool crete_vnic_get_link_state(struct crete_core_dev *core_dev,
                                      struct crete_vnic_priv *vnic_priv)
{
        bool link_up = false;

        if (crete_get_dev_linkattr(core_dev, &link_up))
                return false;
        return link_up;
}
/*
 * Deferred carrier update: re-read the link state in process context and
 * toggle the netdev carrier accordingly, then release the work entry.
 */
static void update_carrier(struct work_struct *work)
{
        struct crete_vnic_wq_ent *ent =
                container_of(work, struct crete_vnic_wq_ent, work);
        struct crete_vnic_priv *priv = ent->vnic_priv;
        struct crete_core_dev *cdev = priv->coredev;

        if (crete_vnic_get_link_state(cdev, priv))
                netif_carrier_on(cdev->netdev);
        else
                netif_carrier_off(cdev->netdev);
        kfree(ent);
}
/*
 * Queue a carrier update on the driver workqueue.  Called from event
 * (possibly atomic) context, hence GFP_ATOMIC.
 * Returns 0 on success, -ENOMEM if the work entry cannot be allocated.
 */
static int queue_link_work(struct crete_vnic_priv *vnic_priv)
{
        struct crete_vnic_wq_ent *ent = kzalloc(sizeof(*ent), GFP_ATOMIC);

        if (!ent)
                return -ENOMEM;
        ent->vnic_priv = vnic_priv;
        INIT_WORK(&ent->work, update_carrier);
        queue_work(vnic_priv->wq, &ent->work);
        return 0;
}
/*
 * Core-device event notifier.  Only CRETE_EVENT_PORT_LINK_CHANGE is
 * handled: the new link state is cached in vnic_priv->status and the
 * carrier update is deferred to the driver workqueue.
 */
static int crete_vnic_notify_handler(struct notifier_block *nb,
                                     unsigned long ecode, void *data)
{
        struct crete_nb *cnb = container_of(nb, struct crete_nb, nb);
        struct crete_vnic_priv *vnic_priv =
                container_of(cnb, struct crete_vnic_priv, cnb);
        struct crete_core_dev *core_dev = vnic_priv->coredev;
        struct crete_event_entry *cee = data;
        uint8_t event_sub_type = cee->event_sub_type;
        struct pci_dev *pdev = core_dev->pdev;
        unsigned int status;

        /* Fix: the old format string had a stray '\n' in the middle and a
         * "sub_tupe" typo, producing broken two-line log output.
         */
        crete_info(&pdev->dev, "%s: event code %lu sub_type %u\n", __func__,
                   ecode, event_sub_type);
        /*
         * about the event type is port link change
         */
        if (ecode == CRETE_EVENT_PORT_LINK_CHANGE) {
                status = event_sub_type;
                /*
                 * if status exceed RIGHT LINK status
                 * or status not changed anymore.
                 * just return now.
                 */
                if (status >= CRETE_EVENT_PORT_LINK_ERR ||
                    status == vnic_priv->status)
                        return NOTIFY_DONE;
                vnic_priv->status = status;
                /* make sure other core will see it */
                smp_mb();
                if (queue_link_work(vnic_priv))
                        return NOTIFY_DONE;
        }
        return NOTIFY_OK;
}
/*
 * Register the link-change event notifier, once.  A second call while
 * already registered is a no-op.
 * Returns 0 on success or the error from crete_event_notifier_register().
 */
static int crete_vnic_register_event_handler(struct net_device *netdev)
{
        struct crete_vnic_priv *vnic_priv = netdev_priv(netdev);
        struct crete_core_dev *core_dev = vnic_priv->coredev;
        struct pci_dev *pdev = core_dev->pdev;
        struct crete_nb *cnb = &vnic_priv->cnb;
        struct notifier_block *nb = &cnb->nb;
        int err;

        if (!nb->notifier_call) {
                nb->notifier_call = crete_vnic_notify_handler;
                cnb->event_type = CRETE_EVENT_PORT_LINK_CHANGE;
                err = crete_event_notifier_register(core_dev, cnb);
                if (err) {
                        nb->notifier_call = NULL;
                        /* Fix: %pe prints the errno name for an ERR_PTR;
                         * %ps tried to resolve it as a symbol address.
                         */
                        dev_err(&pdev->dev,
                                "failed to register pds event handler: %pe\n",
                                ERR_PTR(err));
                        /* Fix: propagate the real error, not -EINVAL. */
                        return err;
                }
                dev_dbg(&pdev->dev, "crete event handler registered\n");
        }
        return 0;
}
/*
 * Counterpart of crete_vnic_register_event_handler(); safe to call when
 * the handler was never registered (notifier_call acts as the flag).
 */
static void crete_vnic_unregister_event_handler(struct net_device *netdev)
{
        struct crete_vnic_priv *vnic_priv = netdev_priv(netdev);
        struct crete_nb *cnb = &vnic_priv->cnb;

        if (!cnb->nb.notifier_call)
                return;
        crete_event_notifier_unregister(vnic_priv->coredev, cnb);
        cnb->nb.notifier_call = NULL;
}
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
/* Kernel tx queue N maps to virtqueue 2N+1 (rx queues use even slots). */
static int txq2vq(int txq)
{
        return 2 * txq + 1;
}
/* Kernel rx queue N maps to virtqueue 2N (tx queues use odd slots). */
static int rxq2vq(int rxq)
{
        return 2 * rxq;
}
/*
 * Set feature bit @fbit in vdev->features (a 64-bit mask).  The
 * constant/non-constant split lets out-of-range bits fail at compile time
 * when possible and trap at runtime otherwise.
 */
static inline void __vnic_set_bit(struct virtio_device *vdev, unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);
        vdev->features |= BIT_ULL(fbit);
}
/********************************virtio config ops*********************************************/
/* Map a virtio_device embedded in crete_vnic_priv back to its container. */
static inline struct crete_vnic_priv *to_vnic_net_device(struct virtio_device
                                                         *vdev)
{
        return container_of(vdev, struct crete_vnic_priv, vdev);
}
static inline struct crete_core_dev *to_vnic_core_device(struct virtio_device
*dev)
{
struct crete_vnic_priv *cpriv;
cpriv = to_vnic_net_device(dev);
return cpriv->coredev;
}
/*
 * Notify the device that new buffers are available on @vq by writing the
 * queue index to the doorbell mapped at vq->priv.  Always reports the
 * kick as delivered.
 */
static bool crete_vnic_kick_vq(struct virtqueue *vq)
{
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}
/* MSI-X handler: forward the interrupt to the vring owning this vector. */
static irqreturn_t crete_vnic_intr_handler(int irq, void *arg)
{
        struct crete_vnic_msix_info *info = arg;
        return vring_interrupt(irq, info->vq);
}
/*
 * Return the doorbell address used to notify queue @index.
 * NOTE(review): @index is ignored and every queue shares core_dev->db_base;
 * presumably the device demuxes on the queue id value written by
 * crete_vnic_kick_vq() -- confirm against the doorbell register layout.
 */
static void __iomem *crete_vnic_signal_queue_init_notify(struct crete_core_dev *core_dev,
                                                         u16 index)
{
        return core_dev->db_base;
}
/*
 * Allocate an MSI-X vector for signal queue @index and install
 * crete_vnic_intr_handler() on it.  On success info->irq/msix_vector are
 * filled in; on failure everything acquired here is released again.
 * Returns 0 on success or a negative errno.
 */
static int crete_vnic_signal_queue_init_irq(struct crete_vnic_msix_info *info,
                                            struct crete_core_dev *core_dev,
                                            u16 index)
{
        struct device *dev = core_dev->device;
        struct pci_dev *pdev = core_dev->pdev;
        struct crete_irq_info *irq_info;
        int vectorno;
        int ret;

        if (!info) {
                crete_err(dev, "info is null\n");
                return -EINVAL;
        }
        vectorno = crete_req_msixirq(core_dev);
        if (vectorno < 0) {
                crete_err(dev, "request irq vector failed\n");
                return -ENXIO;
        }
        irq_info = &core_dev->irq_info[vectorno];
        info->msix_vector = vectorno;
        info->irq = irq_info->vector;
        /* Fix: bound the name by the destination buffer, not a magic 256. */
        snprintf(info->msix_name, sizeof(info->msix_name), "crete-nic[%s]-%d",
                 pci_name(pdev), index);
        ret = request_irq(irq_info->vector, crete_vnic_intr_handler, 0,
                          info->msix_name, info);
        if (ret) {
                crete_free_msixirq(core_dev, vectorno);
                crete_err(dev, "enable irq failed\n");
                return ret;
        }
        irq_info->handler = crete_vnic_intr_handler;
        irq_info->requested = 1;
        return 0;
}
/*
 * Release the MSI-X vector and irq attached to a signal queue.  A vector
 * that was never requested (requested == 0) is ignored, making this safe
 * to call from generic unwind paths.
 */
static void crete_vnic_signal_queue_freeirq(int msix_vector,
                                            struct crete_core_dev *core_dev,
                                            struct crete_vnic_msix_info *info)
{
        struct crete_irq_info *irq_info = &core_dev->irq_info[msix_vector];

        if (!irq_info->requested)
                return;
        irq_info->requested = 0;
        free_irq(info->irq, info);
        crete_free_msixirq(core_dev, msix_vector);
}
/*
 * Program one hardware signal queue from the vring backing @vq via the
 * admin command channel.  Returns 0 on success, -EINVAL on failure.
 */
static int crete_vnic_signal_queue_create(struct crete_core_dev *core_dev,
                                          struct virtqueue *vq, int qid,
                                          int msix_vector)
{
        struct crete_queue_context qc;
        u64 desc_addr, driver_addr, device_addr;
        int ret = 0;
        int queue_size = core_dev->cap.qpcap.max_queue_size;
        struct pci_dev *pdev = core_dev->pdev;

        desc_addr = virtqueue_get_desc_addr(vq);
        driver_addr = virtqueue_get_avail_addr(vq);
        device_addr = virtqueue_get_used_addr(vq);
        dev_info(&pdev->dev,
                 "qpid[%d] desc_addr[0x%llx] driver_addr[0x%llx] vector[%d]\n",
                 qid, desc_addr, driver_addr, msix_vector);
        qc.qid = qid;
        qc.queue_size = queue_size;
        qc.cq_size = queue_size;
        qc.queue_vec = msix_vector;
        qc.queue_desc_base = desc_addr;
        /* NOTE(review): the used base is programmed with desc_addr while
         * device_addr (the real used-ring address) is computed but unused;
         * the commented-out line below suggests a deliberate change --
         * confirm against the device queue-context specification.
         */
        qc.queue_used_base = desc_addr;
        //qc.queue_used_base = device_addr;
        ret = crete_cmd_create_signal_queue(core_dev, &qc);
        if (ret < 0) {
                crete_err(&pdev->dev, "crete create queue failed ret:%d\n",
                          ret);
                ret = -EINVAL;
                goto err;
        }
err:
        return ret;
}
/*
 * Reset a single hardware queue; failures are logged but cannot be
 * propagated (void return).
 * Fix: removed the dead "ret = -EINVAL" store that was never read.
 */
static void crete_vnic_driver_resetqueue(struct crete_core_dev *core_dev,
                                         int qid)
{
        if (crete_reset_singlequeue(core_dev, qid))
                crete_err(core_dev->device, "crete reset queue id:%d failed\n",
                          qid);
}
/*
 * Tear down signal queues [0, endqid) in reverse creation order: release
 * the irq (if any), reset the hardware queue (if it was brought up), and
 * clear the per-queue bookkeeping.
 */
static void crete_vnic_reset_io_queue(struct crete_core_dev *core_dev,
                                      int endqid)
{
        struct crete_vnic_priv *vnic_info = netdev_priv(core_dev->netdev);
        struct crete_vnic_msix_info *info;
        int qid;

        for (qid = endqid - 1; qid >= 0; qid--) {
                info = &vnic_info->msix_info[qid];
                if (info->irq)
                        crete_vnic_signal_queue_freeirq(info->msix_vector,
                                                        core_dev, info);
                if (info->ready)
                        crete_vnic_driver_resetqueue(core_dev, qid);
                info->ready = 0;
                info->msix_vector = 0;
                info->irq = 0;
                info->vq = NULL;
        }
}
static void crete_vnic_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
vring_del_virtqueue(vq);
}
}
/*
 * Create virtqueue @index: allocate the vring, wire up the doorbell,
 * optionally request an MSI-X interrupt, then create and start the
 * matching hardware queue.
 *
 * Returns the virtqueue, NULL when @name is NULL (caller skips the slot),
 * or an ERR_PTR on failure.  On failure the interrupt acquired here is
 * released again; the vring itself stays on vdev->vqs and is deleted by
 * the caller's crete_vnic_del_vqs().
 */
static struct virtqueue *crete_vnic_setup_vq(struct virtio_device *vdev,
                                             unsigned int index,
                                             void (*callback)(struct virtqueue *vq),
                                             const char *name, bool ctx)
{
        struct crete_vnic_priv *vnic_info = to_vnic_net_device(vdev);
        struct crete_core_dev *core_dev = to_vnic_core_device(vdev);
        struct pci_dev *pdev = core_dev->pdev;
        struct crete_vnic_msix_info *info;
        struct virtqueue *vq;
        int total_vqs = 2 * core_dev->cap.qpcap.max_qp_num;
        u32 max_queue_size = core_dev->cap.qpcap.max_queue_size;
        int err;

        if (!name)
                return NULL;
        if (index >= total_vqs || index >= CRETE_VNIC_MQ_MAX)
                return ERR_PTR(-ENOENT);
        vq = vring_create_virtqueue(index, max_queue_size, PAGE_SIZE, vdev,
                                    true, true, ctx,
                                    crete_vnic_kick_vq, callback, name);
        if (!vq)
                return ERR_PTR(-ENOMEM);
        info = &vnic_info->msix_info[index];
        info->qid = index;
        info->vq = vq;
        vq->priv = (void __force *)
                crete_vnic_signal_queue_init_notify(core_dev, index);
        if (!vq->priv) {
                err = -ENOMEM;
                goto error_out;
        }
        if (callback) {
                err = crete_vnic_signal_queue_init_irq(info, core_dev, index);
                if (err) {
                        dev_err(&pdev->dev,
                                "crete_vnic_signal_queue_init_irq failed\n");
                        err = -ENOMEM;
                        goto error_out;
                }
        } else {
                /* Polled queue: no interrupt vector attached. */
                info->irq = 0;
                info->msix_vector = VIRTIO_MSI_NO_VECTOR;
                info->ready = 0;
        }
        err = crete_vnic_signal_queue_create(core_dev, vq, index,
                                             info->msix_vector);
        if (err) {
                dev_err(&pdev->dev, "create queue is error qid[%d]\n", index);
                err = -ENOMEM;
                goto error_free_irq;
        }
        info->ready = 1;
        err = crete_start_singlequeue(core_dev, index);
        if (err) {
                dev_err(&pdev->dev, "start queue [%d]is error\n", index);
                err = -ENOMEM;
                /* The hardware queue was created; reset it before bailing. */
                crete_vnic_driver_resetqueue(core_dev, index);
                info->ready = 0;
                goto error_free_irq;
        }
        return vq;
error_free_irq:
        /* Fix: the original leaked the MSI-X vector/irq of the failing
         * queue -- the caller's unwind only covers indices below this one.
         */
        if (info->irq) {
                crete_vnic_signal_queue_freeirq(info->msix_vector, core_dev,
                                                info);
                info->irq = 0;
                info->msix_vector = 0;
        }
error_out:
        return ERR_PTR(err);
}
/*
 * virtio find_vqs hook: create one virtqueue per non-NULL name.  Entries
 * with a NULL name are skipped (vqs[i] = NULL) and do not consume a queue
 * index.  On failure, every queue created so far is reset and all vrings
 * are deleted.
 * NOTE(review): the error unwind uses the loop index i as endqid, which
 * can exceed the number of created queues when some names are NULL; the
 * untouched slots are zeroed so the reset loop skips them, but the failing
 * queue's own irq is not released here -- verify against setup_vq.
 */
static int crete_vnic_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char *const names[],
                               const bool *ctx, struct irq_affinity *desc)
{
        struct crete_core_dev *core_dev = to_vnic_core_device(vdev);
        int i, err, queue_idx = 0;

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
                vqs[i] = crete_vnic_setup_vq(vdev, queue_idx++,
                                             callbacks[i], names[i], ctx ?
                                             ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto err_setup_vq;
                }
        }
        return 0;
err_setup_vq:
        crete_vnic_reset_io_queue(core_dev, i);
        crete_vnic_del_vqs(vdev);
        return err;
}
/*
 * Allocate the per-queue-pair sq/rq arrays, register NAPI contexts and
 * initialize scatterlists and per-queue statistics.
 * Returns 0 on success, -ENOMEM on allocation failure (sq is freed if rq
 * allocation fails).
 */
static int crete_vnic_alloc_queues(struct crete_vnic_priv *vnic_info,
                                   struct crete_core_dev *core_dev)
{
        struct net_device *netdev = core_dev->netdev;
        int i, max_queue_pairs;

        max_queue_pairs = core_dev->cap.qpcap.max_qp_num;
        vnic_info->sq =
                kcalloc(max_queue_pairs, sizeof(*vnic_info->sq), GFP_KERNEL);
        if (!vnic_info->sq)
                goto err;
        vnic_info->rq =
                kcalloc(max_queue_pairs, sizeof(*vnic_info->rq), GFP_KERNEL);
        if (!vnic_info->rq)
                goto err_rq;
        spin_lock_init(&vnic_info->refill_lock);
        INIT_DELAYED_WORK(&vnic_info->refill, crete_vnic_alloc_recv_buf);
        vnic_info->mergeable_rx_bufs = true;
        for (i = 0; i < max_queue_pairs; i++) {
                vnic_info->rq[i].pages = NULL;
/* NETIF_NAPI_ADD_NEWAPI selects the renamed NAPI registration helpers --
 * presumably defined by the build for recent kernels; confirm.
 */
#ifdef NETIF_NAPI_ADD_NEWAPI
                netif_napi_add_weight(netdev, &vnic_info->rq[i].napi, crete_vnic_poll,
                                      napi_weight);
                netif_napi_add_tx_weight(netdev, &vnic_info->sq[i].napi,
                                         crete_vnic_poll_tx,
                                         napi_tx ? napi_weight : 0);
#else
                netif_napi_add(netdev, &vnic_info->rq[i].napi, crete_vnic_poll,
                               napi_weight);
                netif_tx_napi_add(netdev, &vnic_info->sq[i].napi,
                                  crete_vnic_poll_tx,
                                  napi_tx ? napi_weight : 0);
#endif
                sg_init_table(vnic_info->rq[i].sg,
                              ARRAY_SIZE(vnic_info->rq[i].sg));
                ewma_pkt_len_init(&vnic_info->rq[i].mrg_avg_pkt_len);
                sg_init_table(vnic_info->sq[i].sg,
                              ARRAY_SIZE(vnic_info->sq[i].sg));
                u64_stats_init(&vnic_info->rq[i].stats.syncp);
                u64_stats_init(&vnic_info->sq[i].stats.syncp);
        }
        return 0;
err_rq:
        kfree(vnic_info->sq);
err:
        return -ENOMEM;
}
/*
 * Unregister all NAPI contexts and free the sq/rq arrays allocated by
 * crete_vnic_alloc_queues().
 */
static void crete_vnic_free_queues(struct crete_vnic_priv *vnic_info,
                                   struct crete_core_dev *core_dev)
{
        int qp, nqps = core_dev->cap.qpcap.max_qp_num;

        for (qp = 0; qp < nqps; qp++) {
                __netif_napi_del(&vnic_info->rq[qp].napi);
                __netif_napi_del(&vnic_info->sq[qp].napi);
        }
        /* __netif_napi_del() requires an RCU grace period before the
         * rq/sq memory can be reclaimed.
         */
        synchronize_net();
        kfree(vnic_info->rq);
        kfree(vnic_info->sq);
}
/*
 * Compute the minimum RX buffer length for @vq: spread a worst-case frame
 * (header + ethernet + VLAN + max IP MTU) across the ring entries, never
 * going below GOOD_PACKET_LEN of payload per buffer.
 */
static unsigned int crete_get_min_buf_len(struct virtqueue *vq)
{
        const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        unsigned int ring_entries = virtqueue_get_vring_size(vq);
        unsigned int worst_case = hdr_len + ETH_HLEN + VLAN_HLEN + IP_MAX_MTU;
        unsigned int per_entry = DIV_ROUND_UP(worst_case, ring_entries);

        return max(max(per_entry, hdr_len) - hdr_len,
                   (unsigned int)GOOD_PACKET_LEN);
}
/*
 * Build the temporary vq/callback/name/ctx tables, create every TX/RX
 * virtqueue through crete_vnic_find_vqs(), and record each queue in
 * vnic_info->rq[]/sq[].  The temporary tables are freed on both the
 * success and failure paths -- the labels below fall through by design.
 * Returns 0 on success or a negative errno.
 */
static int crete_vnic_setup_vqs(struct crete_vnic_priv *vnic_info)
{
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;
        bool *ctx;
        struct crete_core_dev *core_dev = vnic_info->coredev;

        /* One RX and one TX virtqueue per queue pair. */
        total_vqs = 2 * core_dev->cap.qpcap.max_qp_num;
        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs)
                goto err_vq;
        callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
        if (!callbacks)
                goto err_callback;
        names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
        if (!names)
                goto err_names;
        /* Per-vq context flags are only needed without big packets or with
         * mergeable RX buffers.
         */
        if (!vnic_info->big_packets || vnic_info->mergeable_rx_bufs) {
                ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        goto err_ctx;
        } else {
                ctx = NULL;
        }
        /* Parameters for control virtqueue, if any */
        /* Allocate/initialize parameters for send/receive virtqueues */
        for (i = 0; i < core_dev->cap.qpcap.max_qp_num; i++) {
                callbacks[rxq2vq(i)] = crete_vnic_recv_done;
                callbacks[txq2vq(i)] = crete_vnic_xmit_done;
                sprintf(vnic_info->rq[i].name, "vnic-input.%d", i);
                sprintf(vnic_info->sq[i].name, "vnic-output.%d", i);
                names[rxq2vq(i)] = vnic_info->rq[i].name;
                names[txq2vq(i)] = vnic_info->sq[i].name;
                if (ctx)
                        ctx[rxq2vq(i)] = true;
        }
        ret = crete_vnic_find_vqs(&vnic_info->vdev, total_vqs, vqs, callbacks,
                                  names, ctx, NULL);
        if (ret)
                goto err_find;
        for (i = 0; i < core_dev->cap.qpcap.max_qp_num; i++) {
                vnic_info->rq[i].vq = vqs[rxq2vq(i)];
                vnic_info->rq[i].min_buf_len = crete_get_min_buf_len(vnic_info->rq[i].vq);
                vnic_info->sq[i].vq = vqs[txq2vq(i)];
        }
        /* Success also falls through: the lookup tables are no longer
         * needed once the vq pointers have been copied out.
         */
err_find:
        kfree(ctx);
err_ctx:
        kfree(names);
err_names:
        kfree(callbacks);
err_callback:
        kfree(vqs);
err_vq:
        return ret;
}
/*
 * Allocate queue bookkeeping and create all virtqueues.  On virtqueue
 * setup failure the queue arrays are released again.
 * Returns 0 on success or a negative errno.
 */
static int crete_vnic_request_rings(struct crete_core_dev *core_dev)
{
        struct net_device *netdev = core_dev->netdev;
        struct crete_vnic_priv *priv = netdev_priv(netdev);
        struct pci_dev *pdev = core_dev->pdev;
        int ret;

        ret = crete_vnic_alloc_queues(priv, core_dev);
        if (ret) {
                dev_err(&pdev->dev, "crete_vnic_alloc_queues failed\n");
                return -ENOMEM;
        }
        ret = crete_vnic_setup_vqs(priv);
        if (ret) {
                dev_err(&pdev->dev, "crete_vnic_setup_vqs failed\n");
                crete_vnic_free_queues(priv, core_dev);
        }
        return ret;
}
/*
 * Counterpart of crete_vnic_request_rings(): delete the virtqueues first,
 * then release the rq/sq bookkeeping.  (Name typo kept: other callers in
 * this file reference it.)
 */
static void crete_vnic_distroy_rings(struct crete_core_dev *core_dev)
{
        struct crete_vnic_priv *priv = netdev_priv(core_dev->netdev);

        crete_vnic_del_vqs(&priv->vdev);
        crete_vnic_free_queues(priv, core_dev);
}
/*
 * Intersect the device's advertised feature bits with the features this
 * driver implements, preserving the virtio transport feature range, and
 * store the result in cap.driver_features.
 * Fix: the original ended the ARRAY_SIZE() assignment with a comma
 * operator instead of a semicolon (harmless but a typo) via an
 * intermediate variable that is not needed.
 */
static void crete_nic_negotiate_driver_features(struct crete_core_dev *core_dev)
{
        u64 device_features = core_dev->cap.hw_features;
        u64 driver_features = 0;
        int i;

        /* Figure out what features the driver supports. */
        for (i = 0; i < ARRAY_SIZE(crete_vnic_features); i++) {
                unsigned int f = crete_vnic_features[i];

                BUG_ON(f >= 64);
                driver_features |= BIT_ULL(f);
        }
        core_dev->cap.driver_features = driver_features & device_features;
        /* Transport features always preserved to pass to finalize_features. */
        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
                if (device_features & BIT_ULL(i))
                        core_dev->cap.driver_features |= BIT_ULL(i);
}
/* Test whether feature bit @fbit was negotiated for this device. */
static inline bool crete_vnic_has_feature(struct crete_core_dev *core_dev,
                                          unsigned int fbit)
{
        return !!(core_dev->cap.driver_features & BIT_ULL(fbit));
}
/*
 * Translate the negotiated virtio feature bits into netdev feature flags,
 * install the netdev/ethtool ops, and record which guest offloads are
 * currently enabled/capable.  Always returns 0.
 */
static int crete_build_vnic_netdev_features(struct net_device *netdev)
{
        struct crete_vnic_priv *priv = netdev_priv(netdev);
        struct crete_core_dev *core_dev = priv->coredev;
        struct pci_dev *pdev = core_dev->pdev;
        int i;

        netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
        netdev->features = NETIF_F_HIGHDMA;
        crete_build_ptys2ethtool_map();
        crete_set_netdev_ops(netdev);
        crete_set_ethtool_ops(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);
        /* Do we support "hardware" checksums? */
        if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
                if (csum)
                        netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
                /* Individual feature bits: what can host handle? */
                if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_HOST_TSO4))
                        netdev->hw_features |= NETIF_F_TSO;
                if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_HOST_TSO6))
                        netdev->hw_features |= NETIF_F_TSO6;
                if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_HOST_ECN))
                        netdev->hw_features |= NETIF_F_TSO_ECN;
                netdev->features |= NETIF_F_GSO_ROBUST;
                if (gso)
                        netdev->features |=
                                netdev->hw_features & NETIF_F_ALL_TSO;
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
        if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_GUEST_CSUM))
                netdev->features |= NETIF_F_RXCSUM;
        if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_GUEST_TSO4) ||
            crete_vnic_has_feature(core_dev, VIRTIO_NET_F_GUEST_TSO6))
                netdev->features |= NETIF_F_GRO_HW;
        if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
                netdev->hw_features |= NETIF_F_GRO_HW;
        netdev->vlan_features = netdev->features;
        /* Remember which guest offloads the device negotiated. */
        for (i = 0; i < ARRAY_SIZE(crete_vnic_guest_offloads); i++)
                if (crete_vnic_has_feature
                    (core_dev, crete_vnic_guest_offloads[i]))
                        set_bit(crete_vnic_guest_offloads[i],
                                &priv->guest_offloads);
        priv->guest_offloads_capable = priv->guest_offloads;
        return 0;
}
/*
 * Program the initial MAC: use the address from the cached net config when
 * VIRTIO_NET_F_MAC was negotiated, otherwise fall back to a random one.
 * Always returns 0.
 * NOTE(review): writing netdev->dev_addr with memcpy() is rejected on
 * kernels where dev_addr is const (use eth_hw_addr_set() there) -- confirm
 * the target kernel versions.
 */
static int crete_vnic_init_mac_addr(struct net_device *netdev)
{
        struct crete_vnic_priv *priv = netdev_priv(netdev);
        struct crete_net_common_cfg *netcfg = &priv->net_cfg;
        struct crete_core_dev *core_dev = priv->coredev;

        if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_MAC))
                memcpy(netdev->dev_addr, netcfg->mac, ETH_ALEN);
        else
                eth_hw_addr_random(netdev);
        return 0;
}
int crete_vnic_set_mac_address(struct net_device *dev, void *p);
/*
 * Fetch the device's default MAC via the admin channel and apply it
 * through the normal set_mac_address path.
 * Returns 0 on success or the underlying negative errno (callers fall
 * back to a random MAC on failure).
 */
static int crete_vnic_init_mac_addr_new(struct net_device *netdev)
{
        struct crete_vnic_priv *priv = netdev_priv(netdev);
        struct crete_core_dev *core_dev = priv->coredev;
        struct pci_dev *pdev = core_dev->pdev;
        struct sockaddr addr;
        int err;

        memset(&addr, 0, sizeof(addr));
        /* init get the mac default address */
        err = crete_get_dev_macattr(core_dev, &addr);
        if (err) {
                /* Fix: corrected "faile" typo; propagate the real error. */
                dev_err(&pdev->dev, "vnic get default mac address failed\n");
                return err;
        }
        /* config the default mac */
        err = crete_vnic_set_mac_address(netdev, &addr);
        if (err) {
                /* Fix: this path sets (not gets) the MAC address. */
                dev_err(&pdev->dev, "vnic set default mac address failed\n");
                return err;
        }
        return 0;
}
static int crete_vnic_init_mtu(struct net_device *netdev)
{
struct crete_vnic_priv *priv = netdev_priv(netdev);
struct crete_core_dev *core_dev = priv->coredev;
struct pci_dev *pdev = core_dev->pdev;
struct crete_net_common_cfg *netcfg = &priv->net_cfg;
int vnic_mtu = 1500;
int err = 0;
/* MTU range: 68 - 65535 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU;
if (crete_vnic_has_feature(core_dev, VIRTIO_NET_F_MTU)) {
vnic_mtu = netcfg->mtu;
if (vnic_mtu < netdev->min_mtu) {
/* Should never trigger: MTU was previously validated
* in virtnet_validate.
*/
dev_err(&pdev->dev,
"device MTU appears to have changed it is now %d < %d",
vnic_mtu, netdev->min_mtu);
err = -EINVAL;
return err;
}
netdev->mtu = vnic_mtu;
netdev->max_mtu = ETH_MAX_MTU;
} else {
netdev->mtu = 1500;
netdev->max_mtu = ETH_MAX_MTU;
}
err = crete_set_port_mtu(core_dev, CRETE_SET_MTU_MACPORT, MAC_MTU(netdev->mtu));
if (err) {
dev_err(&pdev->dev,"init port mtu (%u) error info\n", netdev->mtu);
return err;
}
return 0;
}
/* Placeholder: MRU configuration is not implemented yet; always succeeds. */
static int crete_vnic_init_mru(struct net_device *netdev)
{
        return 0;
}
#define NEW_MAC_ADDRESS "\x56\x48\x4d\x47\x00\x00"
#define NEW_MAC_ADDRESS1 "\x56\x48\x4d\x47\x00\x01"
/*
 * Fill @netcfg with hard-coded defaults (placeholder until the values can
 * be queried from the device).  The first caller gets NEW_MAC_ADDRESS;
 * every later caller gets NEW_MAC_ADDRESS1.  Always returns 0.
 * NOTE(review): the static counter is not protected against concurrent
 * probes -- confirm probing is serialized.
 */
int crete_vport_get_net_comm_cfg(struct crete_core_dev *cdev,
                                 struct crete_net_common_cfg *netcfg)
{
        static int mac_index;

        netcfg->vportid = 0;
        netcfg->speed = 0x01;
        netcfg->duplex = 0x01;
        netcfg->mtu = 1500;
        netcfg->vlanid = 0xffff;
        memcpy(netcfg->mac,
               mac_index ? NEW_MAC_ADDRESS1 : NEW_MAC_ADDRESS, ETH_ALEN);
        mac_index++;
        crete_info(cdev->device,
                   "vportid[%d] speed[0x%x] duplex[0x%x] mtu[%d] vlanid[%d] mac[%pM]\n",
                   netcfg->vportid, netcfg->speed, netcfg->duplex,
                   netcfg->mtu, netcfg->vlanid, netcfg->mac);
        return 0;
}
/*
 * Load the common network config and initialize MAC, MTU and MRU from it.
 * A MAC init failure is non-fatal: a random address is used instead.
 * Returns 0 on success or a negative errno from the cfg/MTU/MRU steps.
 */
static int crete_vnic_init_net_cfg(struct net_device *netdev)
{
        struct crete_vnic_priv *priv = netdev_priv(netdev);
        struct crete_core_dev *core_dev = priv->coredev;
        struct pci_dev *pdev = core_dev->pdev;
        int ret;

        ret = crete_vport_get_net_comm_cfg(core_dev, &priv->net_cfg);
        if (ret) {
                dev_err(&pdev->dev, "get net cfg erro ret %d\n", ret);
                return ret;
        }
        if (crete_vnic_init_mac_addr_new(netdev)) {
                dev_err(&pdev->dev, "Unable to initialize mac address. Use the ramdom mac\n");
                crete_vnic_init_mac_addr(netdev);
        }
        ret = crete_vnic_init_mtu(netdev);
        if (ret) {
                dev_err(&pdev->dev, "Unable to initialize mtu.\n");
                return ret;
        }
        ret = crete_vnic_init_mru(netdev);
        if (ret)
                dev_err(&pdev->dev, "Unable to initialize mru.\n");
        return ret;
}
/*
 * Release callback for the embedded virtio_device.  The priv area lives
 * inside the netdev allocation, so there is nothing to free here; just
 * log the release.
 */
static void crete_virtio_release_dev(struct device *_d)
{
        struct virtio_device *vdev =
                container_of(_d, struct virtio_device, dev);
        struct crete_vnic_priv *priv =
                container_of(vdev, struct crete_vnic_priv, vdev);

        dev_info(priv->coredev->device, "release virtio device\n");
}
/*
 * Initialize the embedded virtio_device and driver-private state on the
 * freshly created netdev, then push the negotiated features and the
 * FEATURES_OK status to the device and size the real TX/RX queue counts.
 * Returns 0 on success or a negative errno from the device commands.
 */
static int crete_vnic_set_priv(struct net_device *netdev)
{
        struct crete_vnic_priv *priv = netdev_priv(netdev);
        struct crete_core_dev *core_dev = priv->coredev;
        struct pci_dev *pdev = core_dev->pdev;
        int maxqueue = 2 * core_dev->cap.qpcap.max_qp_num;
        struct virtio_device *vdev = &priv->vdev;
        int ret = 0;
        int i;

        vdev->priv = priv;
        vdev->dev.parent = &pdev->dev;
        vdev->dev.release = crete_virtio_release_dev;
        priv->curr_queue_pairs = core_dev->ring_size;
        priv->max_queue_pairs = core_dev->cap.qpcap.max_qp_num;
        //crete_vnic_priv->vdev.config = &crete_vnic_config_ops;
        //INIT_LIST_HEAD(&crete_vnic_priv->vvdev->featuresirtqueues);
        spin_lock_init(&priv->lock);
        vdev->id.vendor = CRETE_VIRTIO_VENDOR_ID;
        vdev->id.device = CRETE_VIRTIO_DEVICE_ID;
        vdev->features = core_dev->cap.driver_features;
        spin_lock_init(&vdev->config_lock);
        vdev->config_enabled = false;
        vdev->config_change_pending = false;
        INIT_LIST_HEAD(&vdev->vqs);
        spin_lock_init(&vdev->vqs_list_lock);
        /* No vector or vq assigned to any slot yet. */
        for (i = 0; i < maxqueue; i++) {
                priv->msix_info[i].irq = 0;
                priv->msix_info[i].msix_vector = VIRTIO_MSI_NO_VECTOR;
                priv->msix_info[i].ready = 0;
                priv->msix_info[i].vq = NULL;
        }
        ret = crete_cmd_set_features(core_dev, CRETE_VIRTIO_NET_DEV_FEAT,
                                     vdev->features);
        if (ret) {
                dev_err(&pdev->dev, "crete set features 0x%llx failed\n",
                        vdev->features);
                goto err;
        }
        ret = crete_set_status(core_dev, CRETE_NET_DEV_STATUS,
                               CRETE_NET_DEV_FEATURE_OK);
        if (ret) {
                dev_err(&pdev->dev, "crete cmd set status %u failed\n",
                        CRETE_NET_DEV_FEATURE_OK);
                goto err;
        }
        priv->big_packets = true;
        priv->mergeable_rx_bufs = true;
        priv->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        priv->any_header_sg = true;
        netif_set_real_num_tx_queues(netdev, priv->curr_queue_pairs);
        netif_set_real_num_rx_queues(netdev, priv->curr_queue_pairs);
        return ret;
err:
        return ret;
}
/*
 * Clamp the device-reported queue-pair count to the driver maximum and
 * size ring_size to min(online CPUs, queue pairs).
 * Returns 0 on success, -1 when the resulting ring size is zero.
 * Fix: the local maxqpnums was not updated after clamping, so ring_size
 * could still exceed CRETE_VNIC_MAX_QUEUES on boxes with many CPUs.
 */
int crete_vnic_trim_rings(struct crete_core_dev *cdev)
{
        int cpunums = num_online_cpus();
        int maxqpnums = cdev->cap.qpcap.max_qp_num;

        if (maxqpnums > CRETE_VNIC_MAX_QUEUES) {
                maxqpnums = CRETE_VNIC_MAX_QUEUES;
                cdev->cap.qpcap.max_qp_num = CRETE_VNIC_MAX_QUEUES;
                crete_err(cdev->device, "max qp greater than %d\n",
                          CRETE_VNIC_MAX_QUEUES);
        }
        cdev->ring_size = min(cpunums, maxqpnums);
        if (!cdev->ring_size) {
                crete_err(cdev->device, "ring size zero not right\n");
                return -1;
        }
        return 0;
}
void crete_vnic_set_rx_mode_work(struct work_struct *work);
static int crete_vnic_net_init(struct crete_core_dev *core_dev)
{
struct pci_dev *pdev = core_dev->pdev;
struct device *dev = &pdev->dev;
struct crete_vnic_priv *priv;
struct net_device *netdev;
int err;
/* reserve the ring size */
err = crete_vnic_trim_rings(core_dev);
if (err)
return err;
netdev = crete_vnic_create_netdev(core_dev);
if (!netdev) {
dev_err(dev, "crete_create_netdev failed\n");
return -ENOMEM;
}
priv = netdev_priv(netdev);
crete_build_common_netdev(netdev);
err = crete_build_vnic_netdev_features(netdev);
if (err) {
dev_err(dev, "build netdev features failed, %d\n", err);
goto err_crete_create_netdev;
}
err = crete_get_func_caps(core_dev);
if (err < 0) {
dev_err(dev, "init func err\n");
goto err_crete_create_netdev;
}
err = crete_vnic_set_priv(netdev);
if (err) {
dev_err(dev, "Unable to initialize prive.\n");
goto err_crete_create_netdev;
}
err = crete_vnic_init_net_cfg(netdev);
if (err) {
dev_err(dev, "Unable to initialize mac address.\n");
goto err_init_set_priv;
}
err = crete_vnic_request_rings(core_dev);
if (err) {
dev_err(dev, "Reserve rings failed\n");
goto err_init_set_priv;
}
priv->wq = create_singlethread_workqueue("crete_nic_wq");
if (!priv->wq) {
dev_err(dev, "Reserve workqueue faild\n");
err = -ENOMEM;
goto err_init_set_priv;
}
/* init rx mode work */
INIT_WORK(&priv->set_rx_mode_work, crete_vnic_set_rx_mode_work);
err = crete_vnic_register_event_handler(netdev);
if (err) {
dev_err(dev, "register evnet faild\n");
crete_vnic_unregister_event_handler(netdev);
goto err_init_set_priv;
}
return 0;
//err_request_rings:
// crete_vnic_distroy_rings(core_dev);
err_init_set_priv:
err_crete_create_netdev:
priv = netdev_priv(netdev);
crete_vnic_priv_cleanup(priv);
return err;
}
/*
 * Undo crete_vnic_net_init(): detach event handling, destroy the link
 * workqueue and the rings, and release private state.  Always returns 0.
 * NOTE(review): priv->wq is written after crete_vnic_priv_cleanup(priv);
 * if that cleanup releases the netdev (and thus priv) this is a
 * use-after-free -- confirm what crete_vnic_priv_cleanup() frees.
 */
int crete_vnic_net_del(struct crete_core_dev *core_dev)
{
        struct net_device *netdev = core_dev->netdev;
        struct crete_vnic_priv *priv;
        struct workqueue_struct *wq;

        priv = netdev_priv(netdev);
        wq = priv->wq;
        crete_vnic_unregister_event_handler(netdev);
        destroy_workqueue(wq);
        crete_vnic_distroy_rings(core_dev);
        crete_vnic_priv_cleanup(priv);
        priv->wq = NULL;
        return 0;
}
/*
 * Auxiliary-bus probe: walk the device through the STARTUP ->
 * FEATURES_OK -> DEV_OK status sequence, query its capabilities, create
 * and register the netdev, and configure the active queue pairs.
 * Returns 0 on success or a negative errno.
 */
static int crete_vnic_probe(struct auxiliary_device *adev,
                            const struct auxiliary_device_id *id)
{
        struct crete_aux_dev *cadev =
                container_of(adev, struct crete_aux_dev, adev);
        struct crete_core_dev *core_dev = cadev->core_dev;
        struct pci_dev *pdev = core_dev->pdev;
        struct device *dev = &pdev->dev;
        enum crete_feature_opcode op = CRETE_VIRTIO_NET_DEV_FEAT;
        enum crete_dev_type crete_pci_type = CRETE_VNET_DEV;
        struct crete_vnic_priv *priv = NULL;
        int ret;

        ret = crete_nic_set_device_type(core_dev, crete_pci_type);
        if (ret) {
                dev_err(dev, "Failed to set device type pnic\n");
                return -EINVAL;
        }
        ret = crete_set_status(core_dev, CRETE_NET_DEV_STATUS,
                               CRETE_NET_DEV_STARTUP);
        if (ret) {
                dev_err(dev, "crete cmd set status %u failed\n",
                        CRETE_NET_DEV_STARTUP);
                return -EINVAL;
        }
        /* get core dev cap init */
        ret = crete_net_get_max_supported_vqs(core_dev);
        if (ret) {
                dev_err(dev, "crete core dev get qpcap failed\n");
                return -EINVAL;
        }
        ret = crete_net_get_supported_features(core_dev, op);
        if (ret) {
                dev_err(dev, "crete core dev get features failed\n");
                return -EINVAL;
        }
        crete_nic_negotiate_driver_features(core_dev);
        ret = crete_vnic_net_init(core_dev);
        if (ret) {
                dev_err(dev, "Failed to init net dev management\n");
                goto err_return;
        }
        ret = register_netdev(core_dev->netdev);
        if (ret) {
                dev_err(dev, "register_netdev failed, %d\n", ret);
                goto err_net_init;
        }
        priv = netdev_priv(core_dev->netdev);
        /* config the netdev queue pairs */
        ret = crete_vnic_set_queues(core_dev->netdev, priv->curr_queue_pairs);
        if (ret) {
                dev_err(dev, "crete vnic set queues %d failed\n", priv->curr_queue_pairs);
                goto err_net_registert;
        }
        /* Carrier stays off until a link-up event arrives. */
        netif_carrier_off(core_dev->netdev);
        ret =
            crete_set_status(core_dev, CRETE_NET_DEV_STATUS,
                             CRETE_NET_DEV_DEV_OK);
        if (ret) {
                dev_err(dev, "Failed to set device CRETE_NET_DEV_DEV_OK\n");
                goto err_net_registert;
        }
#if 0
        /* carrier off reporting is important to ethtool even BEFORE open */
        if (crete_vnic_get_link_state(core_dev))
                netif_carrier_on(core_dev->netdev);
        else
                netif_carrier_off(core_dev->netdev);
#endif
        dev_set_drvdata(&adev->dev, core_dev);
        //crete_vnic_debugfs_add_dev(cvm);
        //crete_vnic_debugfs_add_hcap(cvm);
        return 0;
err_net_registert:
        unregister_netdev(core_dev->netdev);
err_net_init:
        crete_vnic_net_del(core_dev);
err_return:
        return ret;
}
/*
 * Quiesce every signal queue, then drop the device status back to
 * STARTUP.  A failed status write is logged and WARNed but not fatal.
 */
static void crete_vnic_reset_device(struct crete_core_dev *core_dev)
{
        enum crete_net_status op = CRETE_NET_DEV_STARTUP;
        int nvqs = core_dev->cap.qpcap.max_qp_num * 2;
        int ret;

        crete_vnic_reset_io_queue(core_dev, nvqs);
        ret = crete_set_status(core_dev, CRETE_NET_DEV_STATUS, op);
        if (ret) {
                crete_err(core_dev->device, "crete cmd set status %u failed\n",
                          op);
                WARN_ON(1);
        }
}
/*
 * Common teardown for remove(): reset the device queues, free any buffers
 * still held by the rings, then destroy driver state and the netdev.
 */
static void crete_remove_vq_common(struct crete_core_dev *core_dev)
{
        struct net_device *netdev = core_dev->netdev;
        struct crete_vnic_priv *priv = netdev_priv(netdev);

        crete_vnic_reset_device(core_dev);
        /* Free unused buffers in both send and recv, if any. */
        free_unused_bufs(priv); //todo
        free_receive_bufs(priv); //todo
        free_receive_page_frags(priv); //todo
        crete_vnic_net_del(core_dev);
        free_netdev(netdev);
        core_dev->netdev = NULL;
}
/*
* about the anolis 5.10.134-14 kernel version
* auxliary define with return int value
*/
#ifdef SNIC_ANOLIS_VERSION14
/*
 * Auxiliary-bus remove (int-returning variant for the anolis kernel):
 * stop deferred work, unregister the netdev, and tear everything down.
 */
static int crete_vnic_remove(struct auxiliary_device *adev)
{
        struct crete_aux_dev *cadev =
                container_of(adev, struct crete_aux_dev, adev);
        struct crete_core_dev *core_dev = cadev->core_dev;
        struct crete_vnic_priv *priv = netdev_priv(core_dev->netdev);

        cancel_work_sync(&priv->set_rx_mode_work);
        unregister_netdev(core_dev->netdev);
        crete_remove_vq_common(core_dev);
        dev_set_drvdata(&adev->dev, NULL);
        dev_info(&core_dev->pdev->dev, "crete vnic remove\n");
        return 0;
}
#else
/*
 * Auxiliary-bus remove (void-returning variant for mainline kernels):
 * stop deferred work, unregister the netdev, and tear everything down.
 */
static void crete_vnic_remove(struct auxiliary_device *adev)
{
        struct crete_aux_dev *cadev =
                container_of(adev, struct crete_aux_dev, adev);
        struct crete_core_dev *core_dev = cadev->core_dev;
        struct crete_vnic_priv *priv = netdev_priv(core_dev->netdev);

        cancel_work_sync(&priv->set_rx_mode_work);
        unregister_netdev(core_dev->netdev);
        crete_remove_vq_common(core_dev);
        dev_set_drvdata(&adev->dev, NULL);
        dev_info(&core_dev->pdev->dev, "crete vnic remove\n");
}
#endif
/* Match table: bind to the core driver's vnic auxiliary device. */
static const struct auxiliary_device_id crete_vnic_id_table[] = {
        {.name = CRETE_VNIC_AUX_DEV_NAME,},
        {},
};
/* Auxiliary driver glue: probe/remove defined above. */
static struct auxiliary_driver crete_vnic_driver = {
        .name = CRETE_VNIC_DRV_NAME,
        .probe = crete_vnic_probe,
        .remove = crete_vnic_remove,
        .id_table = crete_vnic_id_table,
};
/* Module exit: detach the driver from the auxiliary bus. */
static void __exit crete_vnic_cleanup(void)
{
        auxiliary_driver_unregister(&crete_vnic_driver);
}
module_exit(crete_vnic_cleanup);
/* Module entry: register the driver on the auxiliary bus. */
static int __init crete_vnic_init(void)
{
        int err = auxiliary_driver_register(&crete_vnic_driver);

        if (err)
                pr_err("%s: aux driver register failed: %pe\n",
                       CRETE_NIC_DRV_NAME, ERR_PTR(err));
        return err;
}
module_init(crete_vnic_init);
/* Standard module metadata. */
MODULE_DESCRIPTION(CRETE_NIC_DRV_DESCRIPTION);
MODULE_AUTHOR("jaguarmicro.com");
MODULE_LICENSE("GPL");