// SPDX-License-Identifier: GPL-2.0-only
/*
 * JaguarMicro virtual dev driver for virtio dataplane offloading
 *
 * Copyright (C) 2022 JaguarMicro Corporation.
 *
 * Author: Angus Chen <angus.chen@jaguarmicro.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/vdpa.h>
#include <linux/iommu.h>
#include <linux/vhost.h>
#include <linux/dma-iommu.h>
#include <linux/device.h>
#include "crete_vdpa_dev.h"
#include "../crete-core/crete_cmd_if.h"
#include <uapi/linux/vdpa.h>
#define JMND_PCI_DEV_TYPE_MASK 0x0010

static int batch_vq_state = 1;
module_param(batch_vq_state, int, 0444);
MODULE_PARM_DESC(batch_vq_state, "Batched get vq state: 1 - enable; 0 - disable");

static int crete_queue_size = 512;
module_param(crete_queue_size, int, 0444);
MODULE_PARM_DESC(crete_queue_size, "Queue size: 512 - default; 1024 - max");

static bool vf_irq = true;
module_param(vf_irq, bool, 0444);
MODULE_PARM_DESC(vf_irq, "Enable the interrupt for the VF: 1 - auto; 0 - disable the interrupt");

static bool vf_packed = false;
module_param(vf_packed, bool, 0444);
MODULE_PARM_DESC(vf_packed, "Ring layout: 1 - packed; 0 - split");
static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};
static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA; this driver will not
	 * drive devices with a legacy interface.
	 */

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	/* the JaguarMicro device-type bit overrides the virtio ID above */
	if (pdev->device & JMND_PCI_DEV_TYPE_MASK)
		dev_type = VIRTIO_ID_BLOCK;
	else
		dev_type = VIRTIO_ID_NET;

	return dev_type;
}
u32 crete_init_config_size(struct crete_vdpa *hw)
{
	u32 config_size;

	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = sizeof(struct virtio_net_config);
		break;
	case VIRTIO_ID_BLOCK:
		config_size = sizeof(struct virtio_blk_config);
		break;
	default:
		config_size = 0;
		pr_err("VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}
int crete_vdpa_driver_resetqueue(struct vdpa_device *vdpa_dev,
				 int startqueue, int endqueue);

struct crete_adapter *vd_to_adapter(struct crete_vdpa *hw)
{
	return container_of(hw, struct crete_adapter, vd);
}

struct crete_core_dev *vd_to_coredev(struct crete_vdpa *hw)
{
	struct crete_adapter *adapter = vd_to_adapter(hw);
	struct crete_core_dev *core_dev = adapter->cdev;

	return core_dev;
}

static struct crete_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct crete_adapter, vdpa);
}

static struct crete_vdpa *vdpa_to_vd(struct vdpa_device *vdpa_dev)
{
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vd;
}
static u64 __maybe_unused crete_vdpa_get_log_base(struct crete_vdpa *vd)
{
	u64 log_base;

	log_base = ((u64)vd->mig_log.log_base_h << 32) | vd->mig_log.log_base_l;
	return log_base;
}

static u64 __maybe_unused crete_vdpa_get_log_size(struct crete_vdpa *vd)
{
	u64 log_size;

	log_size = ((u64)vd->mig_log.iova_size_h << 32) | vd->mig_log.iova_size_l;
	return log_size;
}
static int __maybe_unused crete_vdpa_notify_handler(struct notifier_block *nb,
						    unsigned long ecode, void *data)
{
	struct crete_nb *cnb = container_of(nb, struct crete_nb, nb);
	struct crete_vdpa *vd = container_of(cnb, struct crete_vdpa, cnb);
	struct crete_adapter *adapter = vd_to_adapter(vd);
	struct crete_core_dev *cdev = adapter->cdev;
	struct crete_event_entry *cee = data;
	uint8_t event_sub_type = cee->event_sub_type;
	int ret;
	struct device *dev = &cdev->pdev->dev;

	dev_dbg(dev, "%s: event code %lu sub_type %d\n", __func__,
		ecode, event_sub_type);
	if (ecode == CRETE_EVENT_PORT_LINK_CHANGE) {
		switch (event_sub_type) {
		case CRETE_EVENT_VNET_LINK_CHANGE:
		case CRETE_EVENT_VNET_RESET:
			if (vd->config_cb.callback)
				vd->config_cb.callback(vd->config_cb.private);
			ret = NOTIFY_OK;
			break;
		default:
			return NOTIFY_DONE;
		}
		return ret;
	}
	return 0;
}
static int crete_vdpa_register_event_handler(struct crete_vdpa *vd)
{
#if 0
	struct crete_adapter *adapter = vd_to_adapter(vd);
	struct crete_core_dev *cdev = adapter->cdev;
	struct crete_nb *cnb = &vd->cnb;
	struct notifier_block *nb = &cnb->nb;
	int err;
	struct device *dev = &cdev->pdev->dev;

	if (!nb->notifier_call) {
		nb->notifier_call = crete_vdpa_notify_handler;
		cnb->event_type = CRETE_EVENT_VNET_LINK_CHANGE;
		err = crete_event_notifier_register(cdev, cnb);
		if (err) {
			nb->notifier_call = NULL;
			dev_err(dev,
				"failed to register crete event handler: %ps\n",
				ERR_PTR(err));
			return -EINVAL;
		}
		dev_dbg(dev, "crete event handler registered\n");
	}
#endif
	return 0;
}

static void crete_vdpa_unregister_event_handler(struct crete_vdpa *vd)
{
#if 0
	struct crete_adapter *adapter = vd_to_adapter(vd);
	struct crete_core_dev *cdev = adapter->cdev;
	struct crete_nb *cnb = &vd->cnb;
	struct notifier_block *nb = &cnb->nb;

	if (nb->notifier_call) {
		crete_event_notifier_unregister(cdev, cnb);
		nb->notifier_call = NULL;
	}
#endif
}
int crete_vdpa_set_hwstatus(struct crete_core_dev *core_dev, u8 status,
			    struct device *dev)
{
	int ret;
	u8 dev_status = 0;

	ret = crete_set_status(core_dev, CRETE_NET_DEV_STATUS, status);
	if (ret) {
		crete_err(dev, "crete cmd set status %u failed\n", status);
		return ret;
	}

	ret = crete_get_status(core_dev, CRETE_NET_DEV_STATUS, &dev_status);
	if (ret)
		crete_err(dev, "crete cmd get status failed\n");

	return ret;
}
static int __maybe_unused crete_vdpa_add_status(struct crete_vdpa *hw, u8 status)
{
	u8 hw_status;
	int ret = 0;
	struct crete_core_dev *cdev = vd_to_coredev(hw);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		hw_status = CRETE_NET_DEV_DEV_OK;
		return crete_vdpa_set_hwstatus(cdev, hw_status,
					       &cdev->pdev->dev);
	}

	return ret;
}
void crete_reset_netdev(struct crete_vdpa *hw)
{
	struct crete_adapter *adp = vd_to_adapter(hw);

	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;
	crete_reset_dev(adp->cdev, CRETE_RESET_NET_DEV);
}

u64 crete_vdpa_get_features(struct crete_vdpa *hw)
{
	struct crete_vnet_hw_cap *hcap = hw->hcap;

	return hcap->hw_features;
}

void crete_notify_queue(struct crete_vdpa *hw, u16 qid)
{
	iowrite16(qid, hw->vring[qid].notify_addr);
}
static void crete_reset_vring(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	int i;

	crete_vdpa_driver_resetqueue(vdpa_dev, 0, vd->num_queues);

	for (i = 0; i < vd->num_queues; i++) {
		vd->vring[i].ready = 0;
		vd->vring[i].last_avail_idx = 0;
		vd->vring[i].desc = 0;
		vd->vring[i].avail = 0;
		vd->vring[i].used = 0;
		vd->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vd->vring[i].irqvec = 0xFFFF;
		vd->vring[i].cb.callback = NULL;
		vd->vring[i].cb.private = NULL;
	}
}
static irqreturn_t crete_vdpa_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}
static void crete_driver_freeirq_on(struct vdpa_device *vdpa_dev, int qid)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct vring_info *vfinfo = vd->vring;
	struct crete_irq_info *irq_info;

	/* bail out before indexing irq_info[] with a stale irqvec */
	if (vfinfo[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	irq_info = &cdev->irq_info[vfinfo[qid].irqvec];
	irq_info->requested = 0;
	free_irq(vfinfo[qid].irq, &vfinfo[qid]);
	crete_free_msixirq(cdev, vfinfo[qid].irqvec);
	vfinfo[qid].irqvec = 0;
	vfinfo[qid].irq = VIRTIO_MSI_NO_VECTOR;
}

static void crete_driver_freeirq(struct vdpa_device *vdpa_dev, int queuenum)
{
	int i;

	for (i = 0; i < queuenum; i++)
		crete_driver_freeirq_on(vdpa_dev, i);
}
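/*
 * Request one MSI-X vector per IO virtqueue and wire it to
 * crete_vdpa_intr_handler.  The ctrl queue is excluded here (it is
 * handled by crete_driver_init_ctl_irq()).  On any failure the
 * vectors already requested are released via crete_driver_freeirq().
 */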
int crete_driver_initirq(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct vring_info *vfinfo = vd->vring;
	struct device *dev = &cdev->pdev->dev;
	int i, vectorno, ret = 0;
	int queuenum = vd->qp_nums << 1;
	struct pci_dev *pdev = cdev->pdev;
	struct crete_irq_info *irq_info;

	/* the ctrl queue must not request an MSI-X vector */
	for (i = 0; i < queuenum; i++) {
		vectorno = crete_req_msixirq(cdev);
		if (vectorno < 0) {
			crete_err(dev, "request irq vector failed\n");
			ret = -ENXIO;
			goto err1;
		}
		snprintf(vd->vring[i].msix_name, 256, "crete-vdpa[%s]-%d",
			 pci_name(pdev), i);

		irq_info = &cdev->irq_info[vectorno];
		snprintf(irq_info->name, 256, "crete-vdpa[%s]-%d",
			 pci_name(pdev), i);
		ret = request_irq(irq_info->vector, crete_vdpa_intr_handler, 0,
				  vd->vring[i].msix_name, &vfinfo[i]);
		if (ret) {
			crete_free_msixirq(cdev, vectorno);
			crete_err(dev, "enable irq failed\n");
			goto err1;
		}
		vfinfo[i].irqvec = vectorno;
		vfinfo[i].irq = irq_info->vector;
		irq_info->handler = crete_vdpa_intr_handler;
		irq_info->requested = 1;
	}
	crete_info(dev, "crete vdpa init irq, queues %d\n", queuenum);
err1:
	if (ret) {
		/* irq request failed: release the vectors taken so far */
		crete_driver_freeirq(vdpa_dev, i);
	}
	return ret;
}
int crete_driver_init_ctl_irq(struct vdpa_device *vdpa_dev, bool *ctrl_irq)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct vring_info *vfinfo = vd->vring;
	struct device *dev = &cdev->pdev->dev;
	int i, irqvec, irq, ret = 0;
	int queuenum = vd->num_queues;
	int io_vector = 0;
	struct crete_vdpa_mgmtdev *vdpa_mgmt_dev;
	struct crete_vnet_hw_cap *hcap;

	if (!adapter->priv) {
		*ctrl_irq = false;
		crete_info(dev, "crete is not a vdpa device\n");
		return 0;
	}
	vdpa_mgmt_dev = adapter->priv;
	hcap = &vdpa_mgmt_dev->hcap;
	if (!hcap->have_ctl) {
		*ctrl_irq = false;
		crete_info(dev, "crete does not have a ctrl queue\n");
		return 0;
	}
	for (i = 0; i < vd->num_queues; ++i) {
		if (vd->vring[i].cb.callback)
			++io_vector;
	}
	if (io_vector == queuenum) {
		*ctrl_irq = false;
		crete_info(dev, "crete does not have a ctrl irq\n");
		return 0;
	} else if (io_vector != queuenum + 1) {
		*ctrl_irq = false;
		crete_err(dev, "crete does not support the current vdpa device\n");
		return -EINVAL;
	}

	*ctrl_irq = true;
	irqvec = crete_req_msixirq(cdev);
	if (irqvec < 0) {
		crete_err(dev, "request irq vector failed\n");
		ret = -ENXIO;
		return ret;
	}
	i = queuenum;
	snprintf(vd->vring[i].msix_name, 256, "crete-ctrl[%s]-%d",
		 pci_name(cdev->pdev), i);

	/* init vringinfo irq num */
	irq = cdev->irq_info[irqvec].vector;
	ret = devm_request_irq(dev, irq,
			       crete_vdpa_intr_handler,
			       0, vd->vring[i].msix_name, &vfinfo[i]);
	if (ret) {
		crete_free_msixirq(cdev, irqvec);
		crete_err(dev, "enable irq failed\n");
		goto err1;
	}
	/* vring info irq vector */
	vfinfo[i].irqvec = irqvec;
	/* vring info irq num */
	vfinfo[i].irq = irq;
	return 0;
err1:
	return ret;
}
static int crete_driver_creteqp(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct vring_info *vfinfo = vd->vring;
	int queuenum = vd->qp_nums << 1;
	struct crete_queue_context qc;
	int i, ret = 0;

	for (i = 0; i < queuenum; i++) {
		/* a vring that is not ready is only reported, not fatal */
		if (!vfinfo[i].ready) {
			crete_info(&cdev->pdev->dev, "qpid %d is not ready\n", i);
			//break;
		}
		qc.qid = i;
		qc.queue_size = vd->queue_size;
		qc.cq_size = vd->queue_size;
		qc.queue_vec = vfinfo[i].irqvec;
		if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
			qc.queue_desc_base = vfinfo[i].desc;
			qc.queue_used_base = vfinfo[i].desc;
		} else {
			qc.queue_desc_base = vfinfo[i].desc;
			qc.queue_used_base = vfinfo[i].used;
		}
		ret = crete_cmd_create_signal_queue(cdev, &qc);
		if (ret < 0) {
			crete_err(cdev->device, "crete create queue failed ret:%d\n", ret);
			ret = -EINVAL;
			goto err;
		}
	}
err:
	return ret;
}
static int crete_driver_crete_ctrlq(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct vring_info *vfinfo = vd->vring;
	struct device *dev = &cdev->pdev->dev;
	int queuenum = vd->qp_nums << 1;
	struct crete_vdpa_mgmtdev *vdpa_mgmt_dev;
	struct crete_vnet_hw_cap *hcap;
	struct crete_queue_context qc;
	int ret = 0;

	if (!adapter->priv) {
		crete_info(dev, "crete is not a vdpa device\n");
		return 0;
	}
	vdpa_mgmt_dev = adapter->priv;
	hcap = &vdpa_mgmt_dev->hcap;
	if (!hcap->have_ctl) {
		crete_info(dev, "crete does not have a ctrl queue\n");
		return 0;
	}

	qc.qid = queuenum;
	qc.queue_size = vfinfo[queuenum].size;
	qc.queue_vec = 0xffff;
	qc.cq_size = vfinfo[queuenum].size;
	if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		qc.queue_desc_base = vfinfo[queuenum].desc;
		qc.queue_used_base = vfinfo[queuenum].desc;
	} else {
		qc.queue_desc_base = vfinfo[queuenum].desc;
		qc.queue_used_base = vfinfo[queuenum].used;
	}

	ret = crete_cmd_create_signal_queue(cdev, &qc);
	if (ret < 0) {
		crete_err(dev, "crete create queue failed ret: %d\n", ret);
		return ret;
	}
	return ret;
}
static int crete_set_vq_state(struct crete_vdpa *hw, u16 qid, u16 last_used)
{
	struct crete_core_dev *cdev = vd_to_coredev(hw);
	struct device *dev = &cdev->pdev->dev;
	int ret;

	ret = crete_set_vq_mig_state(cdev, CRETE_MIG_DEV_VIRTIO, qid, last_used);
	if (ret < 0) {
		crete_err(dev, "set mig vq state error. qid %d last used %d err %d\n",
			  qid, last_used, ret);
		/* FIXME: what value should be returned */
		return ret;
	}
	return 0;
}
static int crete_driver_startqueue(struct vdpa_device *vdpa_dev,
				   int startqueue, int endqueue)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct device *dev = &cdev->pdev->dev;
	struct vring_info *vfinfo = vd->vring;
	int i, ret = 0;

	for (i = startqueue; i < endqueue; i++) {
		if (!vfinfo[i].ready)
			crete_err(dev, "start queue %d not ready\n", i);

		/* set the last avail index */
		if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
			crete_set_vq_state(vd, i, vd->vring_lm_cfg[i].last_avail_idx);
		else if (vd->vring_lm_cfg[i].last_avail_idx > 0)
			crete_set_vq_state(vd, i, vd->vring_lm_cfg[i].last_avail_idx);

		/* enable the queue; don't enable it until the vq state is set */
		ret = crete_start_singlequeue(cdev, i);
		if (ret) {
			crete_err(dev, "crete start queue id:%d failed\n", i);
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
int crete_vdpa_driver_resetqueue(struct vdpa_device *vdpa_dev,
				 int startqueue, int endqueue)
{
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct device *dev = &cdev->pdev->dev;
	int i, ret = 0;

	for (i = startqueue; i < endqueue; i++) {
		ret = crete_reset_singlequeue(cdev, i);
		if (ret) {
			crete_err(dev, "crete reset queue id:%d failed\n", i);
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
static void crete_driver_reset_ctrlq(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	int startqueue = vd->qp_nums << 1;

	crete_vdpa_driver_resetqueue(vdpa_dev, startqueue, vd->num_queues);
}

static void crete_driver_reset_ioqp(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	int endqueue = vd->qp_nums << 1;

	crete_vdpa_driver_resetqueue(vdpa_dev, 0, endqueue);
}
static void crete_vdap_unmask_irq(struct vdpa_device *vdpa_dev)
{
	int i;
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	int qnum = vd->qp_nums << 1;

	for (i = 0; i < qnum; i++) {
		vd->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vd->vring[i].irqvec = 0xFFFF;
	}
}
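/*
 * DRIVER_OK bring-up sequence: (1) request the vring interrupts (or tear
 * them down when the host polls), (2) create the IO queue pairs and the
 * ctrl queue in firmware, (3) restore any saved vq state and start all
 * queues.  Errors unwind in reverse order.
 */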
int crete_driver_setup(struct vdpa_device *vdpa_dev)
{
	int ret = 0;
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct device *dev = &cdev->pdev->dev;
	bool ctrl_have_irq;
	int queuenum = vd->qp_nums << 1;

	ctrl_have_irq = false;

	if (vd->host_polling) {
		/* disable interrupts */
		crete_event_exit(&cdev->hw);
		crete_exit_irq(cdev);
		/* reset the irq values on the vrings */
		crete_vdap_unmask_irq(vdpa_dev);
		crete_info(dev, "crete vdpa unmask the irq\n");
	} else {
		/* step 1: request the vring irq resources */
		ret = crete_driver_initirq(vdpa_dev);
		if (ret) {
			crete_err(dev, "crete driver init irq failed\n");
			goto err;
		}
	}

	/* step 2: create the IO queue pairs */
	ret = crete_driver_creteqp(vdpa_dev);
	if (ret) {
		crete_err(dev, "crete driver create qp failed\n");
		goto err_ctrl_irq;
	}
	ret = crete_driver_crete_ctrlq(vdpa_dev);
	if (ret) {
		crete_err(dev, "crete driver create ctrl queue failed\n");
		goto err_creteqp;
	}
	/* step 3: start all queues */
	ret = crete_driver_startqueue(vdpa_dev, 0, vd->num_queues);
	if (ret) {
		crete_err(dev, "crete start queue failed\n");
		goto err_crete_ctrlq;
	}
	crete_info(dev, "crete vdpa driver setup\n");
	return 0;

err_crete_ctrlq:
	crete_driver_reset_ctrlq(vdpa_dev);
err_creteqp:
	crete_driver_reset_ioqp(vdpa_dev);
err_ctrl_irq:
	if (ctrl_have_irq)
		crete_driver_freeirq_on(vdpa_dev, queuenum);

	crete_driver_freeirq(vdpa_dev, queuenum);
#ifndef JMND_DISABLE_MSIX
err:
#endif
	return ret;
}
int crete_driver_down(struct vdpa_device *vdpa_dev)
{
	int ret = 0;
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	/* TODO: free the irqs, except the ctrlq one */
	crete_driver_freeirq(vdpa_dev, vd->num_queues);

	return ret;
}
static int __maybe_unused crete_driver_postinit(struct vdpa_device *vdpa_dev)
{
	int ret = 0;
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct device *dev = &cdev->pdev->dev;

	/* set features */
	ret = crete_cmd_set_features(cdev, CRETE_VIRTIO_NET_DEV_FEAT,
				     vd->driver_features);
	if (ret) {
		crete_err(dev, "crete set features 0x%llx failed\n",
			  vd->driver_features);
		goto out;
	}

	/* set the FEATURES_OK status */
	ret = crete_vdpa_set_hwstatus(cdev, CRETE_NET_DEV_FEATURE_OK, dev);
	if (ret)
		crete_err(dev, "crete set status feature ok failed\n");

out:
	return ret;
}
static u64 crete_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_vnet_hw_cap *hcap = vd->hcap;

	return hcap->hw_features;
}
static int crete_vdpa_set_log_state(struct crete_vdpa *vd, u8 state)
{
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;
	int ret;

	ret = crete_set_mig_log_state(cdev, CRETE_MIG_DEV_VIRTIO, state);
	if (ret < 0) {
		crete_err(dev, "set mig log state error. state %d err %d\n", state, ret);
		/* FIXME: what value should be returned */
		return ret;
	}

	return 0;
}

static void crete_vdpa_disable_logging(struct crete_vdpa *vd)
{
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;

	vd->lm_ctrl = CORSICA_LM_DISABLE;
	crete_vdpa_set_log_state(vd, CORSICA_LM_DISABLE);
	crete_info(dev, "crete vdpa disable dirty logging\n");
}

static void crete_vdpa_enable_logging(struct crete_vdpa *vd)
{
	crete_vdpa_set_log_state(vd, CORSICA_LM_ENABLE);
}

static int crete_vdpa_stop_mig(struct crete_vdpa *vd)
{
	return crete_vdpa_set_log_state(vd, CORSICA_LM_STOP_DEV);
}
static int crete_vdpa_set_driver_features(struct vdpa_device *vdpa_dev,
					  u64 features)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *core_dev = adapter->cdev;
	struct device *dev = &core_dev->pdev->dev;
	int ret;

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		crete_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	ret = crete_cmd_set_features(core_dev, CRETE_VIRTIO_NET_DEV_FEAT, features);
	if (ret) {
		crete_err(dev, "crete set features 0x%llx failed ret %d\n", features, ret);
		return ret;
	}

	if ((features & BIT_ULL(VHOST_F_LOG_ALL)) &&
	    (vd->lm_ctrl == CORSICA_LM_ENABLE)) {
		crete_info(dev, "enable log log_base=0x%x%08x, log_size=0x%x%08x\n",
			   vd->mig_log.log_base_h, vd->mig_log.log_base_l,
			   vd->mig_log.iova_size_h, vd->mig_log.iova_size_l);
		crete_vdpa_enable_logging(vd);
	} else if (!(features & BIT_ULL(VHOST_F_LOG_ALL)) &&
		   (vd->lm_ctrl == CORSICA_LM_STOP_DEV)) {
		crete_info(dev, "disable log now lm ctrl= %d\n", vd->lm_ctrl);
		crete_vdpa_disable_logging(vd);
	}
	vd->driver_features = features;
	crete_info(dev, "vdpa set features 0x%llx\n", features);
	return 0;
}
#ifdef HAVE_VDPA_OPS_DEVICE_FEAT
static u64 crete_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->driver_features;
}
#endif
static u8 crete_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd;
	struct crete_core_dev *cdev;

	vd = vdpa_to_vd(vdpa_dev);
	cdev = vd_to_coredev(vd);

	dev_info(&cdev->pdev->dev, "vdpa get dev status: %d\n", vd->status);
	return vd->status;
}
static int crete_vdpa_reset(struct vdpa_device *vdpa_dev, int state)
{
	struct crete_adapter *adapter;
	struct crete_vdpa *vd;
	u8 status_old;
	struct crete_core_dev *cdev;
	int ret;

	vd = vdpa_to_vd(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = vd->status;
	cdev = adapter->cdev;

	/* nothing to do if the device is still in its initial status */
	if (status_old == 0)
		return 0;

	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
		crete_driver_down(vdpa_dev);
		crete_exit_irq(cdev);
		crete_reset_vring(vdpa_dev);
		crete_reset_netdev(vd);
		ret = crete_vdpa_set_hwstatus(adapter->cdev,
					      CRETE_NET_DEV_STARTUP, cdev->device);
		if (ret) {
			dev_err(cdev->device,
				"%s set CRETE_NET_DEV_STARTUP failed\n", __func__);
			return -EINVAL;
		}
	}
	vd->announce_count = 0;
	vd->status = state;
	/* reset the net device status to LINK_UP */
	adapter->config.status = VIRTIO_NET_S_LINK_UP;
	dev_info(cdev->device, "vdpa reset status %d old %d\n", state, status_old);
	return 0;
}
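/*
 * Map virtio status transitions to device states: ACKNOWLEDGE ->
 * CRETE_NET_DEV_STARTUP, FEATURES_OK -> CRETE_NET_DEV_FEATURE_OK,
 * DRIVER_OK -> CRETE_NET_DEV_DEV_OK (after crete_driver_setup()).
 * Clearing DRIVER_OK or writing 0 resets the device; on failure the
 * status is latched with VIRTIO_CONFIG_S_FAILED.
 */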
static void crete_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct crete_adapter *adapter;
	struct crete_core_dev *cdev;
	struct crete_vdpa *vd;
	struct virtio_net_config *config;
	u8 status_old, jnet_st = 0;
	int ret = 0;
	bool need = false;

	vd = vdpa_to_vd(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	cdev = adapter->cdev;
	status_old = vd->status;
	config = &adapter->config;

	if (status_old == status)
		return;

	if (status == 0 || ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
			    !(status & VIRTIO_CONFIG_S_DRIVER_OK))) {
		crete_vdpa_reset(vdpa_dev, 0);
		vd->status = status;
		return;
	}

	if ((status & VIRTIO_CONFIG_S_ACKNOWLEDGE) &&
	    !(status_old & VIRTIO_CONFIG_S_ACKNOWLEDGE)) {
		jnet_st = CRETE_NET_DEV_STARTUP;
		need = true;
	}

	if ((status & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_FEATURES_OK)) {
		jnet_st = CRETE_NET_DEV_FEATURE_OK;
		crete_init_msix(cdev);
		need = true;
	}

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = crete_driver_setup(vdpa_dev);
		if (ret) {
			dev_err(cdev->device, "driver setup error\n");
			goto fw_reset;
		}
		jnet_st = CRETE_NET_DEV_DEV_OK;
		need = true;
		if ((vd->driver_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) &&
		    ((config->status & VIRTIO_NET_S_ANNOUNCE) != VIRTIO_NET_S_ANNOUNCE)) {
			config->status |= VIRTIO_NET_S_ANNOUNCE;
			dev_info(cdev->device, "crete vdpa set config status has VIRTIO_NET_S_ANNOUNCE\n");
		}
	}

	if (need) {
		ret = crete_set_status(cdev, CRETE_NET_DEV_STATUS, jnet_st);
		if (ret) {
			crete_err(cdev->device, "crete cmd set status %u failed\n", status);
			goto fw_reset;
		}
	}

	vd->status = status;
	if (need && jnet_st == CRETE_NET_DEV_DEV_OK) {
		/* config_cb.callback must not fire until SET DRIVER_OK has returned */
		vd->announce_count = 5;
		dev_info(cdev->device, "set config announce status %d\n", config->status);
	}
	dev_info(cdev->device, "vdpa set status: %d jnet %d need %d\n", status, jnet_st, need);
	return;
fw_reset:
	vd->status = status_old | VIRTIO_CONFIG_S_FAILED;
	/* TODO: notify fw to send a reset event to virtio */
}
static u16 crete_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->queue_size;
}

#ifdef HAVE_VDPA_OPS_NUM_MIN
static u16 crete_vdpa_get_vq_num_min(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->queue_size;
}
#endif
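/*
 * Tear down every mapping recorded in mig_log.list: mark writable pages
 * dirty, unpin them, undo the pinned_vm accounting and remove the IOMMU
 * mapping.  Used when dirty-page logging stops and the log buffer is
 * released.
 */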
static int crete_vdpa_iotlb_invalidate(struct vdpa_device *vdpa)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_iotlb_map *iter, *_iter;
	struct page *page;
	unsigned long pfn, pinned;

	if (!vd->domain)
		return -EINVAL;

	spin_lock(&vd->iommu_lock);
	list_for_each_entry_safe(iter, _iter, &vd->mig_log.list, link) {
		if (vd->mig_log.nmaps == 0 || iter->size == 0)
			break;
		pinned = PFN_DOWN(iter->size);
		for (pfn = PFN_DOWN(iter->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (iter->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(iter->size), &vd->mm->pinned_vm);
		iommu_unmap(vd->domain, iter->start, iter->size);
		list_del(&iter->link);
		kfree(iter);
		vd->mig_log.nmaps--;
	}
	spin_unlock(&vd->iommu_lock);
	return 0;
}
static void crete_vdpa_unbind_mm(struct vdpa_device *vdpa)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	if (!vd->mm) {
		crete_info(&cdev->pdev->dev, "crete vdpa mm is already unbound\n");
		return;
	}
	crete_vdpa_iotlb_invalidate(vdpa);
	mmdrop(vd->mm);
	vd->mm = NULL;

	crete_info(&cdev->pdev->dev, "crete vdpa mm unbound\n");
}
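/*
 * The device reports a single 16-bit avail index per queue.  For packed
 * rings, bits 0-14 carry last_avail_idx and bit 15 carries the inverted
 * wrap counter, hence the 0x7fff mask and the negation below; split
 * rings use the plain index.
 */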
static void crete_avail_idx_to_vq_state(struct crete_vdpa *vd, u16 avail_index, struct vdpa_vq_state *state)
{
	if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		state->packed.last_avail_idx = avail_index & 0x7fff;
		state->packed.last_avail_counter = !(avail_index & 0x8000);
		state->packed.last_used_counter = state->packed.last_avail_counter;
		state->packed.last_used_idx = state->packed.last_avail_idx;
	} else {
		state->split.avail_index = avail_index;
	}
}

static u16 crete_vq_state_to_avail_idx(struct crete_vdpa *vd, const struct vdpa_vq_state *state)
{
	u16 avail_index;

	if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail_index = state->packed.last_avail_idx;
		avail_index |= (!((uint16_t)state->packed.last_avail_counter)) << 15;
	} else {
		avail_index = state->split.avail_index;
	}

	return avail_index;
}
static u16 crete_get_vq_state(struct crete_vdpa *vd, u16 qid)
{
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;
	u16 last_used;
	int ret;

	ret = crete_get_vq_mig_state(cdev, CRETE_MIG_DEV_VIRTIO, qid, &last_used);
	if (ret < 0) {
		crete_err(dev, "get mig vq state error. qid %d err %d\n", qid, ret);
		/* FIXME: what value should be returned */
		return 0;
	}
	return last_used;
}

static int crete_get_vq_state_batch(struct crete_vdpa *vd, u16 start_qid, u16 qnum)
{
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;
	u16 last_used_set[16], qid;	/* callers must clamp qnum to 16 */
	int ret, i;

	ret = crete_get_vq_mig_state_batch(cdev, CRETE_MIG_DEV_VIRTIO, start_qid, qnum, last_used_set);
	if (ret < 0) {
		crete_err(dev, "get mig vq state batch error. start qid %d qnum %d err %d\n",
			  start_qid, qnum, ret);
		/* FIXME: what value should be returned */
		return ret;
	}

	/* save the last avail index */
	for (i = 0, qid = start_qid; i < qnum; i++, qid++) {
		vd->vring_lm_cfg[qid].last_avail_idx = last_used_set[i];
		vd->vring_lm_cfg[qid].can_used = 1;
		crete_info(dev, "crete vdpa save batch the last avail index qid %d value 0x%x",
			   qid, last_used_set[i]);
	}

	return 0;
}
static int crete_vdpa_get_vq_state_single(struct vdpa_device *vdpa_dev, u16 qid,
					  struct vdpa_vq_state *state)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct device *dev = &cdev->pdev->dev;
	u16 avail_index;

	avail_index = crete_get_vq_state(vd, qid);
	crete_avail_idx_to_vq_state(vd, avail_index, state);
	/* save the avail index; it is consumed right away, so clear `can_used` */
	vd->vring_lm_cfg[qid].last_avail_idx = avail_index;
	vd->vring_lm_cfg[qid].can_used = 0;

	if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		crete_info(dev,
			   "get vq state packed qid = %u, last avail idx=%u counter %d "
			   "last used idx %d counter %d "
			   "ctrl %d avail index %u\n",
			   qid, state->packed.last_avail_idx,
			   state->packed.last_avail_counter,
			   state->packed.last_used_idx, state->packed.last_used_counter,
			   vd->lm_ctrl, avail_index);
	else
		crete_info(dev,
			   "get vq state split qid = %u, last_avail_idx=%u "
			   "ctrl %d avail index %d\n",
			   qid, state->split.avail_index, vd->lm_ctrl, avail_index);
	return 0;
}
static int crete_vdpa_get_vq_state_batch(struct vdpa_device *vdpa_dev, u16 start_qid,
					 u16 qnum, struct vdpa_vq_state *state)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	int ret;

	ret = crete_get_vq_state_batch(vd, start_qid, qnum);
	if (ret == 0) {
		/* TODO: first check the flag `can_used` */
		crete_avail_idx_to_vq_state(vd, vd->vring_lm_cfg[start_qid].last_avail_idx, state);
		vd->vring_lm_cfg[start_qid].can_used = 0;
	}

	return ret;
}
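/*
 * get_vq_state doubles as the live-migration stop path: the first call
 * (qid 0) while logging is enabled stops the device, and the call for
 * the last queue disables dirty logging and unbinds the mm.  States
 * already fetched by a batched query are served from vring_lm_cfg.
 */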
static int crete_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	struct device *dev = &cdev->pdev->dev;
	int ret;

	/* hack: QEMU uses this call to stop the device */
	if (qid == 0 && (vd->lm_ctrl & CORSICA_LM_ENABLE)) {
		vd->lm_ctrl = CORSICA_LM_STOP_DEV;

		/* stop the migration device */
		ret = crete_vdpa_stop_mig(vd);
		if (ret != 0) {
			/* TODO: can't stop device. need stop migration */
			crete_err(dev, "crete vdpa migration stop failed. ret %d\n", ret);
		} else {
			crete_info(dev, "crete vdpa migration stop\n");
		}
	}

	if ((qid == vd->num_queues - 1) &&
	    (vd->lm_ctrl & CORSICA_LM_STOP_DEV)) {
		/* disable the dirty log tracking */
		crete_vdpa_disable_logging(vd);
		crete_vdpa_unbind_mm(vdpa_dev);
		vd->mig_log.log_base_h = 0;
		vd->mig_log.log_base_l = 0;
		vd->mig_log.iova_size_h = 0;
		vd->mig_log.iova_size_l = 0;
		vd->mig_log.nmaps = 0;
		crete_info(dev, "crete vdpa dirty log disabled\n");
	}

	if (vd->vring_lm_cfg[qid].can_used) {
		crete_avail_idx_to_vq_state(vd, vd->vring_lm_cfg[qid].last_avail_idx, state);
		/* unset the flag `can_used` */
		vd->vring_lm_cfg[qid].can_used = 0;
		return 0;
	}

	if ((qid == vd->num_queues - 1) || (batch_vq_state == 0)) {
		/* get the vq state for a single queue */
		return crete_vdpa_get_vq_state_single(vdpa_dev, qid, state);
	} else {
		/* get the vq state in a batch */
		return crete_vdpa_get_vq_state_batch(vdpa_dev, qid,
						     min(vd->num_queues - qid, 16), state);
	}
}
static int crete_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	u16 avail_index;
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;

	avail_index = crete_vq_state_to_avail_idx(vd, state);

	vd->vring_lm_cfg[qid].last_avail_idx = avail_index;
	if (vd->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		crete_info(dev,
			   "set vq state packed qid = %u, last avail idx %u counter %d "
			   "last used idx %d counter %d "
			   "ctrl %d avail index %u\n",
			   qid, state->packed.last_avail_idx,
			   state->packed.last_avail_counter,
			   state->packed.last_used_idx, state->packed.last_used_counter,
			   vd->lm_ctrl, avail_index);
	} else {
		if (avail_index == 0)
			goto ret;

		crete_info(dev,
			   "set vq state split qid = %u, last_avail_idx=%u "
			   "ctrl %d avail index %d\n",
			   qid, state->split.avail_index, vd->lm_ctrl, avail_index);
	}
ret:
	return 0;
}
static void crete_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	vd->vring[qid].cb = *cb;
	if (qid == 0) {
		if ((cb->callback == NULL) || (vf_irq == 0)) {
			vd->host_polling = true;
			crete_info(&cdev->pdev->dev, "crete vdpa disable irq\n");
		} else {
			vd->host_polling = false;
			crete_info(&cdev->pdev->dev, "crete vdpa enable irq\n");
		}
	}
}
static void crete_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	vd->vring[qid].ready = ready;
}

static bool crete_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->vring[qid].ready;
}

static void crete_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	//struct crete_core_dev *cdev = vd_to_coredev(vd);

	vd->vring[qid].size = num;
	//crete_info(&cdev->pdev->dev, "crete vdpa set vq num qid %d num %d\n", qid, num);
}
static int crete_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	vd->vring[qid].desc = desc_area;
	vd->vring[qid].avail = driver_area;
	vd->vring[qid].used = device_area;

	dev_info(&cdev->pdev->dev, "vdpa set vq address qid: %d desc: 0x%llx avail: 0x%llx used: 0x%llx\n",
		 qid, desc_area, driver_area, device_area);
	return 0;
}
static void crete_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	crete_notify_queue(vd, qid);

	dev_info(&cdev->pdev->dev, "vdpa kick vq: %d\n", qid);
}
static u32 crete_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->generation;
}

static u32 crete_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->dev_type;
}

static u32 crete_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->cdev->pdev;

	return pdev->subsystem_vendor;
}

static u32 crete_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return CORSICA_QUEUE_ALIGNMENT;
}
#ifdef HAVE_VDPA_OPS_GET_CONFIG_SIZE
static size_t crete_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	return vd->config_size;
}
#endif
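/*
 * Gratuitous-announce handling: while VIRTIO_NET_S_ANNOUNCE is set,
 * each config-space read of the status field decrements announce_count
 * and re-fires the config change callback until the counter drains, at
 * which point the announce bit is cleared.
 */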
static void crete_vdpa_announce(struct vdpa_device *vdpa_dev, struct virtio_net_config *config)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;

	if (config->status & VIRTIO_NET_S_ANNOUNCE) {
		if (vd->announce_count > 0) {
			vd->announce_count--;
			if (vd->announce_count == 0) {
				config->status &= ~VIRTIO_NET_S_ANNOUNCE;
				crete_info(cdev->device, "crete vdpa unmask announce status %d\n",
					   config->status);
			}
			/* re-fire the config change callback while announcing */
			if ((vd->config_cb.callback) &&
			    (vd->driver_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) {
				vd->config_cb.callback(vd->config_cb.private);
				crete_info(cdev->device, "send config callback status %d\n",
					   config->status);
			}
		}
	}
}
static void crete_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct virtio_net_config *config = &adapter->config;
	struct crete_core_dev *cdev = adapter->cdev;

	if (offset + len <= sizeof(struct virtio_net_config)) {
		memcpy(buf, (u8 *)&adapter->config + offset, len);
		crete_info(&cdev->pdev->dev, "crete vdpa get config mtu %d "
			   "status %d speed %d queue pairs %d\n",
			   config->mtu, config->status, config->speed,
			   config->max_virtqueue_pairs);
	}

	switch (offset) {
	case VIRTIO_NET_CONFIG_OFFSET_MAC:
		crete_info(&cdev->pdev->dev, "get config mac %pM len %d\n", config->mac, len);
		break;
	case VIRTIO_NET_CONFIG_OFFSET_MTU:
		crete_info(&cdev->pdev->dev, "get config mtu %d len %d\n", config->mtu, len);
		break;
	case VIRTIO_NET_CONFIG_OFFSET_STATUS:
		crete_info(&cdev->pdev->dev, "get config status %d len %d\n", config->status, len);
		crete_vdpa_announce(vdpa_dev, config);
		break;
	default:
		crete_info(&cdev->pdev->dev, "get config unknown offset %u len %d\n", offset, len);
		break;
	}
}
static void crete_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct virtio_net_config *config = &adapter->config;
	struct crete_core_dev *cdev = adapter->cdev;
	const u8 *p;

	p = buf;
	WARN_ON(offset + len > vd->config_size);
	crete_info(&cdev->pdev->dev,
		   "set config old mac=%pM mtu=%d offset %d len %d\n",
		   config->mac, config->mtu, offset, len);

	memcpy((u8 *)config + offset, buf, len);
	switch (offset) {
	case VIRTIO_NET_CONFIG_OFFSET_MAC:
		pr_info("send cmd mac addr update %02x:%02x:%02x:%02x:%02x:%02x\n",
			p[0], p[1], p[2], p[3], p[4], p[5]);
		break;
	case VIRTIO_NET_CONFIG_OFFSET_MTU:
		pr_info("send cmd mtu update mtu %d\n", config->mtu);
		break;
	default:
		crete_info(&cdev->pdev->dev, "set config unknown offset %d len %d\n", offset, len);
		break;
	}
	crete_info(&cdev->pdev->dev, "crete vdpa set_config mtu %d "
		   "status %d speed %d queue pairs %d\n",
		   config->mtu, config->status, config->speed,
		   config->max_virtqueue_pairs);
}
static void crete_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	//struct virtio_net_config *config = &adapter->config;
	struct crete_core_dev *cdev = adapter->cdev;

	vd->config_cb.callback = cb->callback;
	vd->config_cb.private = cb->private;

	crete_info(&cdev->pdev->dev, "set config callback\n");

	/*
	 * QEMU cannot propagate DRIVER_OK into dev->status
	 * until this function has returned.
	 * So we need to start a timer worker to send the callback.
	 */
	if (vd->announce_count > 0) {
		vd->announce_count--;
		/* TODO: start a timer worker */
		if ((vd->config_cb.callback) &&
		    (vd->driver_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) {
			vd->config_cb.callback(vd->config_cb.private);
			crete_info(&cdev->pdev->dev, "send config callback\n");
		}
	}
}
static int crete_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct crete_core_dev *cdev = adapter->cdev;
	int irq = vd->vring[qid].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR) {
		crete_err(&cdev->pdev->dev, "get vq irq. qid %d error NO_VECTOR\n", qid);
		return -EINVAL;
	}

	crete_info(&cdev->pdev->dev, "get vq irq. qid %d irq %u", qid, irq);
	return irq;
}
bool is_ctrl_vq_idx(struct vdpa_device *vdpa_dev, u16 idx)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);

	if (vd->num_queues == idx)
		return true;
	return false;
}
static struct vdpa_notification_area crete_get_vq_notification(
				struct vdpa_device *vdpa_dev, u16 idx)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vd->vring[idx].notify_pa;
	area.size = PAGE_SIZE;
	return area;
}
#ifdef HAVE_VDPA_EULER_OPS
static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}
static int crete_vdpa_iotlb_update(struct vdpa_device *vdpa, unsigned int asid,
				   u64 iova, u64 size,
				   u64 pa, u32 perm, void *opaque)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;
	int ret = 0;
	int flags = perm_to_iommu_flags(perm);
	struct crete_iotlb_map *map;

	if (!vd->domain) {
		dev_err(&cdev->pdev->dev, "crete vdpa domain is null\n");
		return -EIO;
	}
	if (size == 0)
		return -EFAULT;
	map = kmalloc(sizeof(*map), GFP_ATOMIC);
	if (!map)
		return -ENOMEM;

	map->start = iova;
	map->size = size;
	map->last = iova + size - 1;
	map->addr = pa;
	map->perm = perm;
	map->opaque = opaque;

	INIT_LIST_HEAD(&map->link);
	spin_lock(&vd->iommu_lock);
	vd->mig_log.nmaps++;
	list_add_tail(&map->link, &vd->mig_log.list);
	ret = iommu_map(vd->domain, iova, pa, size, flags);
	if (ret) {
		/* drop the bookkeeping entry when the IOMMU mapping failed */
		list_del(&map->link);
		vd->mig_log.nmaps--;
		kfree(map);
	}
	spin_unlock(&vd->iommu_lock);
	return ret;
}
static int crete_vdpa_dma_unmap(struct vdpa_device *vdev, unsigned int asid,
				u64 iova, u64 size)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	if (!vd->domain) {
		dev_err(&cdev->pdev->dev, "crete vdpa domain is null\n");
		return -EIO;
	}

	spin_lock(&vd->iommu_lock);
	iommu_unmap(vd->domain, iova, size);
	spin_unlock(&vd->iommu_lock);
	return 0;
}
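/*
 * Pin the userspace dirty-log buffer and map it into the device IOMMU
 * domain at the given IOVA.  The buffer is walked one page-list at a
 * time (a page of struct page pointers), contiguous pfn runs are
 * coalesced into single iotlb updates, and everything is unwound on
 * failure.  The loop follows the vhost-vdpa process_iotlb_update()
 * structure.
 */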
static int crete_vdpa_dma_map_log_base(struct vdpa_device *vdpa, u64 log_size, u64 iova)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start, log_base;
	long pinned;
	int ret = 0;
	u32 perm;

	start = iova;
	perm = VHOST_ACCESS_RW;
	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(log_size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}
	if (!vd->mm) {
		ret = -EIO;
		goto free;
	}
	mmap_read_lock(vd->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&vd->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	log_base = crete_vdpa_get_log_base(vd);
	log_base &= PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;
	/* walk the user buffer chunk by chunk, starting at its base */
	cur_base = log_base;

	crete_info(&cdev->pdev->dev, "crete vdpa dma map pages %ld log base 0x%llx size 0x%llx iova 0x%llx\n",
		   npages, log_base, log_size, iova);
	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = crete_vdpa_iotlb_update(vdpa, 0, iova, csize,
							      PFN_PHYS(map_pfn),
							      perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					crete_err(&cdev->pdev->dev, "crete vdpa update iotlb error. "
						  "iova 0x%llx csize 0x%llx npages %ld\n",
						  iova, csize, npages);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Pin the rest chunk */
	ret = crete_vdpa_iotlb_update(vdpa, 0, iova, PFN_PHYS(last_pfn - map_pfn + 1),
				      PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		crete_vdpa_dma_unmap(vdpa, 0, start, log_size);
	}
unlock:
	mmap_read_unlock(vd->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}
#endif
#ifdef HAVE_VDPA_JMND_OPS
static int crete_jmnd_set_log_base(struct vdpa_device *vdpa, u64 log_base, u64 log_size)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;
	int ret;

	/* TODO: need to disable the log first? */
	//crete_info(&cdev->pdev->dev, "disable log\n");
	//crete_info(&cdev->pdev->dev, "enable log log_base = %llu\n", log_base);
	vd->mig_log.log_base_l = log_base & 0xFFFFFFFF;
	vd->mig_log.log_base_h = log_base >> 32;
	vd->mig_log.iova_addr_l = 0;
	vd->mig_log.iova_addr_h = 0;

	log_size = log_size << 18; /* by 32K * 8 */
	vd->mig_log.iova_size_l = log_size & 0xFFFFFFFF;
	vd->mig_log.iova_size_h = log_size >> 32;
	vd->lm_ctrl = CORSICA_LM_ENABLE;
	ret = crete_set_mig_log_base(cdev, CRETE_MIG_DEV_VIRTIO, 1,
				     vd->mig_log.log_base_l, vd->mig_log.log_base_h,
				     vd->mig_log.iova_addr_l, vd->mig_log.iova_addr_h,
				     vd->mig_log.iova_size_l, vd->mig_log.iova_size_h);
	if (ret < 0) {
		crete_err(&cdev->pdev->dev, "set log base error. ret %d log base 0x%llx log size 0x%llx\n",
			  ret, log_base, log_size);
	}
	return ret;
}
#endif
#ifdef HAVE_VDPA_EULER_OPS
static void crete_vdpa_bind_mm(struct vdpa_device *vdpa)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;

	if (vd->mm) {
		crete_warn(&cdev->pdev->dev, "crete vdpa mm has already been bound\n");
		return;
	}
	vd->mm = current->mm;
	mmgrab(vd->mm);
	crete_warn(&cdev->pdev->dev, "crete vdpa mm bound\n");
}
static int crete_vdpa_set_mig_log(struct vdpa_device *vdpa, u64 log_size, u64 iova_base)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;
	int ret;

	if ((vd->mig_log.log_base_l == 0 && vd->mig_log.log_base_h == 0) ||
	    (vd->mig_log.iova_size_l == 0 && vd->mig_log.iova_size_h == 0)) {
		return 0;
	}

	/* log base and log size have both been assigned a value */
	vd->lm_ctrl = CORSICA_LM_ENABLE;
	ret = crete_vdpa_dma_map_log_base(vdpa, log_size, iova_base);
	if (ret) {
		crete_err(&cdev->pdev->dev, "crete vdpa log base dma map error. ret %d\n", ret);
		return ret;
	}
	ret = crete_set_mig_log_base(cdev, CRETE_MIG_DEV_VIRTIO, 1,
				     vd->mig_log.log_base_l, vd->mig_log.log_base_h,
				     vd->mig_log.iova_addr_l, vd->mig_log.iova_addr_h,
				     vd->mig_log.iova_size_l, vd->mig_log.iova_size_h);
	if (ret < 0) {
		crete_err(&cdev->pdev->dev, "set log base error. ret %d log base 0x%llx log size 0x%llx\n",
			  ret, crete_vdpa_get_log_base(vd), crete_vdpa_get_log_size(vd));
	}
	return ret;
}
static int crete_eular_set_log_base(struct vdpa_device *vdpa, uint64_t log_base)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;

	/* bind the mm first */
	crete_vdpa_bind_mm(vdpa);
	vd->mig_log.log_base_l = log_base & 0xFFFFFFFF;
	vd->mig_log.log_base_h = log_base >> 32;
	vd->mig_log.iova_addr_l = 0;
	vd->mig_log.iova_addr_h = 0;

	crete_info(&cdev->pdev->dev, "set log base 0x%llx\n", log_base);
	return 0;
}
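/*
 * log_size appears to arrive in 32 KiB dirty-bitmap units (see the
 * inline "by 32K" comment), so the byte size is log_size << 15.  The
 * bitmap is placed in IOVA space above CRETE_LOG_BASE_IOVA, past any
 * previously programmed window.
 */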
static int crete_eular_set_log_size(struct vdpa_device *vdpa, uint64_t log_size)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_adapter *adapter = vdpa_to_adapter(vdpa);
	struct crete_core_dev *cdev = adapter->cdev;
	u64 iova_size, iova_base, iova_off;

	iova_off = ((u64)vd->mig_log.iova_size_h << 32) | vd->mig_log.iova_size_l;
	iova_base = CRETE_LOG_BASE_IOVA + (PFN_UP(iova_off) << PAGE_SHIFT);
	iova_size = log_size << 15; /* by 32K */
	vd->mig_log.iova_size_l = iova_size & 0xFFFFFFFF;
	vd->mig_log.iova_size_h = iova_size >> 32;

	crete_info(&cdev->pdev->dev, "set log size 0x%llx iova size 0x%llx iova_base 0x%llx iova_off 0x%llx\n",
		   log_size, iova_size, iova_base, iova_off);
	return crete_vdpa_set_mig_log(vdpa, log_size, iova_base);
}
int crete_vdpa_suspend(struct vdpa_device *vdev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;

	crete_info(dev, "crete vdpa suspend\n");
	return 0;
}

int crete_vdpa_resume(struct vdpa_device *vdev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dev = &cdev->pdev->dev;

	crete_info(dev, "crete vdpa resume\n");
	return 0;
}
static int crete_vdpa_dma_map(struct vdpa_device *vdev, unsigned int asid,
			      u64 iova, u64 size,
			      u64 pa, u32 perm, void *opaque)
{
	int ret = 0;
	int flags = perm_to_iommu_flags(perm);
	struct crete_vdpa *vd = vdpa_to_vd(vdev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	if (!vd->domain) {
		dev_err(&cdev->pdev->dev, "crete vdpa domain is null\n");
		return -EIO;
	}
	spin_lock(&vd->iommu_lock);
	ret = iommu_map(vd->domain, iova, pa, size, flags);
	spin_unlock(&vd->iommu_lock);

	return ret;
}
static uint32_t crete_vdpa_get_dev_buffer_size(struct vdpa_device *vdpa)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	crete_info(&cdev->pdev->dev, "get dev buffer size %d\n", vd->config_size);
	return vd->config_size;
}

static int crete_vdpa_get_dev_buffer(struct vdpa_device *vdpa, unsigned int offset,
				     void __user *dest, unsigned int len)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	u8 *buf;

	buf = kvzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	crete_vdpa_get_config(vdpa, offset, buf, len);

	if (copy_to_user(dest, buf, len)) {
		kvfree(buf);
		crete_err(&cdev->pdev->dev, "crete vdpa get dev buffer failed\n");
		return -EFAULT;
	}

	kvfree(buf);
	crete_info(&cdev->pdev->dev, "crete vdpa get dev buffer completely\n");
	return 0;
}

static int crete_vdpa_set_dev_buffer(struct vdpa_device *vdpa, unsigned int offset,
				     const void __user *src, unsigned int len)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	u8 *buf;

	buf = vmemdup_user(src, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	crete_vdpa_set_config(vdpa, offset, buf, len);
	kvfree(buf);
	crete_info(&cdev->pdev->dev, "crete vdpa set dev buffer offset %d len %d\n", offset, len);
	return 0;
}
static int crete_vdpa_set_mig_state(struct vdpa_device *vdpa, u8 state)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	crete_info(&cdev->pdev->dev, "set mig state %d\n", state);
	return 0;
}

static int crete_vdpa_log_sync(struct vdpa_device *vdpa)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdpa);
	struct crete_core_dev *cdev = vd_to_coredev(vd);

	crete_info(&cdev->pdev->dev, "crete vdpa log sync\n");
	return 0;
}
#endif
struct vdpa_config_ops crete_vdpa_ops = {
#ifdef HAVE_VDPA_OPS_DEVICE_FEAT
	.get_device_features = crete_vdpa_get_device_features,
	.set_driver_features = crete_vdpa_set_driver_features,
	.get_driver_features = crete_vdpa_get_driver_features,
#else
	.get_features = crete_vdpa_get_device_features,
	.set_features = crete_vdpa_set_driver_features,
#endif
	.get_status = crete_vdpa_get_status,
	.set_status = crete_vdpa_set_status,
#ifdef HAVE_VDPA_OPS_RESET
	.reset = crete_vdpa_reset,
#endif
	.get_vq_num_max = crete_vdpa_get_vq_num_max,
#ifdef HAVE_VDPA_OPS_NUM_MIN
	.get_vq_num_min = crete_vdpa_get_vq_num_min,
#endif
	.get_vq_state = crete_vdpa_get_vq_state,
	.set_vq_state = crete_vdpa_set_vq_state,
	.set_vq_cb = crete_vdpa_set_vq_cb,
	.set_vq_ready = crete_vdpa_set_vq_ready,
	.get_vq_ready = crete_vdpa_get_vq_ready,
	.set_vq_num = crete_vdpa_set_vq_num,
	.set_vq_address = crete_vdpa_set_vq_address,
	.get_vq_irq = crete_vdpa_get_vq_irq,
	.kick_vq = crete_vdpa_kick_vq,
	.get_generation = crete_vdpa_get_generation,
	.get_device_id = crete_vdpa_get_device_id,
	.get_vendor_id = crete_vdpa_get_vendor_id,
	.get_vq_align = crete_vdpa_get_vq_align,

#ifdef HAVE_VDPA_OPS_GET_CONFIG_SIZE
	.get_config_size = crete_vdpa_get_config_size,
#endif
	.get_config = crete_vdpa_get_config,
	.set_config = crete_vdpa_set_config,
	.set_config_cb = crete_vdpa_set_config_cb,
	.get_vq_notification = crete_get_vq_notification,
#ifdef HAVE_VDPA_JMND_OPS
	/* set log base & size in one function */
	.set_log_base = crete_jmnd_set_log_base,
#endif

#ifdef HAVE_VDPA_EULER_OPS
	.set_log_base = crete_eular_set_log_base,
	.set_log_size = crete_eular_set_log_size,

	.suspend = crete_vdpa_suspend,
	.resume = crete_vdpa_resume,

	.dma_map = crete_vdpa_dma_map,
	.dma_unmap = crete_vdpa_dma_unmap,

	.get_dev_buffer_size = crete_vdpa_get_dev_buffer_size,
	.get_dev_buffer = crete_vdpa_get_dev_buffer,
	.set_dev_buffer = crete_vdpa_set_dev_buffer,
	.set_mig_state = crete_vdpa_set_mig_state,
	.log_sync = crete_vdpa_log_sync,
#endif
};

#ifdef HAVE_VDPA_EULER_OPS
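/*
 * Mirror the device's reserved IOMMU regions (MSI windows, etc.) into
 * @resv_iotlb so that guest memory is never mapped over them. If the
 * platform reports a software MSI region and no hardware one, an MSI
 * cookie is installed on the domain so MSI remapping keeps working.
 */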
static int crete_vdpa_resv_iommu_region(struct iommu_domain *domain, struct device *dma_dev,
					struct vhost_iotlb *resv_iotlb)
{
	struct list_head dev_resv_regions;
	phys_addr_t resv_msi_base = 0;
	struct iommu_resv_region *region;
	int ret = 0;
	bool with_sw_msi = false;
	bool with_hw_msi = false;

	INIT_LIST_HEAD(&dev_resv_regions);
	iommu_get_resv_regions(dma_dev, &dev_resv_regions);

	list_for_each_entry(region, &dev_resv_regions, list) {
		ret = vhost_iotlb_add_range_ctx(resv_iotlb, region->start,
						region->start + region->length - 1,
						0, 0, NULL);
		if (ret) {
			vhost_iotlb_reset(resv_iotlb);
			break;
		}

		if (region->type == IOMMU_RESV_MSI)
			with_hw_msi = true;

		if (region->type == IOMMU_RESV_SW_MSI) {
			resv_msi_base = region->start;
			with_sw_msi = true;
		}
	}

	if (!ret && !with_hw_msi && with_sw_msi)
		ret = iommu_get_msi_cookie(domain, resv_msi_base);

	iommu_put_resv_regions(dma_dev, &dev_resv_regions);

	return ret;
}

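/*
 * Allocate a private IOMMU domain for the function and attach the DMA
 * device to it, so this driver can service .dma_map/.dma_unmap itself.
 * The bus must be cache-coherent, and the device's reserved regions are
 * recorded in resv_iotlb before the domain is considered usable.
 */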
static int crete_vdpa_alloc_domain(struct vdpa_device *vdev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dma_dev = vdpa_get_dma_dev(vdev);
	struct bus_type *bus;
	int ret;

	/* The device wants to do DMA by itself */
	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	vd->domain = iommu_domain_alloc(bus);
	if (!vd->domain)
		return -EIO;

	ret = iommu_attach_device(vd->domain, dma_dev);
	if (ret)
		goto err_alloc_domain;

	ret = crete_vdpa_resv_iommu_region(vd->domain, dma_dev, &vd->resv_iotlb);
	if (ret)
		goto err_attach_device;

	dev_info(&cdev->pdev->dev, "crete vdpa alloc domain successfully\n");
	return 0;

err_attach_device:
	iommu_detach_device(vd->domain, dma_dev);
err_alloc_domain:
	iommu_domain_free(vd->domain);
	vd->domain = NULL;
	dev_err(&cdev->pdev->dev, "crete vdpa alloc domain failed\n");
	return ret;
}

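/*
 * Detach the DMA device and release the private IOMMU domain allocated
 * above. Safe to call when no domain was ever set up.
 */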
static void crete_vdpa_free_domain(struct vdpa_device *vdev)
{
	struct crete_vdpa *vd = vdpa_to_vd(vdev);
	struct crete_core_dev *cdev = vd_to_coredev(vd);
	struct device *dma_dev = vdpa_get_dma_dev(vdev);

	if (vd->domain) {
		iommu_detach_device(vd->domain, dma_dev);
		iommu_domain_free(vd->domain);
	}

	vd->domain = NULL;
	dev_info(&cdev->pdev->dev, "crete vdpa free domain\n");
}
#endif

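/*
 * Query the VF MAC address from the core device. The result is logged on
 * both paths; on failure the caller decides what address to fall back to.
 */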
static int crete_vdpa_get_mac(struct crete_core_dev *cdev, struct device *dev, u8 *mac)
{
	int ret;

	ret = crete_get_vf_mac(cdev, mac);
	if (ret)
		crete_err(dev, "crete vdpa get vf mac failed: %d\n", ret);
	else
		crete_info(dev, "crete vdpa get vf mac %pM\n", mac);

	return ret;
}

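/*
 * Management-device .dev_add callback: validate the hardware feature set,
 * allocate and initialize the crete_adapter/crete_vdpa pair, program the
 * per-queue notify addresses, and register the new device on the vDPA
 * bus. Only one vDPA device per management device is supported.
 */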
int crete_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config)
{
	struct crete_vdpa_mgmtdev *crete_mgmt_dev;
	struct crete_adapter *adapter;
	struct pci_dev *pdev;
	struct device *dev;
	struct crete_core_dev *core_dev;
	struct crete_vnet_hw_cap *hcap;
	struct crete_vdpa *cvd;
	struct crete_hw *hw;
	u8 addr[ETH_ALEN];
	int ret, i;

	crete_mgmt_dev = container_of(mdev, struct crete_vdpa_mgmtdev, mdev);
	core_dev = crete_mgmt_dev->cdev;
	pdev = core_dev->pdev;
	dev = &pdev->dev;

	crete_info(dev, "crete vdpa dev add\n");
	if (crete_mgmt_dev->adapter) {
		crete_warn(dev, "can't add vdpa dev name %s\n", name);
		return -EOPNOTSUPP;
	}

	hcap = &crete_mgmt_dev->hcap;
	hw = &core_dev->hw;

	if (!(hcap->hw_features & BIT_ULL(VIRTIO_F_VERSION_1) &&
	      hcap->hw_features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) {
		dev_warn(dev,
			 "Must provision minimum features 0x%llx for this device\n",
			 BIT_ULL(VIRTIO_F_VERSION_1) |
			 BIT_ULL(VIRTIO_F_ACCESS_PLATFORM));
		return -EOPNOTSUPP;
	}

	if (hcap->have_ctl)
		hcap->vdpa_max_vqs = 2 * hcap->max_vqpnum + 1;
	else
		hcap->vdpa_max_vqs = 2;

#ifndef HAVE_VDPA_ALLOC_LACK_GROUP
	adapter = vdpa_alloc_device(struct crete_adapter, vdpa,
				    dev, &crete_vdpa_ops, 1, 1, name, false);
#else
#ifndef HAVE_VDPA_ALLOC_LACK_NAME
	adapter = vdpa_alloc_device(struct crete_adapter, vdpa,
				    dev, &crete_vdpa_ops, NULL, false);
#else
	adapter = vdpa_alloc_device(struct crete_adapter, vdpa,
				    dev, &crete_vdpa_ops, hcap->vdpa_max_vqs);
#endif
#endif
	if (IS_ERR(adapter)) {
		dev_err(dev, "Failed to allocate vDPA structure\n");
		return PTR_ERR(adapter);
	}

	memset(&adapter->config, 0, sizeof(struct virtio_net_config));
	crete_mgmt_dev->adapter = adapter;
	cvd = &adapter->vd;
	cvd->hcap = hcap;
	cvd->dev_type = get_dev_type(pdev);
	cvd->config_size = crete_init_config_size(cvd);

	cvd->status = 0;
	cvd->num_queues = hcap->vdpa_max_vqs;
	cvd->qp_nums = cvd->num_queues / 2;
	cvd->queue_size = hcap->io_qlen;
	cvd->mig_log.nmaps = 0;
	cvd->mig_log.iova_size_h = 0;
	cvd->mig_log.iova_size_l = 0;
	INIT_LIST_HEAD(&cvd->mig_log.list);

	adapter->vdpa.dma_dev = &pdev->dev;

#ifdef HAVE_VDPA_MGMTDEV_OPS
	adapter->vdpa.mdev = mdev;
#endif

	adapter->config.max_virtqueue_pairs = hcap->max_vqpnum;
	adapter->config.mtu = 1500;
	adapter->config.status = VIRTIO_NET_S_LINK_UP;

#if defined(HAVE_VDPA_MGMTDEV_OPS)
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		memcpy(adapter->config.mac, config->net.mac, ETH_ALEN);
		dev_info(dev, "crete vdpa set mac %pM\n", adapter->config.mac);
	} else {
		/* fall back to a random address if the VF MAC query fails */
		if (crete_vdpa_get_mac(core_dev, dev, addr))
			eth_random_addr(addr);
		memcpy(adapter->config.mac, addr, ETH_ALEN);
	}

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		dev_info(dev, "crete vdpa set vQP %d\n", config->net.max_vq_pairs);
#else
	eth_random_addr(addr);
	memcpy(adapter->config.mac, addr, ETH_ALEN);
#endif

	adapter->cdev = core_dev;
	adapter->priv = crete_mgmt_dev;

	dev_info(dev, "[%s] [%d] vf_packed %d vf_irq %d queue size %d vdpa_max_vqs =%d\n",
		 __func__, __LINE__, vf_packed, vf_irq, crete_queue_size, hcap->vdpa_max_vqs);
	if (hcap->vdpa_max_vqs > CORSICA_MAX_QUEUES) {
		dev_err(dev,
			"[%s] [%d] hcap->vdpa_max_vqs > CORSICA_MAX_QUEUES",
			__func__, __LINE__);
		hcap->vdpa_max_vqs = CORSICA_MAX_QUEUES;
	}

	if (!cvd) {
		dev_err(dev, "cvd is null\n");
		ret = -EINVAL;
		goto err;
	}
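
	/*
	 * Pre-compute the notify (doorbell) address for every virtqueue.
	 * Data queues share the doorbell page at db_base; the control queue
	 * (index 2 * max_vqpnum) uses a separate page at offset 0x1000.
	 */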
	for (i = 0; i < hcap->vdpa_max_vqs; i++) {
		cvd->vring[i].irq = VIRTIO_MSI_NO_VECTOR;

		if (i == 2 * hcap->max_vqpnum) {
			cvd->vring[i].notify_addr =
				core_dev->db_base + 0x1000;
			cvd->vring[i].notify_pa =
				core_dev->bar_addr + ((hw->jnd.offset) << 12) + 0x1000;
			continue;
		}
		cvd->vring[i].notify_addr = core_dev->db_base;
		cvd->vring[i].notify_pa =
			core_dev->bar_addr + ((hw->jnd.offset) << 12);
	}
	dev_info(dev, "crete core dev doorbell base address:0x%pK\n",
		 core_dev->db_base);

	ret = crete_vdpa_register_event_handler(cvd);
	if (ret) {
		dev_err(dev, "Failed to register for crete events: %pe\n",
			ERR_PTR(ret));
		goto err;
	}

#if defined(HAVE_VDPA_MGMTDEV_OPS)
	ret = _vdpa_register_device(&adapter->vdpa, cvd->num_queues);
#else
	ret = vdpa_register_device(&adapter->vdpa);
#endif
	if (ret) {
		dev_err(dev, "Failed to register to vDPA bus %d\n", ret);
		goto err_event;
	}

	/* MSI-X was enabled by the crete core; release it when adding a vdpa device */
	crete_event_exit(hw);
	crete_exit_irq(core_dev);
	/* pci_set_drvdata(pdev, crete_mgmt_dev); drvdata is owned by the core driver */

#ifdef HAVE_VDPA_EULER_OPS
	/* alloc domain */
	vhost_iotlb_init(&cvd->resv_iotlb, 0, 0);
	ret = crete_vdpa_alloc_domain(&adapter->vdpa);
	if (ret) {
		crete_err(dev, "crete vdpa alloc domain failed %d\n", ret);
		goto err_event;
	}
#endif
	return 0;

err_event:
	crete_vdpa_unregister_event_handler(cvd);
err:
	/* allow a future dev_add after a failed attempt */
	crete_mgmt_dev->adapter = NULL;
	put_device(&adapter->vdpa.dev);
	return ret;
}

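/*
 * Management-device .dev_del callback: tear down in reverse order of
 * dev_add - event handler first, then (on openEuler kernels) the mm
 * binding and IOMMU domain, and finally the vDPA bus registration.
 */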
void crete_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct crete_vdpa_mgmtdev *crete_mgmt_dev;
	struct crete_adapter *adapter;

	crete_info(mdev->device, "crete vdpa del dev\n");
	crete_mgmt_dev = container_of(mdev, struct crete_vdpa_mgmtdev, mdev);
	adapter = crete_mgmt_dev->adapter;
	crete_vdpa_unregister_event_handler(&adapter->vd);
#ifdef HAVE_VDPA_EULER_OPS
	/* unbind the mm; MUST happen before freeing the domain */
	crete_vdpa_unbind_mm(dev);
	crete_vdpa_free_domain(dev);
#endif

#ifndef HAVE_VDPA_MGMTDEV_OPS
	vdpa_unregister_device(dev);
	/* TODO: need to release the adapter */
#else
	_vdpa_unregister_device(dev);
#endif
	crete_mgmt_dev->adapter = NULL;
}

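/*
 * Query queue-pair capabilities from the device and derive the virtqueue
 * limits advertised to the vDPA core. Several control-plane features the
 * device cannot service are masked out of hw_features here as well.
 */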
static int crete_get_max_supported_vqs(struct crete_vdpa_mgmtdev
				       *crete_mgmt_dev)
{
	int ret;
	u16 qsize;
	u8 maxqpnum;
	u8 ctrlqsize;
	struct crete_core_dev *core_dev = crete_mgmt_dev->cdev;
	struct pci_dev *pdev = core_dev->pdev;
	struct crete_vnet_hw_cap *hcap = &crete_mgmt_dev->hcap;

	ret = crete_get_qp_cap(core_dev, &qsize, &maxqpnum, &ctrlqsize);
	if (ret) {
		dev_err(&pdev->dev, "%s failed\n", __func__);
		return -EINVAL;
	}

	/* hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_CTRL_VQ); */
	hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_CTRL_RX);
	hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
	/* hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE); */
	hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
	/* hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_MQ); */
	/* openEuler SP1 doesn't support PACKED rings */
	if (!vf_packed)
		hcap->hw_features &= ~BIT_ULL(VIRTIO_F_RING_PACKED);
	/*
	 * To support the openEuler QEMU, this bit is used to set the
	 * size of the bitmap chunk.
	 */
	hcap->hw_features &= ~BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

	if (hcap->hw_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
		hcap->have_ctl = true;
		hcap->ctl_qlen = crete_queue_size;
	} else {
		hcap->have_ctl = false;
		hcap->ctl_qlen = 0;
	}
	hcap->max_vqpnum = maxqpnum;
	hcap->io_qlen = crete_queue_size;	/* don't use the cap value, use the module default */
	dev_info(&pdev->dev,
		 "crete vqs: ctl_qlen[0x%x] ioqlen[0x%x] maxvqs[%d]\n",
		 hcap->ctl_qlen, hcap->io_qlen, hcap->max_vqpnum);
	return ret;
}

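/*
 * Read the virtio-net feature bits the device offers and cache them in
 * the management device's hardware capability structure.
 */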
static int crete_get_supported_features(struct crete_vdpa_mgmtdev
					*crete_mgmt_dev)
{
	struct crete_core_dev *core_dev = crete_mgmt_dev->cdev;
	struct pci_dev *pdev = core_dev->pdev;
	struct crete_vnet_hw_cap *hcap = &crete_mgmt_dev->hcap;
	u64 dev_features;
	int ret;

	ret = crete_cmd_get_features(core_dev, CRETE_VIRTIO_NET_DEV_FEAT,
				     &dev_features);
	if (ret) {
		dev_err(&pdev->dev, "get device features failed\n");
		return -EINVAL;
	}

	hcap->hw_features = dev_features;
	dev_info(&pdev->dev, "device supported features[0x%llx] [0x%llx]\n",
		 hcap->hw_features, dev_features);
	return 0;
}

static const struct vdpa_mgmtdev_ops crete_vdpa_mgmt_dev_ops = {
	.dev_add = crete_vdpa_dev_add,
	.dev_del = crete_vdpa_dev_del,
};

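/*
 * Populate the vdpa_mgmt_dev descriptor: supported features, an id table
 * matching the device type, virtqueue limits, and the config attributes
 * (MAC, max vq pairs) that userspace may provision.
 */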
int crete_vdpa_get_mgmt_info(struct crete_vdpa_mgmtdev *cvm)
{
	int ret;
	u32 dev_type;
	struct crete_core_dev *core_dev = cvm->cdev;
	struct pci_dev *pdev = core_dev->pdev;

	ret = crete_get_supported_features(cvm);
	if (ret < 0) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	ret = crete_get_max_supported_vqs(cvm);
	if (ret < 0) {
		ret = -EOPNOTSUPP;
		goto err;
	}

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		cvm->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		cvm->mdev.id_table = id_table_blk;
		break;
	default:
		dev_err(&pdev->dev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	if (cvm->hcap.have_ctl)
		cvm->mdev.max_supported_vqs = 2 * cvm->hcap.max_vqpnum + 1;
	else
		cvm->mdev.max_supported_vqs = 2;

	cvm->mdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	cvm->mdev.config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	cvm->mdev.supported_features = cvm->hcap.hw_features;
	cvm->mdev.ops = &crete_vdpa_mgmt_dev_ops;
	cvm->mdev.device = &pdev->dev;
	return 0;
err:
	return ret;
}

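/*
 * Ask the core device to operate as a virtio-net (VNET) function and read
 * the type back to confirm the mode switch actually took effect.
 */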
int crete_vdpa_set_device_type(struct crete_core_dev *core_dev)
{
	int ret;
	struct pci_dev *pdev = core_dev->pdev;
	u32 devtype = 0;

	ret = crete_set_dev_type(core_dev, CRETE_VNET_DEV);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to set device type VNET, set cmd error\n");
		return -EINVAL;
	}

	ret = crete_get_dev_type(core_dev, &devtype);
	if (ret) {
		dev_err(&pdev->dev, "get dev type failed\n");
		goto out;
	}

	if (devtype != CRETE_VNET_DEV) {
		dev_err(&pdev->dev, "dev type check failed: not VNET\n");
		goto out;
	}

	return ret;

out:
	return -EINVAL;
}