// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Jaguar Micro. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <net/devlink.h>
#include <linux/workqueue.h>

#include "crete.h"
#include "crete_regs.h"
#include "crete_cmd.h"
#include "crete_cmd_if.h"
#include "crete_txrx.h"
#include "crete_sriov.h"
#include "crete_stub.h"
#include "crete_rdma_dev.h"
#include "crete_rdma_adapt.h"
#include "crete_devlink.h"
#include "crete_eswitch.h"
#include "crete_event.h"
char crete_driver_name[] = "crete";

struct crete_core_dev *g_cdev_test[2];
EXPORT_SYMBOL(g_cdev_test);

static const struct pci_device_id crete_pci_tbl[] = {
	{ PCI_VDEVICE(CRETE, PF_DEVICE_ID) },
	{ PCI_VDEVICE(CRETE, VF_DEVICE_ID), CRETE_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(CRETE, PF_DEVICE_ID_SIM) },
	{ PCI_VDEVICE(CRETE, VF_DEVICE_ID_SIM), CRETE_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(CRETE, PF_DEVICE_ID_NIC) },
	{ PCI_VDEVICE(CRETE, VF_DEVICE_ID_NIC), CRETE_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(CRETE, PF_DEVICE_ID_CMCC) },
#ifdef SNIC_OPENEULER_VERSION136
	{ PCI_VDEVICE(CRETE, VF_DEVICE_ID_CMCC), CRETE_PCI_DEV_IS_VF },
#endif
	/* required last entry */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, crete_pci_tbl);
static int crete_probe_one(struct pci_dev *pdev,
			   const struct pci_device_id *id);
static void crete_remove_one(struct pci_dev *pdev);
static int crete_init_one(struct crete_core_dev *core_dev);
static void crete_uninit_one(struct crete_core_dev *core_dev);
static int crete_sw_init(struct crete_core_dev *, struct crete_hw *);
static int crete_hw_init(struct crete_core_dev *, struct crete_hw *);
static int crete_sw_uninit(struct crete_core_dev *, struct crete_hw *);
static int crete_hw_uninit(struct crete_core_dev *, struct crete_hw *);
static void crete_shutdown(struct pci_dev *);
static int crete_load(struct crete_core_dev *dev);
static void crete_unload(struct crete_core_dev *dev);
static int crete_init(void);
static void crete_cleanup(void);
static int crete_init_mac_addr(struct net_device *netdev);
static int crete_cap_init(struct crete_core_dev *core_dev, struct crete_hw *hw);
static bool crete_uc_list_updated(struct net_device *dev);
static pci_ers_result_t __maybe_unused crete_pci_err_detected(struct pci_dev *pdev,
							      pci_channel_state_t state)
{
	struct crete_core_dev *dev = pci_get_drvdata(pdev);
	bool abort = false;

	crete_detach_device(dev);
	rtnl_lock();
	netif_device_detach(dev->netdev);

	if (test_and_set_bit(__CRETE_RESETING, &dev->state)) {
		crete_err(dev->device, "Firmware reset already in progress\n");
		abort = true;
	}

	if (abort || state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev->netdev))
		crete_close(dev->netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);

	/* Request a slot reset. */
	rtnl_unlock();
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t crete_pci_err_slot_reset(struct pci_dev *pdev)
{
	enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
	struct crete_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	crete_info(dev->device, "PCI Slot Reset..\n");
	err = pci_enable_device(pdev);
	if (err) {
		crete_err(dev->device, "%s: failed with error code: %d\n", __func__, err);
		goto out;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	res = PCI_ERS_RESULT_RECOVERED;
out:
	crete_info(dev->device, "%s Device state = %ld. Exit, err = %d\n",
		   __func__, dev->state, err);
	return res;
}
static void crete_pci_resume(struct pci_dev *pdev)
{
	struct crete_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;	/* initialized so the attach below is safe when the netdev is not running */

	crete_info(dev->device, "PCI Slot Resume..\n");
	rtnl_lock();

	if (netif_running(dev->netdev))
		err = crete_open(dev->netdev);

	if (!err)
		netif_device_attach(dev->netdev);

	rtnl_unlock();

	crete_attach_device(dev);
}
static const struct pci_error_handlers crete_pci_err_handler = {
	/* error detection is currently a no-op */
	.error_detected = NULL,
	.slot_reset = crete_pci_err_slot_reset,
	.resume = crete_pci_resume
};

static struct pci_driver crete_pci_driver = {
	.name = crete_driver_name,
	.id_table = crete_pci_tbl,
	.probe = crete_probe_one,
	.remove = crete_remove_one,
	.shutdown = crete_shutdown,
	.sriov_configure = crete_pci_sriov_configure,
	.err_handler = &crete_pci_err_handler
};
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * crete_init_module - Driver Registration Routine
 *
 * crete_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init crete_init_module(void)
{
	int err;

	pr_info("init crete module\n");

	crete_register_debugfs();

	err = pci_register_driver(&crete_pci_driver);
	if (err)
		goto err_pci_register;
	err = crete_init();
	if (err)
		goto err_crete_init;
	return 0;

err_crete_init:
	pci_unregister_driver(&crete_pci_driver);
err_pci_register:
	crete_unregister_debugfs();
	return err;
}

module_init(crete_init_module);

/**
 * crete_exit_module - Driver Exit Cleanup Routine
 *
 * crete_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit crete_exit_module(void)
{
	pr_info("exit crete module\n");
	crete_cleanup();
	pci_unregister_driver(&crete_pci_driver);
	crete_unregister_debugfs();
}

module_exit(crete_exit_module);
static int crete_get_max_irq(struct crete_core_dev *core_dev)
{
	struct pci_dev *pdev;
	u16 ctrl;
	int num_vectors;

	pdev = core_dev->pdev;
	if (!pdev->msix_cap)
		return 0;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	num_vectors = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
	pr_info("MSIX: num_vectors=%d\n", num_vectors);
	return num_vectors;
}
static void crete_init_rdma_vec_num(struct crete_core_dev *core_dev)
{
	if (!crete_have_rdma_cap(core_dev)) {
		dev_warn(&core_dev->pdev->dev, "not support rdma capability\n");
		core_dev->rdma_adp.vector_num = 0;
	} else {
		core_dev->rdma_adp.vector_num = JM_AUX_VECTOR_NUM;
	}
	dev_warn(&core_dev->pdev->dev, "rdma_dev->vector_num=%d\n",
		 core_dev->rdma_adp.vector_num);
}

static int crete_get_rdma_vec_num(struct crete_core_dev *core_dev)
{
	return core_dev->rdma_adp.vector_num;
}

static void crete_init_rdma_vec_base(struct crete_core_dev *core_dev,
				     int vec_base)
{
	if (!crete_have_rdma_cap(core_dev)) {
		dev_warn(&core_dev->pdev->dev, "not support rdma capability\n");
		core_dev->rdma_adp.vector_base = 0xfff;
	} else {
		core_dev->rdma_adp.vector_base = vec_base;
	}
	dev_warn(&core_dev->pdev->dev, "rdma_dev->vector_base=%d\n", vec_base);
}
void crete_init_msix(struct crete_core_dev *core_dev)
{
	int i, total_vecs, min, max_num, rdma_vecs;
	struct pci_dev *pdev;
	struct msix_entry *msix_ent;
	struct crete_irq_info *irq_info;

	if ((core_dev->flags & CRETE_FLAG_HAS_MSIX)) {
		dev_info(&core_dev->pdev->dev, "crete msix already init.\n");
		return;
	}

	pdev = core_dev->pdev;
	max_num = crete_get_max_irq(core_dev);
	crete_init_rdma_vec_num(core_dev);
	rdma_vecs = crete_get_rdma_vec_num(core_dev);
	if (max_num < 3) {
		pr_info("msix irq num lower than 3, return now!\n");
		return;
	}

	msix_ent = kcalloc(max_num, sizeof(struct msix_entry), GFP_KERNEL);
	if (!msix_ent)
		return;

	for (i = 0; i < max_num; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	min = 3; /* IO queue vec num */
	min += rdma_vecs;
	total_vecs = pci_enable_msix_range(pdev, msix_ent, min, max_num);
	if (total_vecs < 0) {
		pr_warn("msix alloc failed, range min:%d max:%d\n", min, max_num);
		kfree(msix_ent);
		return;
	}

	irq_info =
	    kcalloc(total_vecs, sizeof(struct crete_irq_info), GFP_KERNEL);
	if (!irq_info) {
		/* roll back the MSI-X enable if the bookkeeping alloc fails */
		pci_disable_msix(pdev);
		kfree(msix_ent);
		return;
	}

	for (i = 0; i < total_vecs; i++) {
		irq_info[i].handler = NULL;
		irq_info[i].requested = 0;
		irq_info[i].vector = msix_ent[i].vector;
	}
	bitmap_zero(core_dev->irqbit, CRETE_MAX_MSIX_NUM);
	core_dev->msix_ent = msix_ent;
	core_dev->irq_info = irq_info;
	core_dev->irq_num = total_vecs - rdma_vecs;
	core_dev->flags |= CRETE_FLAG_HAS_MSIX;
	crete_init_rdma_vec_base(core_dev, core_dev->irq_num);
	/* reserve the rdma irq vectors */
	for (i = core_dev->irq_num; i < total_vecs; i++)
		set_bit(i, core_dev->irqbit);

	dev_warn(&pdev->dev, "MSI-X total vecs:%d, min:%d, max:%d, rdma:%d\n",
		 total_vecs, min, max_num, rdma_vecs);
}
EXPORT_SYMBOL(crete_init_msix);
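/*
 * Vector layout note (derived from the code above): the first
 * core_dev->irq_num entries of the MSI-X table belong to the netdev
 * side (control plus IO queues), while the trailing rdma_vecs entries
 * are pre-marked in core_dev->irqbit so that only the RDMA auxiliary
 * device, starting at the base handed to crete_init_rdma_vec_base(),
 * ever uses them.
 */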
int crete_req_msixirq(struct crete_core_dev *core_dev)
{
	int ret;
	unsigned long bits;

	spin_lock(&core_dev->lock);
	bits = find_first_zero_bit(core_dev->irqbit, CRETE_MAX_MSIX_NUM);
	if (bits >= CRETE_MAX_MSIX_NUM) {
		ret = -1;
	} else {
		set_bit(bits, core_dev->irqbit);
		ret = (int)bits;
	}
	spin_unlock(&core_dev->lock);

	return ret;
}
EXPORT_SYMBOL(crete_req_msixirq);
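/*
 * Usage sketch (not from the original source): a consumer grabs a
 * vector index with crete_req_msixirq(), resolves it to a Linux IRQ
 * number through core_dev->irq_info[idx].vector for request_irq(),
 * and hands the index back with crete_free_msixirq() when done.  A
 * negative return value means the irqbit bitmap is exhausted.
 */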
void crete_free_msixirq(struct crete_core_dev *core_dev, int irq)
{
	/* guard against negative indices as well as out-of-range ones */
	if (irq < 0 || irq >= CRETE_MAX_MSIX_NUM)
		return;

	spin_lock(&core_dev->lock);
	clear_bit(irq, core_dev->irqbit);
	spin_unlock(&core_dev->lock);
}
EXPORT_SYMBOL(crete_free_msixirq);
void crete_exit_irq(struct crete_core_dev *core_dev)
{
	struct pci_dev *pdev;

	pdev = core_dev->pdev;
	if (!(core_dev->flags & CRETE_FLAG_HAS_MSIX))
		return;
	kfree(core_dev->irq_info);
	core_dev->irq_info = NULL;
	kfree(core_dev->msix_ent);
	core_dev->msix_ent = NULL;

	pci_disable_msix(core_dev->pdev);
	core_dev->flags &= ~CRETE_FLAG_HAS_MSIX;
	dev_info(&pdev->dev, "disable irq. flags 0x%x\n", core_dev->flags);
}
EXPORT_SYMBOL(crete_exit_irq);
extern const struct net_device_ops crete_netdev_ops;
static int crete_set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	/* dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); */
	return err;
}
static int crete_pci_init(struct crete_core_dev *core_dev, struct pci_dev *pdev,
			  const struct pci_device_id *id)
{
	int err;
	size_t bar_len;

	pr_info("crete probe begin\n");
	pci_set_drvdata(core_dev->pdev, core_dev);
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_mem_regions(pdev, crete_driver_name);
	if (err) {
		dev_err(&pdev->dev, "request mem regions err\n");
		goto err_pci_request_mem_regions;
	}
	err = crete_set_dma_caps(pdev);
	if (err)
		goto err_crete_set_dma_caps;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	core_dev->bar_addr = pci_resource_start(pdev, 0);
	bar_len = pci_resource_len(pdev, 0);
	core_dev->io_addr = pci_iomap(pdev, 0, bar_len);
	if (!core_dev->io_addr) {
		dev_err(&pdev->dev, "map bar err\n");
		err = -EFAULT;
		goto err_pci_iomap;
	}

	return 0;

	/* unwind in the reverse order of setup: regions are released for
	 * both the DMA-cap and iomap failures, and the device is always
	 * disabled last.
	 */
err_pci_iomap:
err_crete_set_dma_caps:
	pci_release_regions(pdev);
err_pci_request_mem_regions:
	pci_disable_device(pdev);
	return err;
}

static int crete_pci_close(struct crete_core_dev *core_dev)
{
	struct pci_dev *pdev = core_dev->pdev;

	pci_iounmap(pdev, core_dev->io_addr);
	pci_release_mem_regions(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	return 0;
}
static int crete_cdev_cap_init(struct crete_core_dev *cdev)
{
	int ret;
	u16 max_queue_size = 0;
	u8 max_qp_num, ctrl_queue_size = 0;

	ret = crete_get_qp_cap(cdev,
			       &max_queue_size, &max_qp_num, &ctrl_queue_size);
	if (ret) {
		crete_err(cdev->device, "crete core dev get cap failed\n");
		return ret;
	}

	cdev->cap.qpcap.ctrl_queue_size = ctrl_queue_size;
	cdev->cap.qpcap.max_qp_num = max_qp_num;
	cdev->cap.qpcap.max_queue_size = max_queue_size;
	return 0;
}
int crete_attach_netdev(struct crete_core_dev *core_dev)
{
	struct crete_hw *hw;
	static int count;
	int err;

	hw = &core_dev->hw;
	/* crete_core_dev init */
	pr_info("core dev init\n");
	pr_info("core dev set global count :%d\n", count & 0x1);
	g_cdev_test[count & 0x1] = core_dev;
	count++;

	err = crete_cdev_cap_init(core_dev);
	if (err < 0)
		goto err_crete_get_func_caps;

	err = crete_get_func_caps(core_dev);
	if (err < 0) {
		//dev_err(&pdev->dev, "crete hw init failed\n");
		goto err_crete_get_func_caps;
	}

	/* setup the private structure */
	err = crete_sw_init(core_dev, hw);
	if (err) {
		//dev_err(&pdev->dev, "crete sw init failed\n");
		goto err_crete_sw_init;
	}

	return 0;
err_crete_sw_init:
err_crete_get_func_caps:
	pr_info("core dev init failed, err:%d\n", err);
	return err;
}
void crete_detach_netdev(struct crete_core_dev *core_dev)
{
	struct crete_hw *hw;

	hw = &core_dev->hw;
	crete_sw_uninit(core_dev, hw);
	/* note: all irqs must have been freed by this point */
}
int crete_rescan_drivers(struct crete_core_dev *dev)
{
	int ret;

	crete_dev_list_lock();
	ret = crete_rescan_drivers_locked(dev);
	crete_dev_list_unlock();
	return ret;
}
static int crete_build_nic_netdev(struct net_device *netdev)
{
	struct crete_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->netdev_ops = &crete_netdev_ops;
	crete_dcbnl_build_netdev(netdev);
	//crete_set_ethtool_ops(netdev); //delay to init
	netdev->watchdog_timeo = 5 * HZ;
	netdev->ethtool_ops = &crete_ethtool_ops;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);
	/*
	 * just for aux test
	 * step 1:crete_adev_init
	 * step 2:attach device
	 */

	if (crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_IPV4_CSUM) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_TCP_CSUM) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_UDP_CSUM) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_SCTP_CSUM)) {
		netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
	}

	if (crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_RSS_HASH))
		netdev->hw_features |= NETIF_F_RXHASH;

	if (crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_TX_TCP_TSO) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_TX_IP_TNL_TSO) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_TX_UDP_TNL_TSO))
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

#define CRETE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = CRETE_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | CRETE_GSO_PARTIAL_FEATURES;

	/* copy netdev features into list of user selectable features */
	if (crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_VLAN_FILTER) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_RX_VLAN_STRIP) ||
	    crete_has_feature(core_dev, CRETE_FEATURE_OFFLOAD, CRETE_NET_F_TX_VLAN_INSERT)) {
		/* OR into features so the GSO bits set above are preserved */
		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX |
				    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXALL;
	}

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	strcpy(netdev->name, "eth%d");

	return 0;
}
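/*
 * Note: the CRETE_NET_F_* bits above are device capability flags
 * negotiated with firmware; the function simply translates each group
 * of capabilities into the corresponding kernel NETIF_F_* feature bits
 * before register_netdev() publishes them to the stack.
 */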
/* Allow PF, trusted VFs to be in promiscuous mode */
static bool crete_promisc_ok(struct crete_core_dev *coredev)
{
	if (crete_core_is_vf(coredev)
	    && !crete_is_trusted_vf(coredev->netdev, &coredev->vf))
		return false;

	return true;
}
static int crete_cmd_set_uc_mac(struct net_device *netdev)
{
	struct crete_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_uc_mac_in);
	int out_len = CRETE_ST_SZ_BYTES(set_uc_mac_out);
	void *in, *out, *uc_mac_list_in_base;
	int uc_list_num = priv->uc_filter_count;
	int uc_mac_list_size = uc_list_num * CRETE_ST_SZ_BYTES(uc_mac_list);

	in_len += ALIGN_TO_DW(uc_mac_list_size);
	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	uc_mac_list_in_base = in + in_len - ALIGN_TO_DW(uc_mac_list_size);

	CRETE_SET(set_uc_mac_in, in, cmd_op, 0);
	CRETE_SET(set_uc_mac_in, in, cmd_id, CRETE_CMD_SET_UC_MAC);
	CRETE_SET(set_uc_mac_in, in, uc_list_num, priv->uc_filter_count);
	memcpy(uc_mac_list_in_base, priv->uc_list, uc_mac_list_size);

	hexdump((char *)in, in_len);
	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_uc_mac_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_uc_mac_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set uc mac failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	crete_err(&pdev->dev, "%s return:%x\n", __func__, ret);
	return ret;
}
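/*
 * Command buffer layout shared by these filter helpers (as implied by
 * the length math): a fixed CRETE_ST_SZ_BYTES(...) header is followed
 * by an optional variable-length list (unicast MACs, multicast MACs,
 * VLAN ranges) padded to a dword boundary with ALIGN_TO_DW(), so the
 * list base is computed back from the end of the buffer as
 * in + in_len - ALIGN_TO_DW(list_size).
 */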
static int crete_cmd_set_mc_filter(struct net_device *netdev)
{
	struct crete_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_rx_mode_in);
	int out_len = CRETE_ST_SZ_BYTES(set_rx_mode_out);
	void *in, *out, *mc_mac_list_in_base;
	int vlan_range_list_size;
	int mc_mac_list_size;
	int vlan_range_num = 0;
	int mc_list_num = priv->mc_list_count;
	int mc_list_size = mc_list_num * ETH_ALEN;

	vlan_range_list_size = vlan_range_num * CRETE_ST_SZ_BYTES(vlan_range);
	in_len += vlan_range_list_size;

	mc_mac_list_size = mc_list_num * CRETE_ST_SZ_BYTES(mc_mac_list);
	in_len += ALIGN_TO_DW(mc_mac_list_size);

	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	mc_mac_list_in_base = in + in_len - ALIGN_TO_DW(mc_mac_list_size);

	CRETE_SET(set_rx_mode_in, in, cmd_op, 0);
	CRETE_SET(set_rx_mode_in, in, cmd_id, CRETE_CMD_SET_VPORT_RX_MODE);
	CRETE_SET(set_rx_mode_in, in, multicast, 1);
	CRETE_SET(set_rx_mode_in, in, mc_list_num, priv->mc_list_count);
	memcpy(mc_mac_list_in_base, priv->mc_list, mc_list_size);

	hexdump((char *)in, in_len);
	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_rx_mode_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_rx_mode_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set mc filter failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	return ret;
}
static int crete_cmd_set_rx_mask(struct net_device *netdev)
{
	struct crete_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int ret = 0, status = 0, err_type;
	int in_len = CRETE_ST_SZ_BYTES(set_rx_mode_in);
	int out_len = CRETE_ST_SZ_BYTES(set_rx_mode_out);
	void *in, *out;

	in = kvzalloc(in_len, GFP_KERNEL);
	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out || !in) {
		ret = -ENOMEM;
		goto err_out;
	}

	CRETE_SET(set_rx_mode_in, in, cmd_op, 0);
	CRETE_SET(set_rx_mode_in, in, cmd_id, CRETE_CMD_SET_VPORT_RX_MODE);

	if (priv->rx_mask & L2_SET_RX_MASK_REQ_MASK_BCAST)
		CRETE_SET(set_rx_mode_in, in, broadcast, 1);

	if (priv->rx_mask & L2_SET_RX_MASK_REQ_MASK_ALL_MCAST)
		CRETE_SET(set_rx_mode_in, in, all_multicast, 1);

	if (priv->rx_mask & L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS)
		CRETE_SET(set_rx_mode_in, in, promiscuous, 1);

	hexdump((char *)in, in_len);
	ret = crete_cmd_exec_polling(core_dev, in, in_len, out, out_len);
	if (ret < 0)
		goto err_out;

	status = CRETE_GET(set_rx_mode_out, out, status);
	if (status != SUCCESS) {
		err_type = CRETE_GET(set_rx_mode_out, out, err_type);
		crete_err(&pdev->dev,
			  "crete set rx mask failed, err type:0x%x status:0x%x\n",
			  err_type, status);
		ret = -EINVAL;
	}
err_out:
	kvfree(in);
	kvfree(out);
	return ret;
}
static int crete_set_default_mac(struct net_device *netdev)
{
	struct crete_priv *priv = netdev_priv(netdev);
	int rc;

	memcpy(priv->uc_list, netdev->dev_addr, ETH_ALEN);
	priv->uc_filter_count = 1;

	rc = crete_cmd_set_uc_mac(netdev);
	if (rc)
		priv->uc_filter_count = 0;

	return rc;
}
static int crete_set_uc_filter(struct net_device *netdev)
{
	struct crete_priv *priv = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	int off = 0;

	netif_addr_lock_bh(netdev);
	if (netdev_uc_count(netdev) > (CRETE_MAX_UC_ADDRS - 1)) {
		priv->rx_mask |= L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		/* rebuild the list from scratch on every update */
		priv->uc_filter_count = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			memcpy(priv->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			priv->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(netdev);

	return crete_cmd_set_uc_mac(netdev);
}
void crete_set_rx_mode_work(struct work_struct *work)
{
	struct crete_priv *priv = container_of(work, struct crete_priv,
					       set_rx_mode_work);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	struct net_device *netdev = priv->netdev;
	bool uc_update;
	int rc;

	netif_addr_lock_bh(netdev);
	uc_update = crete_uc_list_updated(netdev);
	netif_addr_unlock_bh(netdev);

	if (uc_update) {
		rc = crete_set_uc_filter(netdev);
		if (rc)
			dev_err(&pdev->dev,
				"HWRM l2 uc filter failure rc: %d\n", rc);
	}
	if (priv->rx_mask & L2_SET_RX_MASK_REQ_MASK_MCAST) {
		rc = crete_cmd_set_mc_filter(netdev);
		if (rc) {
			dev_err(&pdev->dev,
				"Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
				rc);
			priv->rx_mask &= ~L2_SET_RX_MASK_REQ_MASK_MCAST;
			priv->rx_mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			priv->mc_list_count = 0;
		}
	}
	/* promisc check */
	if ((priv->rx_mask & L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
	    !crete_promisc_ok(core_dev))
		priv->rx_mask &= ~L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	rc = crete_cmd_set_rx_mask(netdev);
	if (rc)
		dev_err(&pdev->dev, "HWRM l2 rx mask failure rc: %d\n", rc);
}
/* crete generic netdev management API (move to en_common.c) */
int crete_priv_init(struct crete_priv *priv, struct net_device *netdev,
		    struct crete_core_dev *core_dev)
{
	int mem_size = (CRETE_MAX_UC_ADDRS - 1) * ETH_ALEN;
	int rc = 0;

	/* priv init */
	priv->coredev = core_dev;
	priv->netdev = netdev;

	priv->uc_list = kmalloc(mem_size, GFP_KERNEL);
	if (!priv->uc_list) {
		rc = -ENOMEM;
		goto err_uc_list_alloc;
	}
	priv->mc_list_size = CRETE_MAX_MC_ADDRS * ETH_ALEN;
	priv->mc_list = kmalloc(priv->mc_list_size, GFP_KERNEL);
	if (!priv->mc_list) {
		rc = -ENOMEM;
		goto err_mc_list_alloc;
	}

	INIT_WORK(&priv->set_rx_mode_work, crete_set_rx_mode_work);
	priv->wq = create_singlethread_workqueue("crete_rx_mode");
	if (!priv->wq) {
		rc = -ENOMEM;
		goto err_workqueue;
	}
	return 0;
err_workqueue:
	kfree(priv->mc_list);
	priv->mc_list = NULL;
err_mc_list_alloc:
	kfree(priv->uc_list);
	priv->uc_list = NULL;
err_uc_list_alloc:
	return rc;
}
void crete_priv_cleanup(struct crete_priv *priv)
{
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev;

	if (!priv->coredev)
		return;

	pdev = core_dev->pdev;
	/* these are trace messages, not errors, so log them at debug level */
	dev_dbg(&pdev->dev, "enter crete_priv_cleanup\n");
	cancel_work_sync(&priv->set_rx_mode_work);
	destroy_workqueue(priv->wq);

	kfree(priv->mc_list);
	priv->mc_list = NULL;

	kfree(priv->uc_list);
	priv->uc_list = NULL;

	memset(priv, 0, sizeof(*priv));
	dev_dbg(&pdev->dev, "exit crete_priv_cleanup\n");
}
struct net_device *crete_create_netdev(struct crete_core_dev *coredev)
{
	struct net_device *netdev;
	unsigned int txqs, rxqs;
	struct crete_priv *priv;
	struct pci_dev *pdev = coredev->pdev;
	int err;

	// txqs = coredev->cap.qpcap.max_qp_num;
	// rxqs = coredev->cap.qpcap.max_qp_num;
	/* use the trimmed ring size */
	txqs = coredev->ring_size;
	rxqs = coredev->ring_size;
	/* create the netdevice with the trimmed qp number */
	netdev = alloc_etherdev_mqs(sizeof(struct crete_priv), txqs, rxqs);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}
	priv = netdev_priv(netdev);
	coredev->netdev = netdev;
	err = crete_priv_init(priv, netdev, coredev);
	if (err) {
		dev_err(&pdev->dev, "crete_priv_init failed\n");
		goto err_priv_init;
	}
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	return netdev;

err_priv_init:
	free_netdev(netdev);
	return NULL;
}
void crete_destroy_netdev(struct net_device *netdev)
{
	struct crete_priv *priv;

	priv = netdev_priv(netdev);
	crete_priv_cleanup(priv);
	free_netdev(netdev);
}
int crete_coredev_init(struct crete_core_dev *dev)
{
	int err;

	err = crete_adev_init(dev);
	return err;
}

void crete_coredev_uninit(struct crete_core_dev *dev)
{
	crete_adev_cleanup(dev);
}
static enum crete_device_type crete_get_device_type(struct pci_dev *pdev,
						    const struct pci_device_id *id)
{
	enum crete_device_type device_type;

	switch (pdev->device) {
	case PF_DEVICE_ID:
		device_type = CRETE_DEVICE_CRETE;
		break;
	case PF_DEVICE_ID_NIC:
	case VF_DEVICE_ID_NIC:
	case PF_DEVICE_ID_CMCC:
	case VF_DEVICE_ID_CMCC:
		device_type = CRETE_DEVICE_PNIC;
		break;
	case PF_DEVICE_ID_FAKE:
		device_type = CRETE_DEVICE_FAKE;
		break;
	default:
		device_type = CRETE_DEVICE_CRETE;
	}
	pr_info("device type %d\n", device_type);
	return device_type;
}
static int crete_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct crete_core_dev *core_dev;
	struct devlink *devlink;
	int err;
	enum crete_coredev_type coredev_type;
	enum crete_device_type device_type;

	coredev_type = id->driver_data & CRETE_PCI_DEV_IS_VF ?
	    CRETE_COREDEV_VF : CRETE_COREDEV_PF;
	device_type = crete_get_device_type(pdev, id);
	devlink =
	    crete_devlink_alloc(&pdev->dev, coredev_type == CRETE_COREDEV_PF);
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	core_dev = devlink_priv(devlink);
	core_dev->device = &pdev->dev;
	core_dev->pdev = pdev;
	spin_lock_init(&core_dev->lock);
	core_dev->coredev_type = coredev_type;
	core_dev->device_type = device_type;

	core_dev->adev_idx = crete_adev_idx_alloc();
	if (core_dev->adev_idx < 0) {
		err = core_dev->adev_idx;
		goto err_adev_idx_init;
	}

	err = crete_coredev_init(core_dev);
	if (err)
		goto err_coredev_init;
	err = crete_pci_init(core_dev, pdev, id);
	if (err)
		goto err_pci_init;
	err = crete_init_one(core_dev);
	if (err)
		goto err_init_one;

	pci_save_state(pdev);

	err = crete_devlink_register(devlink);
	if (err) {
		dev_err(&pdev->dev, "Failed to register devlink %d\n", err);
		goto err_devlink_reg;
	}

	return 0;
err_devlink_reg:
	crete_uninit_one(core_dev);
err_init_one:
	crete_pci_close(core_dev);
err_pci_init:
	crete_coredev_uninit(core_dev);
err_coredev_init:
	crete_adev_idx_free(core_dev->adev_idx);
err_adev_idx_init:
	crete_devlink_free(devlink);
	return err;
}
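/*
 * Probe ordering summary (as implemented above): devlink is allocated
 * first so the core device lives in its private area; then the aux
 * device index, core state, PCI resources and the main init path are
 * brought up; devlink is registered only after crete_init_one()
 * succeeds, so userspace never sees a half-initialized device.  The
 * error labels unwind in exactly the reverse order.
 */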
static void crete_remove_one(struct pci_dev *pdev)
{
	struct crete_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	crete_devlink_unregister(devlink);

	/* with the pf device need to clean the sriov with remove one */
	if (crete_core_is_pf(dev))
		crete_sriov_disable(pdev);

	crete_uninit_one(dev);
	crete_pci_close(dev);
	crete_coredev_uninit(dev);
	crete_adev_idx_free(dev->adev_idx);
	crete_devlink_free(devlink);
}
static int crete_init_once(struct crete_core_dev *dev)
{
	int err;

#ifndef CONFIG_NOSIM_DEBUG
	err = crete_hw_init(dev, &dev->hw);
	if (err) {
		dev_err(&dev->pdev->dev, "crete hw init failed\n");
		return err;
	}
#endif
	err = crete_rdma_coredev_init(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "crete rdma coredev init failed\n");
		goto err_rdma_coredev_init;
	}
	/* crete init msix interrupt */
	crete_init_msix(dev);

	err = crete_sriov_init(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to init sriov %d\n", err);
		goto err_sriov_init;
	}
	err = crete_eswitch_init(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to init eswitch %d\n", err);
		goto err_eswitch_init;
	}

	return 0;

err_eswitch_init:
	crete_sriov_cleanup(dev);
err_sriov_init:
	crete_rdma_coredev_uninit(dev);
err_rdma_coredev_init:
#ifndef CONFIG_NOSIM_DEBUG
	(void)crete_hw_uninit(dev, &dev->hw);
#endif
	return err;
}
static void crete_cleanup_once(struct crete_core_dev *dev)
{
	int num_vfs = pci_num_vf(dev->pdev);

	pr_info("cleanup once enter\n");
	if (dev->sriov_cfg == 1)
		crete_device_disable_sriov(dev, num_vfs, true);

	crete_eswitch_cleanup(dev->eswitch);
	crete_sriov_cleanup(dev);

	crete_exit_irq(dev);
	crete_rdma_coredev_uninit(dev);
#ifndef CONFIG_NOSIM_DEBUG
	(void)crete_hw_uninit(dev, &dev->hw);
#endif

	debugfs_remove(dev->dbg.dbg_root);

	pr_info("cleanup once exit\n");
}
static int crete_init_one(struct crete_core_dev *core_dev)
{
	int err;
	struct pci_dev *pdev;
	struct crete_hw *hw;

	pr_info("init one enter\n");

	core_dev->dbg.dbg_root = debugfs_create_dir(dev_name(core_dev->device),
						    crete_debugfs_root);

	pdev = core_dev->pdev;
	err = crete_init_once(core_dev);
	if (err) {
		dev_err(&pdev->dev, "sw objs init failed\n");
		return err;
	}
	hw = &core_dev->hw;
#ifdef CONFIG_NOSIM_DEBUG
	core_dev->db_base = core_dev->io_addr;
	pr_info("Get db base address:0x%p\n", core_dev->db_base);
#else
	core_dev->db_base = core_dev->io_addr + ((hw->jnd.offset) << 12);
	pr_info("Get db base address:0x%p, offset:%u\n", core_dev->db_base,
		hw->jnd.offset);
#endif
	err = crete_event_init(hw);
	if (err < 0) {
		pr_info("crete_event_init failed\n");
		goto err_crete_event_init;
	}

	err = crete_cmd_init(core_dev);
	if (err < 0) {
		//dev_err(&pdev->dev, "crete cmd init failed\n");
		goto err_crete_cmd_init;
	}

	/* get sfi info */
	err = crete_get_dev_sfi(core_dev, CRETE_GET_SFI_CURR, 0, &core_dev->sfi_id);
	if (err) {
		pr_err("get sfi error info\n");
		goto err_load;
	}
	pr_info("get the current device sfi id %u\n", core_dev->sfi_id);

	err = crete_load(core_dev);
	if (err)
		goto err_load;

	err = crete_lag_add(core_dev);
	if (err)
		goto err_lag_add;

	err = crete_register_device(core_dev);
	if (err) {
		dev_err(&pdev->dev, "register device failed\n");
		goto err_register_device;
	}
	pr_info("init one exit\n");
	return 0;

err_register_device:
	crete_lag_remove(core_dev);
err_lag_add:
	crete_unload(core_dev);
err_load:
	crete_cmd_exit(core_dev);
err_crete_cmd_init:
	crete_event_exit(hw);
err_crete_event_init:
	crete_cleanup_once(core_dev);
	return err;
}
void crete_uninit_one(struct crete_core_dev *core_dev)
{
	crete_unregister_device(core_dev);
	crete_unregister_debugfs_statistics(core_dev);
	crete_unload(core_dev);
	crete_lag_remove(core_dev);
	crete_cmd_exit(core_dev);
	crete_event_exit(&core_dev->hw);
	crete_cleanup_once(core_dev);
}
unsigned int crete_get_max_rss_queues(struct crete_core_dev *core_dev)
{
	return CRETE_MAX_RX_QUEUES;
}

static void crete_init_queue_configuration(struct crete_core_dev *core_dev)
{
	u32 max_rss_queues;

	max_rss_queues = crete_get_max_rss_queues(core_dev);
	core_dev->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
}
static int crete_cap_parse(struct crete_core_dev *core_dev, u8 __iomem *addr,
			   struct crete_hw *hw)
{
	uint16_t cap_type = readw(addr);

	switch (cap_type) {
	case CRETE_NET_CONF_CAP:
		crete_ioread32(addr, &hw->jnd, 2);
		break;
	case CRETE_RDMA_CONF_CAP:
		crete_ioread32(addr, &hw->rdma_desc, 2);
		break;
	case CRETE_OFFLOAD_CONF_CAP:
		crete_ioread32(addr, &hw->offload_desc, 2);
		break;
	case CRETE_RES_CONF_CAP:
		crete_ioread32(addr, &hw->jrd, 2);
		break;
	case CRETE_DEVS_CONF_CAP:
		crete_ioread32(addr, &hw->dev_spec_desc, 2);
		break;
	case CRETE_SHARES_DB_CAP:
		crete_ioread32(addr, &hw->share_data_desc, 2);
		break;
	default:
		pr_err("cap not found\n");
		return -EIO;
	}

	return 0;
}
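/*
 * Capability directory layout (inferred from crete_cap_parse() above
 * and the walkers below): the BAR exposes hw->jdh.cap_cnt descriptors
 * starting at CRETE_JND_OFFSET, spaced CRETE_CAP_OFFSET bytes apart.
 * The first 16-bit word of each descriptor is its type; the matching
 * descriptor body is copied out with crete_ioread32().
 */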
static int crete_net_cap_lookup(struct crete_core_dev *core_dev,
				struct crete_hw *hw)
{
	int i = 0;
	uint16_t cap_type;
	uint8_t __iomem *addr = core_dev->io_addr + CRETE_JND_OFFSET;

	hw->io_addr = (struct crete_init_seg *)(core_dev->io_addr +
						hw->jdh.offset * sizeof(int));

	for (i = 0; i < hw->jdh.cap_cnt; i++) {
		cap_type = readw(addr);
		if (cap_type == CRETE_NET_CONF_CAP)
			return 0;
		addr += CRETE_CAP_OFFSET;
	}

	return -ENXIO;
}
static int crete_cap_init(struct crete_core_dev *core_dev, struct crete_hw *hw)
{
	u8 __iomem *addr = core_dev->io_addr + CRETE_JND_OFFSET;
	int i = 0;
	int err = 0;

	for (i = 0; i < hw->jdh.cap_cnt; i++) {
		err = crete_cap_parse(core_dev, addr, hw);
		if (err < 0) {
			pr_err("parse cap err\n");
			return err;
		}
		addr += CRETE_CAP_OFFSET;
	}

	return err;
}
static int crete_reset(struct crete_hw *hw)
{
	int cnt = 0;

	writeb(CRETE_HW_RESET, &hw->io_addr->reset_dev);

	for (cnt = 0; cnt < CRETE_HW_RESET_TIMEOUT; cnt++) {
		if (!readb(&hw->io_addr->dev_status))
			break;

		msleep(20);
	}

	if (cnt == CRETE_HW_RESET_TIMEOUT) {
		pr_err("crete reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
static int crete_hw_init(struct crete_core_dev *core_dev, struct crete_hw *hw)
{
	int err = -EINVAL;
	u8 val = 0;

	/* TODO: maybe this can be saved in memory */
	crete_ioread32((CRETE_JDH_OFFSET + core_dev->io_addr), &hw->jdh, 2);

	if (hw->jdh.magic != CRETE_MAGIC_NUM) {
		dev_err(&core_dev->pdev->dev,
			"crete header err, magic:0x%x,cap cnt:0x%x\n",
			hw->jdh.magic, hw->jdh.cap_cnt);
		goto done;
	}

	hw->io_addr = (struct crete_init_seg *)(core_dev->io_addr +
						hw->jdh.offset * sizeof(int));
	pr_debug("func:%s-line:%d-offset:0x%x\n", __func__, __LINE__,
		 hw->jdh.offset);

	err = crete_reset(hw);
	if (err < 0)
		goto done;

	val = CRETE_DEV_ACK | CRETE_DEV_DRV;
	writeb(val, &hw->io_addr->dev_status);

	err = crete_net_cap_lookup(core_dev, hw);
	if (err < 0) {
		dev_err(&core_dev->pdev->dev, "net cap not found\n");
		val = CRETE_DEV_FAIL;
		writeb(val, &hw->io_addr->dev_status);
		goto done;
	}

	err = crete_cap_init(core_dev, hw);
	if (!err) {
		val = readb(&hw->io_addr->dev_status);
		val |= CRETE_DEV_CAP;
		writeb(val, &hw->io_addr->dev_status);
	}
done:
	return err;
}
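/*
 * Handshake protocol (as written above): after reset the driver
 * writes CRETE_DEV_ACK | CRETE_DEV_DRV to dev_status, then either ORs
 * in CRETE_DEV_CAP once capability parsing succeeds or writes
 * CRETE_DEV_FAIL, presumably so firmware can observe that driver
 * initialization aborted.
 */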
static int crete_hw_uninit(struct crete_core_dev *core_dev, struct crete_hw *hw)
{
	return 0;
}
static int crete_sw_init(struct crete_core_dev *core_dev, struct crete_hw *hw)
{
	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	// core_dev->flags |= CRETE_FLAG_HAS_MSIX;

	/* get queue num */
	crete_init_queue_configuration(core_dev);

	set_bit(__CRETE_DOWN, &core_dev->state);

	return 0;
}

static int crete_sw_uninit(struct crete_core_dev *core_dev, struct crete_hw *hw)
{
	/* do nothing */
	return 0;
}
static void crete_shutdown(struct pci_dev *pdev)
{
}
static int crete_load(struct crete_core_dev *dev)
{
	int err;

	err = crete_attach_device(dev);
	if (err)
		return err;
	if (crete_have_rdma_cap(dev)) {
		err = jm_attach_device(&dev->rdma_coredev);
		if (err)
			crete_detach_device(dev);
	}

	return err;
}

static void crete_unload(struct crete_core_dev *dev)
{
	if (crete_have_rdma_cap(dev))
		jm_detach_device(&dev->rdma_coredev);

	crete_eswitch_disable(dev->eswitch);
	crete_detach_device(dev);
}
int crete_create_mdev_resources(struct crete_core_dev *core_dev)
{
	return 0;
}

void crete_destroy_coredev_resources(struct crete_core_dev *core_dev)
{
}
static int crete_resume(struct auxiliary_device *adev)
{
	struct crete_aux_dev *aux_dev =
	    container_of(adev, struct crete_aux_dev, adev);
	struct crete_core_dev *core_dev = aux_dev->core_dev;
	struct net_device *netdev = core_dev->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = crete_create_mdev_resources(core_dev);
	if (err)
		return err;

	err = crete_attach_netdev(core_dev);
	if (err) {
		crete_destroy_coredev_resources(core_dev);
		return err;
	}

	return 0;
}

static int crete_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	struct crete_aux_dev *aux_dev =
	    container_of(adev, struct crete_aux_dev, adev);
	struct crete_core_dev *core_dev = aux_dev->core_dev;
	struct net_device *netdev = core_dev->netdev;

	if (!netif_device_present(netdev))
		return -ENODEV;

	crete_detach_netdev(core_dev);
	crete_destroy_coredev_resources(core_dev);
	return 0;
}
static int crete_cdev_ring_init(struct crete_core_dev *cdev)
{
	int ret;
	int qpnum;

	qpnum = cdev->ring_size;

	cdev->jnapi = kcalloc(2 * qpnum, sizeof(struct crete_napi), GFP_KERNEL);
	if (!cdev->jnapi)
		return -ENOMEM;

	cdev->rxring =
	    kcalloc(qpnum, sizeof(struct crete_rxring_info), GFP_KERNEL);
	if (!cdev->rxring) {
		kfree(cdev->jnapi);
		return -ENOMEM;
	}

	cdev->rxring_mapping = kcalloc(qpnum, sizeof(u16), GFP_KERNEL);
	if (!cdev->rxring_mapping) {
		ret = -ENOMEM;
		goto err_rxmapping;
	}

	cdev->txring =
	    kcalloc(qpnum, sizeof(struct crete_txring_info), GFP_KERNEL);
	if (!cdev->txring) {
		ret = -ENOMEM;
		goto err_txring;
	}

	cdev->txring_mapping = kcalloc(qpnum, sizeof(u16), GFP_KERNEL);
	if (!cdev->txring_mapping) {
		ret = -ENOMEM;
		goto err_txmapping;
	}

	cdev->db = kzalloc(sizeof(struct crete_db), GFP_KERNEL);
	if (!cdev->db) {
		ret = -ENOMEM;
		goto err_db;
	}

	cdev->db->db_addr = cdev->db_base; /* all the queues share the doorbell address */
	cdev->rxcpring =
	    kcalloc(qpnum, sizeof(struct crete_rxcp_ring_info), GFP_KERNEL);
	cdev->txcpring =
	    kcalloc(qpnum, sizeof(struct crete_txcp_ring_info), GFP_KERNEL);
	/* check the completion ring allocations too */
	if (!cdev->rxcpring || !cdev->txcpring) {
		ret = -ENOMEM;
		goto err_cpring;
	}

	return 0;

err_cpring:
	kfree(cdev->rxcpring);
	cdev->rxcpring = NULL;
	kfree(cdev->txcpring);
	cdev->txcpring = NULL;
	kfree(cdev->db);
	cdev->db = NULL;

err_db:
	kfree(cdev->txring_mapping);
	cdev->txring_mapping = NULL;

err_txmapping:
	kfree(cdev->txring);
	cdev->txring = NULL;

err_txring:
	kfree(cdev->rxring_mapping);
	cdev->rxring_mapping = NULL;

err_rxmapping:
	kfree(cdev->jnapi);
	cdev->jnapi = NULL;
	kfree(cdev->rxring);
	cdev->rxring = NULL;

	return ret;
}
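/*
 * Per-queue-pair bookkeeping allocated above: every QP gets an rx
 * ring, a tx ring, one completion ring per direction, and a
 * crete_napi pair (hence 2 * qpnum entries).  The single crete_db
 * object is shared because, per the comment in the code, all queues
 * ring the same doorbell address.
 */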
static int crete_alloc_rxring_mem(struct crete_core_dev *cdev,
				  struct crete_rxring_info *rxring)
{
	dma_addr_t mapping;
	void *addr;
	int i;
	struct device *dev = &cdev->pdev->dev;
	u16 queuesize = cdev->cap.qpcap.max_queue_size;
	size_t len = L1_CACHE_ALIGN(queuesize * CRETE_ENTRY_DFAULT_SIZE);

	/* reject a zero length or one that is not a power of 2 */
	if (!len || !is_power_of_2(len)) {
		crete_err(dev, "queue size not correct\n");
		return -EINVAL;
	}

	addr = dma_alloc_coherent(dev, len, &mapping, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	crete_info(dev, "rxring mem dma handle 0x%llx, addr 0x%lx\n", mapping,
		   (unsigned long)addr);

	rxring->rx_bd = addr;
	rxring->rx_bd_mapping = mapping;
	rxring->rx_ring_size = queuesize;
	rxring->rx_ring_mask = queuesize - 1;
	rxring->rx_cons = rxring->rx_prod = 0;
	rxring->bd_len = len;
	rxring->rx_buf =
	    kcalloc(queuesize, sizeof(struct crete_rx_buf), GFP_KERNEL);
	if (!rxring->rx_buf)
		return -ENOMEM;
	rxring->rxq_stats = kcalloc(1, sizeof(struct crete_rxq_stats), GFP_KERNEL);
	if (!rxring->rxq_stats)
		return -ENOMEM;
	u64_stats_init(&rxring->rxq_stats->syncp);

	/* init each rx buffer's refcount to 0 */
	for (i = 0; i < queuesize; i++)
		refcount_set(&rxring->rx_buf[i].kref, 0);

	return 0;
}
static int crete_alloc_txring_mem(struct crete_core_dev *cdev,
				  struct crete_txring_info *txring)
{
	dma_addr_t mapping;
	void *addr;
	int i;
	struct device *dev = &cdev->pdev->dev;
	u16 queuesize = cdev->cap.qpcap.max_queue_size;
	size_t len = L1_CACHE_ALIGN(queuesize * CRETE_ENTRY_DFAULT_SIZE);

	crete_info(dev, "crete alloc txring mem ring id:%d cpring:0x%lx\n",
		   txring->id, (unsigned long)txring->cpring);

	if (!len || !is_power_of_2(len)) {
		crete_warn(dev, "is not power of 2\n");
		return -EINVAL;
	}

	addr = dma_alloc_coherent(dev, len, &mapping, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	crete_info(dev, "txring mem dma handle 0x%llx, addr 0x%lx\n", mapping,
		   (unsigned long)addr);

	txring->tx_bd = addr;
	txring->tx_bd_mapping = mapping;
	txring->tx_ring_size = queuesize;
	txring->tx_ring_mask = queuesize - 1;
	atomic_set(&txring->ring_avail, queuesize);
	txring->bd_len = len;
	txring->tx_buf =
	    kcalloc(queuesize, sizeof(struct crete_tx_buf), GFP_KERNEL);
	if (!txring->tx_buf)
		return -ENOMEM;
	txring->txq_stats = kcalloc(1, sizeof(struct crete_txq_stats), GFP_KERNEL);
	if (!txring->txq_stats)
		return -ENOMEM;
	u64_stats_init(&txring->txq_stats->syncp);

	/* init each tx buffer's refcount to 0 */
	for (i = 0; i < queuesize; i++)
		refcount_set(&txring->tx_buf[i].kref, 0);

	return 0;
}
static int crete_alloc_txcpring_mem(struct crete_core_dev *cdev,
				    struct crete_txcp_ring_info *txcpr)
{
	dma_addr_t mapping;
	void *addr;
	struct device *dev = &cdev->pdev->dev;
	u16 queuesize = cdev->cap.qpcap.max_queue_size;
	size_t len = L1_CACHE_ALIGN(queuesize * CRETE_ENTRY_DFAULT_SIZE);

	if (!len || !is_power_of_2(len)) {
		crete_warn(dev, "is not power of 2\n");
		return -EINVAL;
	}

	addr = dma_alloc_coherent(dev, len, &mapping, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	txcpr->txcq_base = addr;
	txcpr->txcq_mapping = mapping;
	txcpr->ring_size = queuesize;
	txcpr->ring_mask = queuesize - 1;
	txcpr->bd_len = len;
	atomic_set(&txcpr->ring_avail, queuesize);
	return 0;
}
static int crete_alloc_rxcpring_mem(struct crete_core_dev *cdev,
				    struct crete_rxcp_ring_info *rxcpr)
{
	dma_addr_t mapping;
	void *addr;
	struct device *dev = &cdev->pdev->dev;
	u16 queuesize = cdev->cap.qpcap.max_queue_size;
	size_t len = L1_CACHE_ALIGN(queuesize * CRETE_ENTRY_DFAULT_SIZE);

	if (!len || !is_power_of_2(len)) {
		crete_warn(dev, "is not power of 2\n");
		return -EINVAL;
	}

	addr = dma_alloc_coherent(dev, len, &mapping, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	rxcpr->rxcq_base = addr;
	rxcpr->rxcq_mapping = mapping;
	rxcpr->ring_size = queuesize;
	rxcpr->ring_mask = queuesize - 1;
	rxcpr->bd_len = len;

	return 0;
}
static int crete_alloc_rings(struct crete_core_dev *cdev)
{
	int ret;
	int ring_size = cdev->ring_size;
	int i;
	struct crete_rxring_info *rxring;
	struct crete_txring_info *txring;
	struct crete_rxcp_ring_info *rxcpring;
	struct crete_txcp_ring_info *txcpring;
	struct device *dev = cdev->device;

	if (ring_size <= 0)
		return -EBUSY;

	rxring = cdev->rxring;
	crete_info(dev, "rx ring alloc mem ring size:%u\n", ring_size);
	for (i = 0; i < ring_size; i++) {
		ret = crete_alloc_rxring_mem(cdev, &rxring[i]);
		if (ret)
			return -ENOMEM;
	}

	crete_info(dev, "tx ring alloc mem ring size:%u\n", ring_size);
	txring = cdev->txring;
	for (i = 0; i < ring_size; i++) {
		ret = crete_alloc_txring_mem(cdev, &txring[i]);
		if (ret)
			return -ENOMEM;
	}

	rxcpring = cdev->rxcpring;
	for (i = 0; i < ring_size; i++) {
		ret = crete_alloc_rxcpring_mem(cdev, &rxcpring[i]);
		if (ret)
			return -ENOMEM;
	}

	crete_info(dev, "rxcpring mem alloc mem\n");
	txcpring = cdev->txcpring;
	for (i = 0; i < ring_size; i++) {
		ret = crete_alloc_txcpring_mem(cdev, &txcpring[i]);
		if (ret)
			return -ENOMEM;
	}

	for (i = 0; i < ring_size; i++) {
		txcpring[i].wrap_counter = 0;
		rxcpring[i].wrap_counter = 0;
	}

	return 0;
}
static int crete_free_rxring_mem(struct crete_core_dev *cdev)
{
	int i;
	struct crete_rxring_info *rxr;
	struct device *dev = &cdev->pdev->dev;

	crete_info(dev, "free rxring mem\n");
	for (i = 0; i < cdev->ring_size; i++) {
		rxr = &cdev->rxring[i];
		dma_free_coherent(dev, rxr->bd_len, (void *)rxr->rx_bd,
				  rxr->rx_bd_mapping);
		rxr->rx_bd = NULL;
		rxr->rx_bd_mapping = 0;
		rxr->bd_len = 0;
		/* also release the per-buffer array allocated alongside the ring */
		kfree(rxr->rx_buf);
		rxr->rx_buf = NULL;
		kfree(rxr->rxq_stats);
		rxr->rxq_stats = NULL;
	}

	return 0;
}
static int crete_free_txring_mem(struct crete_core_dev *cdev)
{
	int i;
	struct device *dev = &cdev->pdev->dev;
	struct crete_txring_info *txr;

	crete_info(dev, "free txring mem\n");
	for (i = 0; i < cdev->ring_size; i++) {
		txr = &cdev->txring[i];
		dma_free_coherent(dev, txr->bd_len, (void *)txr->tx_bd,
				  txr->tx_bd_mapping);
		txr->tx_bd = NULL;
		txr->tx_bd_mapping = 0;
		txr->bd_len = 0;
		/* also release the per-buffer array allocated alongside the ring */
		kfree(txr->tx_buf);
		txr->tx_buf = NULL;
		kfree(txr->txq_stats);
		txr->txq_stats = NULL;
	}

	return 0;
}
static int crete_free_txcpring_mem(struct crete_core_dev *cdev)
{
	int i;
	struct device *dev = &cdev->pdev->dev;
	struct crete_txcp_ring_info *txcpr;

	crete_info(dev, "free txcpring mem\n");
	for (i = 0; i < cdev->ring_size; i++) {
		txcpr = &cdev->txcpring[i];
		dma_free_coherent(dev, txcpr->bd_len, (void *)txcpr->txcq_base,
				  txcpr->txcq_mapping);
		txcpr->txcq_base = NULL;
		txcpr->txcq_mapping = 0;
		txcpr->bd_len = 0;
	}

	return 0;
}
static int crete_free_rxcpring_mem(struct crete_core_dev *cdev)
{
	int i;
	struct device *dev = &cdev->pdev->dev;
	struct crete_rxcp_ring_info *rxcpr;

	crete_info(dev, "free rxcpring mem\n");
	for (i = 0; i < cdev->ring_size; i++) {
		rxcpr = &cdev->rxcpring[i];
		dma_free_coherent(dev, rxcpr->bd_len, (void *)rxcpr->rxcq_base,
				  rxcpr->rxcq_mapping);
		rxcpr->rxcq_base = NULL;
		rxcpr->rxcq_mapping = 0;
		rxcpr->bd_len = 0;
	}

	return 0;
}
static void crete_free_rings(struct crete_core_dev *cdev)
{
	crete_free_rxring_mem(cdev);
	crete_free_txring_mem(cdev);
	crete_free_txcpring_mem(cdev);
	crete_free_rxcpring_mem(cdev);
}
static void crete_cdev_ring_exit(struct crete_core_dev *cdev)
{
	kfree(cdev->rxcpring);
	kfree(cdev->txcpring);
	kfree(cdev->db);
	kfree(cdev->txring_mapping);
	kfree(cdev->txring);
	kfree(cdev->rxring_mapping);
	kfree(cdev->rxring);
	kfree(cdev->jnapi);
	cdev->rxcpring = NULL;
	cdev->txcpring = NULL;
	cdev->db = NULL;
	cdev->txring_mapping = NULL;
	cdev->txring = NULL;
	cdev->rxring_mapping = NULL;
	cdev->rxring = NULL;
	cdev->jnapi = NULL;
}
void crete_rel_msixirq(struct crete_core_dev *cdev);
static void crete_delete_rings(struct crete_core_dev *cdev)
{
	crete_rel_msixirq(cdev);

	crete_free_rings(cdev);
	crete_cdev_ring_exit(cdev);
}
static int crete_trim_rings(struct crete_core_dev *cdev)
{
	int cpunums = num_online_cpus();
	int maxqpnums = cdev->cap.qpcap.max_qp_num;

	cdev->ring_size = min(cpunums, maxqpnums);
	if (!cdev->ring_size) {
		crete_err(cdev->device, "ring size zero not right\n");
		return -EINVAL;
	}

	cdev->max_qp_num = cdev->ring_size;

	return 0;
}
static void crete_init_napi(struct crete_core_dev *cdev)
{
	int i;
	struct crete_napi *jnapi;
	struct crete_napi *rx_jnapi;
	struct crete_napi *tx_jnapi;

	jnapi = cdev->jnapi;
	crete_info(&cdev->pdev->dev, "crete init napi\n");

	for (i = 0; i < cdev->ring_size; i++) {
		rx_jnapi = &jnapi[i * 2];	/* rx rings take the even slots */
		tx_jnapi = &jnapi[i * 2 + 1];	/* tx rings take the odd slots */

		rx_jnapi->cdev = cdev;
		rx_jnapi->rxring = &cdev->rxring[i];
		rx_jnapi->rxring->id = i * 2;
		rx_jnapi->rxring->priv = rx_jnapi;
		rx_jnapi->rxcpring = &cdev->rxcpring[i];
		rx_jnapi->rxcpring->priv = rx_jnapi;

		tx_jnapi->cdev = cdev;
		tx_jnapi->txring = &cdev->txring[i];
		tx_jnapi->txring->id = i * 2 + 1;
		tx_jnapi->txring->priv = tx_jnapi;
		tx_jnapi->txcpring = &cdev->txcpring[i];
		tx_jnapi->txcpring->priv = tx_jnapi;
	}
}
/* reserve the msix irq vectors */
static int crete_reserve_msixirq(struct crete_core_dev *cdev)
{
#define TX_IRQ_NAME "%s-tx-%d"
#define RX_IRQ_NAME "%s-rx-%d"
	int i, rc = 0, qp_num, max_irqs, qp_irqs, irqno;
	struct crete_irq_info *irq_info;
	struct crete_napi *jnapi;
	struct crete_rxring_info *rxr;
	struct crete_txring_info *txr;

	if (!(cdev->flags & CRETE_FLAG_HAS_MSIX)) {
		crete_err(cdev->device, "no msix cap, return failed\n");
		return -EINVAL;
	}

	qp_num = cdev->ring_size;
	qp_irqs = qp_num << 1;
	max_irqs = cdev->irq_num;

	/* not enough vectors for one rx + one tx irq per QP; skip reservation */
	if (max_irqs < qp_irqs)
		return 0;

	for (i = 0; i < qp_irqs; i++) {
		irqno = crete_req_msixirq(cdev);
		if (irqno < 0)
			return -ENOSPC;	/* vector bitmap exhausted */
		irq_info = &cdev->irq_info[irqno];
		jnapi = &cdev->jnapi[i];
		crete_info(cdev->device, "irqno=%d vector=%d i=%d\n", irqno,
			   irq_info->vector, i);
		snprintf(irq_info->name, IFNAMSIZ + 2,
			 (i & 0x1) ? TX_IRQ_NAME : RX_IRQ_NAME,
			 pci_name(cdev->pdev), i >> 1);
		if (i & 0x1) {
			txr = jnapi->txring;
			txr->vec = irqno;
		} else {
			rxr = jnapi->rxring;
			rxr->vec = irqno;
		}
		irq_info->requested = 1;
	}

	return rc;
}
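/*
 * Naming convention used above: even jnapi indices carry rx rings and
 * odd indices carry tx rings, so vector i is labelled
 * "<pci-name>-rx-<i/2>" or "<pci-name>-tx-<i/2>" accordingly, matching
 * the rx/tx interleaving set up in crete_init_napi().
 */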
void crete_rel_msixirq(struct crete_core_dev *cdev)
{
	int i, qp_irqs, irqno;
	struct crete_irq_info *irq_info;
	struct crete_napi *jnapi;

	if (!(cdev->flags & CRETE_FLAG_HAS_MSIX))
		return;

	qp_irqs = cdev->ring_size << 1;

	for (i = 0; i < qp_irqs; i++) {
		jnapi = &cdev->jnapi[i];
		irqno = (i & 0x1) ? jnapi->txring->vec : jnapi->rxring->vec;
		irq_info = &cdev->irq_info[irqno];
		if (!irq_info->requested)
			break;
		irq_info->requested = 0;
		crete_free_msixirq(cdev, irqno);
	}
}
static int crete_reserve_rings(struct crete_core_dev *cdev)
{
	int ret, i;

	/* alloc ring memory */
	ret = crete_cdev_ring_init(cdev);
	if (ret)
		return ret;

	/* alloc ring bd dma memory */
	ret = crete_alloc_rings(cdev);
	if (ret)
		goto err;

	/* init ring napi */
	crete_init_napi(cdev);

	/* request msix irq vectors */
	ret = crete_reserve_msixirq(cdev);
	if (ret)
		goto err_free;

	/* create the hw qp */
	for (i = 0; i < cdev->ring_size; i++) {
		ret = crete_create_txrxqp(cdev, i);
		if (ret)
			goto err1;
	}

	return 0;

err1:
	crete_rel_msixirq(cdev);
err_free:
	crete_free_rings(cdev);
err:
	crete_cdev_ring_exit(cdev);
	return ret;
}

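/*
 * crete_probe() - auxiliary bus probe for the Ethernet function.
 *
 * Negotiates capabilities and features with the device, sizes the
 * rings, builds the netdev, resumes the device, programs the MAC,
 * reserves the rings and finally registers the netdev.
 */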
static int crete_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct crete_aux_dev *aux_dev =
	    container_of(adev, struct crete_aux_dev, adev);
	struct crete_core_dev *core_dev = aux_dev->core_dev;
	struct pci_dev *pdev = core_dev->pdev;
	struct device *dev = &pdev->dev;
	struct crete_priv *priv;
	struct net_device *netdev;
	pm_message_t state = { };
	int err;

	err = crete_set_dev_type(core_dev, CRETE_JNET_DEV);
	if (err < 0) {
		dev_err(dev, "set jnet dev type failed\n");
		return -EINVAL;
	}

	/* get core dev cap init */
	err = crete_cdev_cap_init(core_dev);
	if (err) {
		dev_err(dev, "crete core dev get cap failed\n");
		return -EINVAL;
	}

	err = crete_features_negotiate(core_dev);
	if (err < 0) {
		dev_err(dev, "features negotiate failed\n");
		return -EINVAL;
	}

	/* reserve the ring size */
	err = crete_trim_rings(core_dev);
	if (err)
		return err;

	netdev = crete_create_netdev(core_dev);
	if (!netdev) {
		dev_err(dev, "crete_create_netdev failed\n");
		return -ENOMEM;
	}

	err = crete_build_nic_netdev(netdev);
	if (err) {
		dev_err(dev, "crete_build_nic_netdev failed, %d\n", err);
		goto err_crete_resume;
	}
	priv = netdev_priv(netdev);
	/* auxiliary_set_drvdata(adev, priv); */

	err = crete_resume(adev);
	if (err) {
		dev_err(dev, "crete_resume failed, %d\n", err);
		goto err_crete_resume;
	}

	err = crete_init_mac_addr(netdev);
	if (err) {
		dev_err(dev, "Unable to initialize mac address.\n");
		goto err_init_mac_addr;
	}

	err = crete_reserve_rings(core_dev);
	if (err) {
		dev_err(dev, "Reserve rings failed\n");
		goto err_init_mac_addr;
	}

	crete_dcbnl_init_app(priv);
	crete_dcbnl_initialize(priv);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "register_netdev failed, %d\n", err);
		goto err_register_netdev;
	}
	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);
	return 0;

err_register_netdev:
	crete_delete_rings(core_dev);

err_init_mac_addr:
	crete_suspend(adev, state);

err_crete_resume:
	crete_destroy_netdev(netdev);

	return err;
}

/*
 * On the Anolis 5.10.134-14 kernel the auxiliary bus .remove callback
 * is declared to return int, so wrap the common teardown accordingly.
 */
static void __crete_remove(struct auxiliary_device *adev)
{
	struct crete_aux_dev *aux_dev =
	    container_of(adev, struct crete_aux_dev, adev);
	struct crete_core_dev *core_dev = aux_dev->core_dev;
	struct net_device *netdev = core_dev->netdev;
	pm_message_t state = { };

	unregister_netdev(netdev);

	crete_delete_rings(core_dev);

	crete_suspend(adev, state);
	crete_destroy_netdev(netdev);
}

#ifdef SNIC_ANOLIS_VERSION14

static int crete_remove(struct auxiliary_device *adev)
{
	__crete_remove(adev);
	return 0;
}

#else

static void crete_remove(struct auxiliary_device *adev)
{
	__crete_remove(adev);
}

#endif

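/* bind to the CRETE_ADEV_NAME ".eth" auxiliary device exposed by the core driver */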
static const struct auxiliary_device_id crete_id_table[] = {
	{ .name = CRETE_ADEV_NAME ".eth", },
	{ },
};

MODULE_DEVICE_TABLE(auxiliary, crete_id_table);

static struct auxiliary_driver crete_aux_driver = {
	.name = "eth",
	.probe = crete_probe,
	.remove = crete_remove,
	.suspend = crete_suspend,
	.resume = crete_resume,
	.id_table = crete_id_table,
};

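/*
 * crete_init() / crete_cleanup() - register and unregister the Ethernet
 * auxiliary driver; expected to be called from the module init/exit
 * paths. The representor init hooks below are currently disabled.
 */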
int crete_init(void)
{
	int ret;

	ret = auxiliary_driver_register(&crete_aux_driver);
	if (ret)
		return ret;
	/*
	 * ret = crete_rep_init();
	 * if (ret)
	 *	auxiliary_driver_unregister(&crete_aux_driver);
	 */
	return ret;
}

void crete_cleanup(void)
{
	/* crete_rep_cleanup(); */
	auxiliary_driver_unregister(&crete_aux_driver);
}

#if 0
/* NEED_ETH_HW_ADDR_SET
 *
 * eth_hw_addr_set was added by upstream commit
 * 48eab831ae8b ("net: create netdev->dev_addr assignment helpers")
 *
 * Using eth_hw_addr_set became required in 5.17, when the dev_addr field in
 * the netdev struct was constified. See 48eab831ae8b ("net: create
 * netdev->dev_addr assignment helpers")
 */
#ifdef NEED_ETH_HW_ADDR_SET
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	ether_addr_copy(dev->dev_addr, addr);
}
#endif /* NEED_ETH_HW_ADDR_SET */
#endif

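/*
 * crete_change_mac_addr() - change the interface MAC address.
 *
 * Validates the new address, asks firmware to approve it, commits it to
 * the netdev, then refreshes the default MAC and the RX filter mode.
 */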
int crete_change_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct crete_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	rc = crete_approve_mac(core_dev, addr->sa_data);
	if (rc)
		return rc;

	dev_info(&pdev->dev, "VF MAC address %pM\n", addr->sa_data);

	netif_addr_lock_bh(netdev);
	eth_hw_addr_set(netdev, addr->sa_data);
	netif_addr_unlock_bh(netdev);

	rc = crete_set_default_mac(netdev);
	if (rc)
		return rc;

	crete_set_rx_mode(netdev);
	return rc;
}

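/*
 * crete_init_mac_addr() - assign the initial MAC address.
 *
 * PFs get a locally administered stub address derived from the vendor
 * and device IDs, with a per-probe counter in the last octet; VFs use
 * the admin-assigned MAC when valid, otherwise a random one, and must
 * have it approved by firmware.
 */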
static int crete_init_mac_addr(struct net_device *netdev)
{
	int rc = 0;
	static int count1;	/* differentiates stub MACs across PF probes */
	struct crete_priv *priv = netdev_priv(netdev);
	struct crete_core_dev *core_dev = priv->coredev;
	struct pci_dev *pdev = core_dev->pdev;

	if (crete_core_is_pf(core_dev)) {
		/* pf stub mac */
		count1++;
		core_dev->pf.mac_addr[0] = 0x52;
		core_dev->pf.mac_addr[1] = (PCI_VENDOR_ID_CRETE >> 8) & 0xff;
		core_dev->pf.mac_addr[2] = PCI_VENDOR_ID_CRETE & 0xff;
		core_dev->pf.mac_addr[3] = (PF_DEVICE_ID >> 8) & 0xff;
		core_dev->pf.mac_addr[4] = PF_DEVICE_ID & 0xff;
		core_dev->pf.mac_addr[5] = count1 & 0xff;
		eth_hw_addr_set(netdev, core_dev->pf.mac_addr);
		rc = crete_set_default_mac(netdev);
		if (rc) {
			dev_err(&pdev->dev,
				"Set pf default MAC address failed rc:0x%x\n",
				rc);
			return rc;
		}
	} else {
		struct crete_vf_info *vf = &core_dev->vf;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			eth_hw_addr_set(netdev, vf->mac_addr);
		} else {
			eth_hw_addr_random(netdev);
		}
		rc = crete_set_default_mac(netdev);
		if (rc) {
			dev_err(&pdev->dev,
				"Set vf default MAC address failed rc:0x%x\n",
				rc);
			return rc;
		}

		rc = crete_approve_mac(core_dev, netdev->dev_addr);
		if (rc) {
			dev_err(&pdev->dev,
				"Set approve vf MAC address failed rc:0x%x\n",
				rc);
			return rc;
		}
	}
	return rc;
}

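/*
 * crete_uc_list_updated() - detect changes in the unicast address list.
 *
 * Compares the netdev unicast list against the cached copy in priv; the
 * cache apparently reserves one extra slot (likely the primary MAC),
 * hence the comparison against uc_filter_count - 1.
 */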
static bool crete_uc_list_updated(struct net_device *dev)
{
	struct crete_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int off = 0;

	if (netdev_uc_count(dev) != (priv->uc_filter_count - 1))
		return true;

	netdev_for_each_uc_addr(ha, dev) {
		if (!ether_addr_equal(ha->addr, priv->uc_list + off))
			return true;

		off += ETH_ALEN;
	}
	return false;
}

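/*
 * crete_mc_list_updated() - sync the multicast list into the cache.
 *
 * Copies changed entries into priv->mc_list, falls back to all-multicast
 * mode when the list exceeds CRETE_MAX_MC_ADDRS, and reports whether the
 * cached list changed.
 */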
static bool crete_mc_list_updated(struct net_device *dev, u32 *rx_mask)
{
	struct crete_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int mc_count = 0;
	bool update = false;
	int off = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (mc_count >= CRETE_MAX_MC_ADDRS) {
			*rx_mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			priv->mc_list_count = 0;
			return false;
		}
		if (!ether_addr_equal(ha->addr, priv->mc_list + off)) {
			memcpy(priv->mc_list + off, ha->addr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != priv->mc_list_count) {
		priv->mc_list_count = mc_count;
		update = true;
	}
	return update;
}

static void crete_queuework_set_rx_mode(struct crete_priv *priv)
{
	/* no rx mode for uplink rep */
	queue_work(priv->wq, &priv->set_rx_mode_work);
}

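/*
 * crete_set_rx_mode() - recompute the RX filter mask.
 *
 * Rebuilds the promiscuous/broadcast/multicast mask from the netdev
 * flags and address lists, and defers the hardware update to the
 * set_rx_mode workqueue when anything changed.
 */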
void crete_set_rx_mode(struct net_device *dev)
{
	struct crete_priv *priv = netdev_priv(dev);
	struct crete_core_dev *core_dev = priv->coredev;
	bool mc_update = false;
	bool uc_update = false;
	u32 mask;

	/* skip unless the device is marked down */
	if (!test_bit(__CRETE_DOWN, &core_dev->state))
		return;

	mask = priv->rx_mask;
	mask &= ~(L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
		  L2_SET_RX_MASK_REQ_MASK_MCAST |
		  L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
		  L2_SET_RX_MASK_REQ_MASK_BCAST);

	if (dev->flags & IFF_PROMISC)
		mask |= L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	uc_update = crete_uc_list_updated(dev);

	if (dev->flags & IFF_BROADCAST)
		mask |= L2_SET_RX_MASK_REQ_MASK_BCAST;
	if (dev->flags & IFF_ALLMULTI) {
		mask |= L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		priv->mc_list_count = 0;
	} else if (dev->flags & IFF_MULTICAST) {
		mc_update = crete_mc_list_updated(dev, &mask);
	}

	if (mask != priv->rx_mask || uc_update || mc_update) {
		priv->rx_mask = mask;
		crete_queuework_set_rx_mode(priv);
	}
}