/*
 * Copyright (C) 2017
 *
 * Brick Yang <printfxxx@163.com>
 *
 * This program is free software. You can redistribute it and/or
 * modify it as you like.
 */

/**
 * @file	dpaa2.c
 * @brief	DPAA2 ethernet driver for simplebit
 */

#include <linux/version.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/mutex.h>
#include <linux/kallsyms.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/fsl/mc.h>

#include <soc/fsl/dpaa2-fd.h>
#include <soc/fsl/dpaa2-global.h>

#include <mtrace.h>
#include <netdev.h>
#include <worker.h>
#include <cmd.h>

#include "qbman.h"
#include "dprc.h"
#include "dpni.h"
#include "dpcon.h"
#include "dpbp.h"
#include "dpio.h"
#include "dpmac.h"
#include "dpmng.h"

#define SVR_LS1080A		0x87030000
#define SVR_LS2080A		0x87010000
#define SVR_LS2088A		0x87090000
#define SVR_LX2160A		0x87360000

#define DPAA2_DPNI_MAX_FRM_SZ	1536
#define DPAA2_DPNI_DATA_ALIGN	256
#define DPAA2_DPA_BP_SZ		(DPAA2_DPNI_MAX_FRM_SZ + DPAA2_DPNI_DATA_ALIGN)

struct dpaa2_fq;
/* DQRR entry handler: (software portal, owning frame queue, dequeue entry). */
typedef void (*qbman_cb_dqrr_t)(struct qbman_swp *, struct dpaa2_fq *,
				const struct dpaa2_dq __iomem *);

/* Per-cpu DPIO state: the QBMan software portal plus its backing MC object. */
typedef struct dpaa2_io_info {
	struct qbman_swp *swp;		/* portal handle, NULL until created */
	struct fsl_mc_device *mdev;	/* dpio object bound to this CPU */
	struct dpio_attr attr;		/* cached dpio attributes */
} dpaa2_io_info_t;

/* State for a dpmac connected to a dpni (see dpaa2_dpmac_init()). */
typedef struct dpmac_dev {
	struct dpmac_attr attr;
	struct fsl_mc_device *mdev;
	struct fwnode_handle *fn_phy;	/* PHY fwnode, NULL for fixed links */
	phy_interface_t phy_if;
	/* 64-bit extended rx counters, see dpaa2_dpmac_cntr_update_return() */
	atomic64_t rx_packets, rx_bytes;
} dpmac_dev_t;

/* A dpcon (QBMan channel) allocated from the MC object pool. */
typedef struct dpcon_dev {
	__percpu u8 *pcpu_chid;		/* per-cpu channel index map */
	struct dpcon_attr attr;
	struct fsl_mc_device *mdev;
} dpcon_dev_t;

/* A dpbp (QBMan buffer pool) allocated from the MC object pool. */
typedef struct dpbp_dev {
	struct dpbp_attr attr;
	struct fsl_mc_device *mdev;
} dpbp_dev_t;

/* One dpni frame queue and its dequeue handler. */
typedef struct dpaa2_fq {
	struct dpni_queue_id qid;
	struct fsl_mc_device *mdev;
	qbman_cb_dqrr_t dqrr;
} dpaa2_fq_t;

/* Driver-private state hung off each net_device (netdev_priv()). */
typedef struct dpaa2_ndev {
	unsigned int flags;
#define DPAA2_NDEV_F_PAUSE_AUTONEG	0x1u
	struct net_device *ndev;
	dpmac_dev_t *dpmac;		/* NULL when not mac-connected */
	dpcon_dev_t *dpcon;
	dpbp_dev_t *rx_dpbp, *tx_clean_dpbp;
	dpaa2_fq_t *rx_fqs, *tx_fqs, *tx_conf_fqs, *rx_err_fq;
	struct dpni_attr attr;
	struct dpni_link_state dpni_state;	/* cached link state */
	unsigned long dpni_state_jiffies;	/* time of last state refresh */
} dpaa2_ndev_t;

static char *dprc_name;			/* "dprc" param: name of the DPRC to drive */
static bool use_tx_conf = false;	/* "tx_conf" param: use tx confirmation queues */
static unsigned int skb_buf_nr;		/* "bufs" param: number of skb buffers */
static unsigned int poll_dqrr_budget = 32;	/* "budget" param: DQRR poll budget */
static struct fsl_mc_device *dprc_mdev;	/* resolved DPRC device */
static struct mc_soc_version soc_ver;	/* SoC id; selects portal quirks/sdest map */

/* One QBMan software portal (dpio) per CPU. */
static DEFINE_PER_CPU(dpaa2_io_info_t, cpu_dpaa2_io_infos);

module_param_named(dprc, dprc_name, charp, S_IRUGO);
MODULE_PARM_DESC(dprc, "DPRC name");

module_param_named(tx_conf, use_tx_conf, bool, S_IRUGO);
MODULE_PARM_DESC(tx_conf, "Use tx confirm");

module_param_named(bufs, skb_buf_nr, uint, S_IRUGO);
MODULE_PARM_DESC(bufs, "Number of skb buffers");

module_param_named(budget, poll_dqrr_budget, uint, S_IRUGO);
MODULE_PARM_DESC(budget, "Budget of poll dqrr");

/*
 * Probe callback for dpio objects.  Only dpio devices that are direct
 * children of the DPRC selected via the "dprc" module parameter are
 * accepted; anything else is refused with -ENOTSUPP.
 */
static int dpaa2_dpio_probe(struct fsl_mc_device *mdev)
{
	if (mdev->dev.parent != &dprc_mdev->dev) {
		return -ENOTSUPP;
	}

	dev_dbg(&mdev->dev, "probed\n");

	return 0;
}

/*
 * Remove callback for dpio objects.  No per-device teardown happens here
 * (portals are torn down in cpu_qbman_swp_destroy()); the preprocessor
 * dance only tracks the kernel API change: fsl_mc_driver::remove returns
 * int before v6.5 and void from v6.5 on.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
static int
#else
static void
#endif
dpaa2_dpio_remove(struct fsl_mc_device *mdev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
	return 0;
#endif
}

/* Match every Freescale dpio object; zero vendor terminates the table. */
static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpio",
	},
	{ .vendor = 0x0 }
};

/* fsl-mc bus driver claiming dpio objects under the selected DPRC. */
static struct fsl_mc_driver dpaa2_dpio_driver = {
	.driver = {
		.name = KBUILD_MODNAME "-dpio",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_dpio_probe,
	.remove = dpaa2_dpio_remove,
	.match_id_table = dpaa2_dpio_match_id_table,
};

/*
 * Probe callback for dpmac objects.  Mirrors dpaa2_dpio_probe(): claim
 * only dpmac devices parented by the selected DPRC.
 */
static int dpaa2_dpmac_probe(struct fsl_mc_device *mdev)
{
	if (mdev->dev.parent != &dprc_mdev->dev) {
		return -ENOTSUPP;
	}

	dev_dbg(&mdev->dev, "probed\n");

	return 0;
}

/*
 * Remove callback for dpmac objects.  Nothing to tear down (dpmac state
 * is released via dpaa2_dpmac_clean()); the #if only follows the v6.5
 * int-to-void return type change of fsl_mc_driver::remove.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
static int
#else
static void
#endif
dpaa2_dpmac_remove(struct fsl_mc_device *mdev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
	return 0;
#endif
}

/* Match every Freescale dpmac object; zero vendor terminates the table. */
static const struct fsl_mc_device_id dpaa2_dpmac_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpmac",
	},
	{ .vendor = 0x0 }
};

/* fsl-mc bus driver claiming dpmac objects under the selected DPRC. */
static struct fsl_mc_driver dpaa2_dpmac_driver = {
	.driver = {
		.name = KBUILD_MODNAME "-dpmac",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_dpmac_probe,
	.remove = dpaa2_dpmac_remove,
	.match_id_table = dpaa2_dpmac_match_id_table,
};

/*
 * Map a CPU number to the QBMan stashing destination (cache id) of its
 * CPU cluster.  The cluster geometry depends on the SoC: LS1080A groups
 * 4 CPUs per cluster with ids starting at 2; LS2080A/LS2088A/LX2160A
 * group 2 CPUs per cluster with ids starting at 0.  Returns -1 for an
 * unrecognised SoC.
 */
static int dpaa2_dpio_get_cluster_sdest(unsigned int cpu)
{
	unsigned int first_id, cluster_shift;

	switch (soc_ver.svr & 0xffff0000) {
	case SVR_LS1080A:
		first_id = 2;
		cluster_shift = 2;	/* 4 CPUs per cluster */
		break;
	case SVR_LS2080A:
	case SVR_LS2088A:
	case SVR_LX2160A:
		first_id = 0;
		cluster_shift = 1;	/* 2 CPUs per cluster */
		break;
	default:
		return -1;		/* unknown SoC: no stashing target */
	}

	return first_id + (cpu >> cluster_shift);
}

/*
 * Per-cpu worker: bind the dpio object @arg to the calling CPU and bring
 * up its QBMan software portal.  Must run pinned on the target CPU so
 * that smp_processor_id() identifies the portal owner.  Returns 0 on
 * success, negative errno on failure (all partial state is unwound).
 * On success the portal descriptor's ownership passes to info->swp and
 * is released in cpu_qbman_swp_destroy().
 */
static long cpu_qbman_swp_create(void *arg)
{
	int sdest;
	long rc;
	unsigned int cpu;
	dpaa2_io_info_t *info;
	struct fsl_mc_device *mdev = arg;
	struct qbman_swp_desc *desc;

	cpu = smp_processor_id();
	info = per_cpu_ptr(&cpu_dpaa2_io_infos, cpu);
	info->mdev = mdev;

	if (!(desc = kzalloc(sizeof(*desc), GFP_KERNEL)) || MTRACE_KMEM_ADD(desc)) {
		dev_err(&mdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	/* Open, reset and query the dpio through the MC command portal. */
	if ((rc = dpio_open(dprc_mdev->mc_io, 0, mdev->obj_desc.id, &mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to open dpio\n");
		goto err_open;
	}

	if ((rc = dpio_reset(dprc_mdev->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to reset dpio\n");
		goto err_reset;
	}

	if ((rc = dpio_get_attributes(dprc_mdev->mc_io, 0, mdev->mc_handle, &info->attr))) {
		dev_err(&mdev->dev, "failed to get attrs\n");
		goto err_get_attr;
	}
	desc->qman_version = info->attr.qbman_version;

	if ((rc = dpio_enable(dprc_mdev->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to enable dpio\n");
		goto err_enable;
	}

	/* Portal feature flags differ per SoC / QBMan revision. */
	switch (soc_ver.svr & 0xffff0000) {
	case SVR_LS1080A:
		desc->flags |= SWP_F_DE | SWP_F_EQCR_VB | SWP_F_DQRR_VB;
		break;
	case SVR_LS2080A:
	case SVR_LS2088A:
		desc->flags |= SWP_F_EST | SWP_F_DE | SWP_F_EQCR_VB | SWP_F_DQRR_VB;
		break;
	case SVR_LX2160A:
		/* LX2160A portals have a memory-backed cache-enabled area */
		desc->flags |= SWP_F_EST | SWP_F_DE | SWP_F_DQRR_VB | SWP_F_MEMBACK;
		break;
	default:
		break;
	}

	/*
	 * Map the cache-enabled area: region 2 (write-back memremap) for
	 * memory-backed portals, otherwise region 0 via cacheable ioremap.
	 */
	if (desc->flags & SWP_F_MEMBACK) {
		desc->cena_size = resource_size(&mdev->regions[2]);
		desc->cena_bar = memremap(mdev->regions[2].start, desc->cena_size, MEMREMAP_WB);
	} else {
		desc->cena_size = resource_size(&mdev->regions[0]);
		desc->cena_bar = ioremap_cache_ns(mdev->regions[0].start, desc->cena_size);
	}
	if (!(desc->cena_bar)) {
		dev_err(&mdev->dev, "failed to map cena region\n");
		rc = -EIO;
		goto err_map_cena;
	}

	/* Cache-inhibited area: region 1, plain device mapping. */
	desc->cinh_size = resource_size(&mdev->regions[1]);
	desc->cinh_bar = ioremap(mdev->regions[1].start, desc->cinh_size);
	if (!(desc->cinh_bar)) {
		dev_err(&mdev->dev, "failed to map cinh region\n");
		rc = -EIO;
		goto err_map_cinh;
	}

	/* Stash dequeued data into this CPU's cluster cache. */
	if ((sdest = dpaa2_dpio_get_cluster_sdest(cpu)) < 0) {
		dev_err(&mdev->dev, "failed to get cluster sdest\n");
		rc = -EIO;
		goto err_sdest;
	}

	if ((rc = dpio_set_stashing_destination(dprc_mdev->mc_io, 0, mdev->mc_handle, sdest))) {
		dev_err(&mdev->dev, "failed to get set stashing destination\n");
		goto err_set_stash;
	}

	if (!(info->swp = qbman_swp_init(desc))) {
		dev_err(&mdev->dev, "failed to init sw portal\n");
		rc = -EIO;
		goto err_swp_init;
	}

	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_swp_init:
err_set_stash:
err_sdest:
	iounmap(desc->cinh_bar);
err_map_cinh:
	if (desc->flags & SWP_F_MEMBACK) {
		memunmap(desc->cena_bar);
	} else {
		iounmap(desc->cena_bar);
	}
err_map_cena:
	dpio_disable(dprc_mdev->mc_io, 0, mdev->mc_handle);
err_enable:
err_get_attr:
err_reset:
	dpio_close(dprc_mdev->mc_io, 0, mdev->mc_handle);
err_open:
	MTRACE_KMEM_DEL(desc);
	kfree(desc);
err:
	info->mdev = NULL;
	return rc;
}

/*
 * Per-cpu worker: tear down this CPU's QBMan software portal, releasing
 * everything acquired in cpu_qbman_swp_create() in reverse order.  A CPU
 * whose portal was never created (info->swp == NULL) is a no-op.
 */
static long cpu_qbman_swp_destroy(void *arg)
{
	dpaa2_io_info_t *info;
	struct fsl_mc_device *mdev;
	const struct qbman_swp_desc *desc;

	info = this_cpu_ptr(&cpu_dpaa2_io_infos);

	if (info->swp) {
		mdev = info->mdev;
		/* desc was handed to the portal at init; reclaim it to free below */
		desc = info->swp->desc;
		qbman_swp_finish(info->swp);
		iounmap(desc->cinh_bar);
		if (desc->flags & SWP_F_MEMBACK) {
			memunmap(desc->cena_bar);
		} else {
			iounmap(desc->cena_bar);
		}
		dpio_disable(dprc_mdev->mc_io, 0, mdev->mc_handle);
		dpio_close(dprc_mdev->mc_io, 0, mdev->mc_handle);
		MTRACE_KMEM_DEL(desc);
		kfree(desc);
		/* drop the reference taken when the dpio was bound to this CPU */
		put_device(&mdev->dev);
		info->mdev = NULL;
		info->swp = NULL;
	}

	return 0;
}

/*
 * .ndo_start_xmit: enqueue one skb to the dpni through this CPU's QBMan
 * software portal.  Returns NETDEV_TX_BUSY (stack will retry) when no
 * enqueue-ring slot is free, NETDEV_TX_OK otherwise.
 */
static int dpaa2_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u8 cmd_verb;
	int rc;
	dpaa2_fq_t *fq;
	dma_addr_t baddr;
	dpaa2_ndev_t *netdev;
	unsigned int txq;
	struct qbman_swp *swp;
	struct qbman_enq_cmd *c;

	netdev = netdev_priv(ndev);
	swp = this_cpu_ptr(&cpu_dpaa2_io_infos)->swp;
	/* grab the next free EQCR slot; none free means ring full */
	if (!(c = qbman_swp_eqcr_next(swp, &cmd_verb))) {
		goto err;
	}
	baddr = sb_skbh_to_baddr(skb->head);
	/* clamp the stack's queue mapping into our tx fq range */
	txq = skb_get_queue_mapping(skb);
	if (txq >= ndev->real_num_tx_queues) {
		txq = txq % ndev->real_num_tx_queues;
	}
	fq = &netdev->tx_fqs[txq];
	qbman_swp_cmd_zero(c);
	cmd_verb |= QBMAN_EQCR_REJECT;
	c->tgtid = cpu_to_le32(fq->qid.fqid);
	dpaa2_fd_set_addr(&c->fd, baddr);
	dpaa2_fd_set_offset(&c->fd, skb_headroom(skb));
	dpaa2_fd_set_len(&c->fd, skb->len);
	if (!use_tx_conf) {
		/*
		 * No tx-confirm queues: have hardware release the buffer into
		 * the tx-clean pool, recycled later by dpaa2_pcpu_flow_poll().
		 */
		dpaa2_fd_set_bpid(&c->fd, netdev->tx_clean_dpbp->attr.bpid);
	}
	dpaa2_fd_set_format(&c->fd, dpaa2_fd_single);
	/* single-frame path: submit and ring the doorbell immediately */
	rc = qbman_swp_eqcr_submit(swp, c, cmd_verb, true);
	BUG_ON(rc);

	return NETDEV_TX_OK;
err:
	return NETDEV_TX_BUSY;
}

/*
 * Fold a hardware counter read @val into the monotonic 64-bit software
 * counter @v and return the updated value.
 *
 * If @val already has bits set above bit 31 the hardware counter is wide
 * enough and is taken as-is.  Otherwise @val is treated as a 32-bit
 * counter that may have wrapped: the upper 32 bits are carried over from
 * the stored value, incremented when the new low word is smaller than
 * the stored low word (a wrap).  NOTE(review): assumes reads are
 * frequent enough that at most one 32-bit wrap occurs between updates —
 * confirm against the MC counter widths.
 */
static inline u64 dpaa2_dpmac_cntr_update_return(atomic64_t *v, u64 val)
{
	u32 upper, lower;
	u64 old, new, cur;

	while (1) {
		old = atomic64_read(v);
		if (val >> 32) {
			new = val;
		} else {
			lower = (u32)val;
			upper = (old >> 32) + (lower < (u32)old ? 1 : 0);
			new = (u64)upper << 32 | lower;
		}
		cur = atomic64_cmpxchg(v, old, new);
		/* if another CPU raced us with a newer value, keep the max */
		new = max(cur, new);
		if ((cur == old) || (cur == new)) {
			break;
		}
	}

	return new;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
/*
 * .ndo_get_stats for pre-2.6.36 kernels.  Rx counters come from the
 * attached dpmac when present (the MAC sees all ingress traffic);
 * otherwise they are reconstructed from dpni statistics pages 0 and 2.
 * On any MC command failure the stats are left at their previous values.
 */
static struct net_device_stats *dpaa2_get_stats(struct net_device *ndev)
{
	u64 pkts, bytes;
	dpmac_dev_t *dpmac;
	dpaa2_ndev_t *netdev;
	union dpni_statistics stat;
	struct fsl_mc_device *mdev, *mdev_dpmac;

	mdev = to_fsl_mc_device(ndev->dev.parent);
	netdev = netdev_priv(ndev);
	dpmac = netdev->dpmac;

	if (dpmac) {
		mdev_dpmac = dpmac->mdev;
		if ((dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, DPMAC_CNT_ING_ALL_FRAME, &pkts))) {
			goto err;
		}
		if (dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, DPMAC_CNT_ING_BYTE, &bytes)) {
			goto err;
		}
		/* extend the (possibly 32-bit) hw counters to monotonic 64-bit */
		pkts = dpaa2_dpmac_cntr_update_return(&dpmac->rx_packets, pkts);
		bytes = dpaa2_dpmac_cntr_update_return(&dpmac->rx_bytes, bytes);
	} else {
		if (dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 0, &stat)) {
			goto err;
		}
		pkts = stat.page_0.ingress_all_frames;
		bytes = stat.page_0.ingress_all_bytes;
		/*
		 * XXX:
		 * Due to no bytes counters for ingress frames that filtered or discard,
		 * we cannot get accurate rx bytes for non-mac connection
		 */
		if (dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 2, &stat)) {
			goto err;
		}
		/* page 0 counts only accepted frames; add back the dropped ones */
		pkts += stat.page_2.ingress_filtered_frames +
			stat.page_2.ingress_discarded_frames +
			stat.page_2.ingress_nobuffer_discards;
	}

	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	goto ok;
err:
ok:
	return &ndev->stats;
}
#else
/*
 * .ndo_get_stats64 (returns the stats struct before 4.11, void after).
 * Same counter sourcing as the legacy variant above: dpmac counters for
 * mac-connected ports, dpni statistics pages 0 and 2 otherwise; MC
 * failures leave @stats untouched.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
static struct rtnl_link_stats64 *
#else
static void
#endif
dpaa2_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	u64 pkts, bytes;
	dpmac_dev_t *dpmac;
	dpaa2_ndev_t *netdev;
	union dpni_statistics stat;
	struct fsl_mc_device *mdev, *mdev_dpmac;

	mdev = to_fsl_mc_device(ndev->dev.parent);
	netdev = netdev_priv(ndev);
	dpmac = netdev->dpmac;

	if (dpmac) {
		mdev_dpmac = dpmac->mdev;
		if ((dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, DPMAC_CNT_ING_ALL_FRAME, &pkts))) {
			goto err;
		}
		if (dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, DPMAC_CNT_ING_BYTE, &bytes)) {
			goto err;
		}
		/* extend the (possibly 32-bit) hw counters to monotonic 64-bit */
		pkts = dpaa2_dpmac_cntr_update_return(&dpmac->rx_packets, pkts);
		bytes = dpaa2_dpmac_cntr_update_return(&dpmac->rx_bytes, bytes);
	} else {
		if (dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 0, &stat)) {
			goto err;
		}
		pkts = stat.page_0.ingress_all_frames;
		bytes = stat.page_0.ingress_all_bytes;
		/*
		 * XXX:
		 * Due to no bytes counters for ingress frames that filtered or discard,
		 * we cannot get accurate rx bytes for non-mac connection
		 */
		if (dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 2, &stat)) {
			goto err;
		}
		/* page 0 counts only accepted frames; add back the dropped ones */
		pkts += stat.page_2.ingress_filtered_frames +
			stat.page_2.ingress_discarded_frames +
			stat.page_2.ingress_nobuffer_discards;
	}

	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;
	goto ok;
err:
ok:
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
	return stats;
#else
	return;
#endif
}
#endif

/*
 * Kernel net_device callbacks.  Only transmit and statistics are
 * provided; the stats callback name follows the kernel version (see
 * dpaa2_get_stats()/dpaa2_get_stats64() above).
 */
static struct net_device_ops dpaa2_netdev_ops = {
	.ndo_start_xmit = dpaa2_start_xmit,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	.ndo_get_stats = dpaa2_get_stats,
#else
	.ndo_get_stats64 = dpaa2_get_stats64,
#endif
};

/*
 * ethtool get_pauseparam: report the pause-autoneg flag and the locally
 * advertised rx/tx pause configuration, derived from the PHY's
 * advertisement word.
 *
 * NOTE(review): mii_resolve_flowctrl_fdx() receives the local
 * advertisement for BOTH sides, so this reports the locally requested
 * pause settings rather than the negotiated result — presumably
 * intentional (mirrors what set_pauseparam configured); confirm.
 */
static void dpaa2_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pp)
{
	u8 fc;
	u16 lcladv;
	dpaa2_ndev_t *netdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
	struct ethtool_cmd cmd;
	if ((phy_ethtool_gset(ndev->phydev, &cmd))) {
		goto err;
	}
	lcladv = ethtool_adv_to_mii_adv_t(cmd.advertising);
#else
	struct ethtool_link_ksettings ks;
	phy_ethtool_ksettings_get(ndev->phydev, &ks);
	lcladv = linkmode_adv_to_mii_adv_t(ks.link_modes.advertising);
#endif
	netdev = netdev_priv(ndev);
	pp->autoneg = (netdev->flags & DPAA2_NDEV_F_PAUSE_AUTONEG) ? 1 : 0;
	fc = mii_resolve_flowctrl_fdx(lcladv, lcladv);
	pp->rx_pause = fc & FLOW_CTRL_RX ? 1 : 0;
	pp->tx_pause = fc & FLOW_CTRL_TX ? 1 : 0;
	return;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
err:
	return;
#endif
}

/*
 * ethtool set_pauseparam: record the autoneg preference in the driver
 * flags and push the requested rx/tx pause capability into the PHY's
 * advertisement.  Returns 0 on success or the PHY layer's errno.
 */
static int dpaa2_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pp)
{
	int rc, cap;
	u16 adv;
	dpaa2_ndev_t *netdev;
	struct phy_device *phydev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
	struct ethtool_cmd cmd;
#else
	struct ethtool_link_ksettings ks;
#endif
	phydev = ndev->phydev;
	netdev = netdev_priv(ndev);
	netdev->flags = (netdev->flags & ~DPAA2_NDEV_F_PAUSE_AUTONEG) |
			(pp->autoneg ? DPAA2_NDEV_F_PAUSE_AUTONEG : 0);

	/* translate ethtool rx/tx pause flags into an MII advertisement */
	cap = (pp->rx_pause ? FLOW_CTRL_RX : 0) | (pp->tx_pause ? FLOW_CTRL_TX : 0);
	adv = mii_advertise_flowctrl(cap);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
	if ((rc = phy_ethtool_gset(phydev, &cmd))) {
		goto err;
	}
	cmd.advertising = (cmd.advertising & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause)) |
			  ((adv & ADVERTISE_PAUSE_CAP) ? ADVERTISED_Pause : 0) |
			  ((adv & ADVERTISE_PAUSE_ASYM) ? ADVERTISED_Asym_Pause : 0);
	if ((rc = phy_ethtool_sset(phydev, &cmd))) {
		goto err;
	}
#else
	phy_ethtool_ksettings_get(phydev, &ks);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, ks.link_modes.advertising,
			 adv & ADVERTISE_PAUSE_CAP);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ks.link_modes.advertising,
			 adv & ADVERTISE_PAUSE_ASYM);
	if ((rc = phy_ethtool_ksettings_set(phydev, &ks))) {
		goto err;
	}
#endif
	/* no real PHY: invalidate the cached link state so it is re-derived */
	if (!netdev->dpmac || !netdev->dpmac->fn_phy) {
		netdev->dpni_state.up = 0;
	}
	return 0;
err:
	return rc;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
/* Legacy (pre-4.7) ethtool link settings: thin wrappers over the PHY layer. */
static int dpaa2_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	return phy_ethtool_gset(ndev->phydev, cmd);
}

static int dpaa2_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	return phy_ethtool_sset(ndev->phydev, cmd);
}

static struct ethtool_ops dpaa2_ethtool_ops = {
	.get_pauseparam = dpaa2_get_pauseparam,
	.set_pauseparam = dpaa2_set_pauseparam,
	.get_settings   = dpaa2_get_settings,
	.set_settings   = dpaa2_set_settings,
};
#else
/* Modern (4.7+) ethtool link_ksettings wrappers over the PHY layer. */
static int dpaa2_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *ks)
{
	phy_ethtool_ksettings_get(ndev->phydev, ks);

	return 0;
}

static int dpaa2_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *ks)
{
	int rc;
	dpaa2_ndev_t *netdev;

	netdev = netdev_priv(ndev);

	if ((rc = phy_ethtool_ksettings_set(ndev->phydev, ks))) {
		goto err;
	}

	/* no real PHY: invalidate the cached link state so it is re-derived */
	if (!netdev->dpmac || !netdev->dpmac->fn_phy) {
		netdev->dpni_state.up = 0;
	}
	return 0;
err:
	return rc;
}

static struct ethtool_ops dpaa2_ethtool_ops = {
	.get_pauseparam	    = dpaa2_get_pauseparam,
	.set_pauseparam     = dpaa2_set_pauseparam,
	.get_link_ksettings = dpaa2_get_link_ksettings,
	.set_link_ksettings = dpaa2_set_link_ksettings,
};
#endif

/*
 * Per-cpu flow poller: reclaim transmitted buffers from the tx-clean
 * buffer pool.
 *
 * When tx confirmations are disabled, hardware releases transmitted
 * frame buffers into tx_clean_dpbp (the bpid set at enqueue time).
 * Acquire up to min(q_used, qwt) of them, in chunks of at most 7 (the
 * acquire-command limit), and credit them back to flow->q_used.
 */
static void dpaa2_pcpu_flow_poll(flow_t *flow)
{
	u8 cmd_verb;
	dpaa2_ndev_t *netdev;
	unsigned int nr, num, to_clean;
	struct qbman_swp *swp;
	struct qbman_acq_cmd *c;
	const struct qbman_acq_resp __iomem *r;

	netdev = netdev_priv(flow->netdev->ndev);
	swp = this_cpu_ptr(&cpu_dpaa2_io_infos)->swp;

	to_clean = min(flow->q_used, flow->qwt);
	while (to_clean) {
		nr = min(to_clean, 7u);
		/* spin for a free management-command slot */
		while (!(c = qbman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QBMAN_CR_ACQUIRE;
		c->bpid = cpu_to_le16(netdev->tx_clean_dpbp->attr.bpid);
		c->num = nr;
		qbman_swp_cr_submit(swp, c, cmd_verb);
		/* spin for the matching response */
		while (!(r = qbman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON(r->rslt != QBMAN_MC_RSLT_OK)) {
			break;
		}
		num = r->num;
		BUG_ON(num > nr);
		to_clean -= num;
		flow->q_used -= num;
		/* short acquire: pool momentarily empty, stop for this round */
		if (num < nr) {
			break;
		}
	}
}

/*
 * Burst-transmit backend: enqueue up to @nr skbs to the dpni's tx frame
 * queues through this CPU's QBMan software portal.
 *
 * Frames are staged on the enqueue ring without ringing the doorbell,
 * then flushed once at the end.  Returns the number of skbs consumed;
 * the caller retains ownership of the rest.
 */
static unsigned int dpaa2_netdev_tx_burst(struct net_device *ndev, struct sk_buff **skbs, unsigned int nr)
{
	u8 cmd_verb;
	int rc;
	dpaa2_fq_t *fq;
	dma_addr_t baddr;
	dpaa2_ndev_t *netdev;
	unsigned int i, txq;
	struct sk_buff *skb;
	struct qbman_swp *swp;
	struct qbman_enq_cmd *c;

	netdev = netdev_priv(ndev);
	swp = this_cpu_ptr(&cpu_dpaa2_io_infos)->swp;
	for (i = 0; i < nr; i++) {
		/* no free EQCR slot: stop early and report partial progress */
		if (!(c = qbman_swp_eqcr_next(swp, &cmd_verb))) {
			break;
		}
		skb = skbs[i];
		baddr = sb_skbh_to_baddr(skb->head);
		/* clamp the stack's queue mapping into our tx fq range */
		txq = skb_get_queue_mapping(skb);
		if (txq >= ndev->real_num_tx_queues) {
			txq = txq % ndev->real_num_tx_queues;
		}
		fq = &netdev->tx_fqs[txq];
		qbman_swp_cmd_zero(c);
		cmd_verb |= QBMAN_EQCR_REJECT;
		c->tgtid = cpu_to_le32(fq->qid.fqid);
		dpaa2_fd_set_addr(&c->fd, baddr);
		dpaa2_fd_set_offset(&c->fd, skb_headroom(skb));
		dpaa2_fd_set_len(&c->fd, skb->len);
		if (!use_tx_conf) {
			/* hw releases the buffer into the tx-clean pool after tx */
			dpaa2_fd_set_bpid(&c->fd, netdev->tx_clean_dpbp->attr.bpid);
		}
		dpaa2_fd_set_format(&c->fd, dpaa2_fd_single);
		/* stage only; the doorbell is rung once after the loop */
		rc = qbman_swp_eqcr_submit(swp, c, cmd_verb, false);
		BUG_ON(rc);
	}

	if (i) {
		rc = qbman_swp_eqcr_submit(swp, NULL, 0, true);
		BUG_ON(rc);
	}

	return i;
}

/*
 * Command-protocol ioctl backend.  Currently supports only "get_stats":
 * prints tx/rx packet totals and a raw counter dump, sourced from the
 * attached dpmac (mac-connected ports) or from the dpni statistics
 * pages.  Returns 0 on success, negative errno on bad input or MC
 * command failure.
 */
static int dpaa2_netdev_ioctl(struct net_device *ndev, proto_handle_t *handle, proto_rxd_t *rxd)
{
	int i, j, rc;
	u64 tmp, tx_pkts, rx_pkts;
	const char *arg_cmd;
	dpaa2_ndev_t *netdev;
	union dpni_statistics stat;
	struct fsl_mc_device *mdev, *mdev_dpmac;

	if (!(arg_cmd = proto_get_str(&rxd->buf, &rxd->len))) {
		rc = -EINVAL;
		goto err;
	}

	/* Fail early (and actually return the error) for unknown commands. */
	if (strcmp(arg_cmd, "get_stats")) {
		cmd_pr_err(handle, "ERR: invalid command \"%s\"\n", arg_cmd);
		rc = -EINVAL;
		goto err;
	}

	mdev = to_fsl_mc_device(ndev->dev.parent);
	netdev = netdev_priv(ndev);

	if (netdev->dpmac) {
		mdev_dpmac = netdev->dpmac->mdev;
		if ((rc = dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, DPMAC_CNT_EGR_GOOD_FRAME, &tx_pkts))) {
			goto err;
		}
		if ((rc = dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, DPMAC_CNT_ING_ALL_FRAME, &rx_pkts))) {
			goto err;
		}
		cmd_pr_info(handle, "<tx> pkts=%llu\n", tx_pkts);
		cmd_pr_info(handle, "<rx> pkts=%llu\n", rx_pkts);
		for (i = 0; i <= DPMAC_CNT_EGR_GOOD_FRAME; i++) {
			if ((rc = dpmac_get_counter(mdev->mc_io, 0, mdev_dpmac->mc_handle, i, &tmp))) {
				goto err;
			}
			cmd_pr_info(handle, "stats[%d]=%llu\n", i, tmp);
		}
	} else {
		/*
		 * dpni_get_statistics() takes the statistics page as the 4th
		 * argument (after mc_io, cmd_flags, token), matching the
		 * usage in dpaa2_get_stats64() above.
		 */
		if ((rc = dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 1, &stat))) {
			goto err;
		}
		tx_pkts = stat.page_1.egress_all_frames;
		if ((rc = dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 0, &stat))) {
			goto err;
		}
		rx_pkts = stat.page_0.ingress_all_frames;
		if ((rc = dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, 2, &stat))) {
			goto err;
		}
		/* page 0 counts only accepted frames; add back the dropped ones */
		rx_pkts += stat.page_2.ingress_filtered_frames +
			   stat.page_2.ingress_discarded_frames +
			   stat.page_2.ingress_nobuffer_discards;
		cmd_pr_info(handle, "<tx> pkts=%llu\n", tx_pkts);
		cmd_pr_info(handle, "<rx> pkts=%llu\n", rx_pkts);
		for (i = 0; i < 3; i++) {
			if ((rc = dpni_get_statistics(mdev->mc_io, 0, mdev->mc_handle, i, &stat))) {
				goto err;
			}
			for (j = 0; j < DPNI_STATISTICS_CNT; j++) {
				cmd_pr_info(handle, "stats[%d][%d]=%llu\n", i, j, stat.raw.counter[j]);
			}
		}
	}

	return 0;
err:
	return rc;
}

/*
 * Force-stop backend: disable the dpni so no further frames are
 * accepted.  For mac-connected ports the dpni loopback is then toggled
 * on for one second and the dpni re-enabled — presumably to drain frames
 * still in flight in the MAC/dpni pipeline; confirm against the MC
 * documentation.  NOTE(review): non-mac ports are left disabled while
 * mac-connected ports end up enabled again — verify this asymmetry is
 * intended.
 */
static int dpaa2_netdev_force_stop(struct net_device *ndev)
{
	int rc;
	dpaa2_ndev_t *netdev;
	struct fsl_mc_device *mdev;
	struct dpni_port_cfg port_cfg;

	mdev = to_fsl_mc_device(ndev->dev.parent);
	netdev = netdev_priv(ndev);

	if ((rc = dpni_disable(mdev->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to disable dpni\n");
		goto err;
	}

	if (netdev->dpmac) {
		port_cfg.loopback_en = 1;
		if ((rc = dpni_set_port_cfg(mdev->mc_io, 0, mdev->mc_handle,
					    DPNI_PORT_CFG_LOOPBACK, &port_cfg))) {
			dev_err(&mdev->dev, "failed to set dpni port cfg\n");
			goto err;
		}

		/* give in-flight traffic time to drain through the loopback */
		msleep(MSEC_PER_SEC);

		port_cfg.loopback_en = 0;
		if ((rc = dpni_set_port_cfg(mdev->mc_io, 0, mdev->mc_handle,
					    DPNI_PORT_CFG_LOOPBACK, &port_cfg))) {
			dev_err(&mdev->dev, "failed to set dpni port cfg\n");
			goto err;
		}

		if ((rc = dpni_enable(mdev->mc_io, 0, mdev->mc_handle))) {
			dev_err(&mdev->dev, "failed to enable dpni\n");
			goto err;
		}
	}

	return 0;
err:
	return rc;
}

/* Driver-framework (netdev.h) callbacks for this device type. */
static netdev_priv_ops_t dpaa2_ndev_ops = {
	.pcpu_flow_poll    = dpaa2_pcpu_flow_poll,
	.netdev_tx_burst   = dpaa2_netdev_tx_burst,
	.netdev_ioctl      = dpaa2_netdev_ioctl,
	.netdev_force_stop = dpaa2_netdev_force_stop,
};

/* Build the canonical interface name "dpni-<object id>" (IFNAMSIZ-bounded). */
static void dpaa2_netdev_name(struct fsl_mc_device *mdev, char *name)
{
	snprintf(name, IFNAMSIZ, "dpni-%u", mdev->obj_desc.id);
}

/*
 * bus_find_device() match callback: true when @dev is a dpmac bound to
 * our dpmac driver whose object id equals *(const int *)arg.  (The arg
 * parameter became const in kernel 5.3, hence the signature switch.)
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
static int dpaa2_dpmac_match(struct device *dev, void *arg)
#else
static int dpaa2_dpmac_match(struct device *dev, const void *arg)
#endif
{
	const int *id = arg;
	struct fsl_mc_device *mdev;

	mdev = to_fsl_mc_device(dev);

	return dev->driver == &dpaa2_dpmac_driver.driver && mdev->obj_desc.id == *id;
}

static dpmac_dev_t *dpaa2_dpmac_init(struct fsl_mc_device *mdev_dpni)
{
	int rc, state;
	u32 id;
	dpmac_dev_t *dpmac;
	struct device *dev, *dev_parent, dev_tmp;
	struct device_node *dpmacs = NULL;
	struct fsl_mc_device *mdev;
	struct fwnode_handle *fwnode, *child;
	struct dprc_endpoint ep1, ep2;

	ep1.id = mdev_dpni->obj_desc.id;
	ep1.if_id = 0;
	strcpy(ep1.type, mdev_dpni->obj_desc.type);
	if ((rc = dprc_get_connection(dprc_mdev->mc_io, 0, dprc_mdev->mc_handle, &ep1, &ep2, &state))) {
		dev_err(&dprc_mdev->dev, "failed to get connection\n");
		goto err;
	}

	if  (strcmp(ep2.type, "dpmac")) {
		dpmac = NULL;
		goto ok;
	}

	if (!(dpmac = kzalloc(sizeof(*dpmac), GFP_KERNEL)) || MTRACE_KMEM_ADD(dpmac)) {
		dev_err(&mdev_dpni->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	if (!(dev = bus_find_device(&fsl_mc_bus_type, NULL, &ep2.id, dpaa2_dpmac_match))) {
		dev_err(&mdev_dpni->dev, "failed to find \"%s.%d\"\n", ep2.type, ep2.id);
		rc = -ENODEV;
		goto err_find;
	}
	mdev = to_fsl_mc_device(dev);
	dpmac->mdev = mdev;

	if ((rc = dpmac_open(mdev_dpni->mc_io, 0, mdev->obj_desc.id, &mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to open dpmac\n");
		goto err_open;
	}

	if ((rc = dpmac_get_attributes(mdev_dpni->mc_io, 0, mdev->mc_handle, &dpmac->attr))) {
		dev_err(&mdev->dev, "failed to get attrs\n");
		goto err_get_attr;
	}

	for (dev_parent = NULL; dev; dev = dev->parent) {
		if (dev->bus != &fsl_mc_bus_type) {
			continue;
		}
		dev_parent = dev;
	}

	fwnode = dev_fwnode(dev_parent);
	if (is_of_node(fwnode)) {
		memset(&dev_tmp, 0, sizeof(dev_tmp));
		device_initialize(&dev_tmp);
		if (!(dpmacs = of_find_node_by_name(NULL, "dpmacs"))) {
			dev_err(&mdev->dev, "failed to of node \"dpmacs\"\n");
			rc = -ENODEV;
			goto err_fn_mac;
		}
		dev_tmp.of_node = dpmacs;
		dev_parent = &dev_tmp;
	} else if (!is_acpi_node(fwnode)) {
		dev_err(&mdev->dev, "failed to get fwnode\n");
		rc = -ENODEV;
		goto err_fn_mac;
	}

	device_for_each_child_node(dev_parent, child) {
		rc = -EINVAL;
		if (is_of_node(child)) {
			rc = of_property_read_u32(to_of_node(child), "reg", &id);
		} else if (is_acpi_device_node(child)) {
			rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &id);
		}
		if (!rc && id == dpmac->attr.id) {
			break;
		}
	}
	of_node_put(dpmacs);

	if (!child) {
		dev_err(&mdev->dev, "failed to get fwnode\n");
		rc = -ENODEV;
		goto err_fn_mac;
	}

	if (dpmac->attr.link_type == DPMAC_LINK_TYPE_PHY) {
		if (IS_ERR(dpmac->fn_phy = fwnode_get_phy_node(child))) {
			rc = PTR_ERR(dpmac->fn_phy);
			dpmac->fn_phy = NULL;
			goto err_fn_phy;
		}
		dpmac->phy_if = fwnode_get_phy_mode(child);
		if (dpmac->phy_if < 0) {
			goto err_phy_if;
		}
	} else {
		switch (dpmac->attr.eth_if) {
		case DPMAC_ETH_IF_RGMII:
			dpmac->phy_if = PHY_INTERFACE_MODE_RGMII;
			break;
		case DPMAC_ETH_IF_USXGMII:
			dpmac->phy_if = PHY_INTERFACE_MODE_USXGMII;
			break;
		case DPMAC_ETH_IF_QSGMII:
			dpmac->phy_if = PHY_INTERFACE_MODE_QSGMII;
			break;
		case DPMAC_ETH_IF_SGMII:
			dpmac->phy_if = PHY_INTERFACE_MODE_SGMII;
			break;
		case DPMAC_ETH_IF_XFI:
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
			dpmac->phy_if = PHY_INTERFACE_MODE_XGMII;
#else
			dpmac->phy_if = PHY_INTERFACE_MODE_10GBASER;
#endif
			break;
		default:
			goto err_phy_if;
		}
	}
	fwnode_handle_put(child);
ok:
	return dpmac;

err_phy_if:
	fwnode_handle_put(dpmac->fn_phy);
err_fn_phy:
	fwnode_handle_put(child);
err_fn_mac:
err_get_attr:
	dpmac_close(mdev_dpni->mc_io, 0, mdev->mc_handle);
err_open:
	put_device(&mdev->dev);
err_find:
	MTRACE_KMEM_DEL(dpmac);
	kfree(dpmac);
err:
	return ERR_PTR(rc);
}

/*
 * Undo dpaa2_dpmac_init(): drop the PHY fwnode reference, close the
 * dpmac object and release the device reference taken by
 * bus_find_device().  A NULL @dpmac (non-mac connection) is a no-op.
 */
static void dpaa2_dpmac_clean(struct fsl_mc_device *mdev, dpmac_dev_t *dpmac)
{
	if (!dpmac) {
		return;
	}

	fwnode_handle_put(dpmac->fn_phy);
	dpmac_close(mdev->mc_io, 0, dpmac->mdev->mc_handle);
	put_device(&dpmac->mdev->dev);
	MTRACE_KMEM_DEL(dpmac);
	kfree(dpmac);
}

/*
 * Allocate a dpcon (QBMan channel) from the MC object pool on behalf of
 * @mdev_dpni, reset it, cache its attributes and enable it.  Returns the
 * channel descriptor or an ERR_PTR(); undone by dpaa2_dpcon_clean().
 */
static dpcon_dev_t *dpaa2_dpcon_init(struct fsl_mc_device *mdev_dpni)
{
	int rc;
	dpcon_dev_t *dpcon;
	struct fsl_mc_device *mdev;

	if (!(dpcon = kzalloc(sizeof(*dpcon), GFP_KERNEL)) || MTRACE_KMEM_ADD(dpcon)) {
		dev_err(&mdev_dpni->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	/* per-cpu channel index map, filled in by the queue setup code */
	if (!(dpcon->pcpu_chid = alloc_percpu(u8)) || MTRACE_PERCPU_ADD(dpcon->pcpu_chid)) {
		dev_err(&mdev_dpni->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err_alloc;
	}

	if ((rc = fsl_mc_object_allocate(mdev_dpni, FSL_MC_POOL_DPCON, &mdev))) {
		dev_err(&mdev_dpni->dev, "failed to alloc dpcon\n");
		goto err_obj;
	}
	dpcon->mdev = mdev;

	if ((rc = dpcon_open(mdev_dpni->mc_io, 0, mdev->obj_desc.id, &mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to open dpcon\n");
		goto err_open;
	}

	if ((rc = dpcon_reset(mdev_dpni->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to reset dpcon\n");
		goto err_reset;
	}

	if ((rc = dpcon_get_attributes(mdev_dpni->mc_io, 0, mdev->mc_handle, &dpcon->attr))) {
		dev_err(&mdev->dev, "failed to get dpcon attrs\n");
		goto err_get_attr;
	}
	dev_dbg(&mdev->dev, "id=%d, qbman_ch_id=%u\n", dpcon->attr.id, dpcon->attr.qbman_ch_id);

	if ((rc = dpcon_enable(mdev_dpni->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to enable dpcon\n");
		goto err_enable;
	}

	return dpcon;

	/* Unwind in reverse order of acquisition. */
err_enable:
err_get_attr:
err_reset:
	dpcon_close(mdev_dpni->mc_io, 0, mdev->mc_handle);
err_open:
	fsl_mc_object_free(mdev);
err_obj:
	MTRACE_PERCPU_DEL(dpcon->pcpu_chid);
	free_percpu(dpcon->pcpu_chid);
err_alloc:
	MTRACE_KMEM_DEL(dpcon);
	kfree(dpcon);
err:
	return ERR_PTR(rc);
}

/*
 * Undo dpaa2_dpcon_init(): quiesce and close the channel, return the
 * object to the MC pool, then free the per-cpu map and the descriptor.
 */
static void dpaa2_dpcon_clean(struct fsl_mc_device *mdev, dpcon_dev_t *dpcon)
{
	struct fsl_mc_device *mdev_dpcon = dpcon->mdev;

	dpcon_disable(mdev->mc_io, 0, mdev_dpcon->mc_handle);
	dpcon_close(mdev->mc_io, 0, mdev_dpcon->mc_handle);
	fsl_mc_object_free(mdev_dpcon);

	MTRACE_PERCPU_DEL(dpcon->pcpu_chid);
	free_percpu(dpcon->pcpu_chid);
	MTRACE_KMEM_DEL(dpcon);
	kfree(dpcon);
}

/*
 * Allocate a dpbp (buffer pool) from the MC pool, drain any stale
 * buffers left in it, then seed it with @nr freshly allocated skb
 * buffers of size @sz.  Returns the pool descriptor or an ERR_PTR();
 * undone by dpaa2_dpbp_clean().  Must run on a CPU with a live portal.
 */
static dpbp_dev_t *dpaa2_dpbp_init(struct fsl_mc_device *mdev_dpni, size_t sz, unsigned int nr)
{
	u8 cmd_verb;
	int rc;
	u64 bufs[7];	/* 7 = max buffers per acquire/release command */
	dpbp_dev_t *dpbp;
	dma_addr_t baddr;
	unsigned int i, n, cnt;
	struct sk_buff *skb;
	struct qbman_swp *swp;
	struct net_device *ndev;
	struct fsl_mc_device *mdev;
	struct qbman_acq_cmd *c_acq;
	struct qbman_rel_cmd *c_rel;
	const struct qbman_acq_resp __iomem *r;

	if (!(dpbp = kzalloc(sizeof(*dpbp), GFP_KERNEL)) || MTRACE_KMEM_ADD(dpbp)) {
		dev_err(&mdev_dpni->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	if ((rc = fsl_mc_object_allocate(mdev_dpni, FSL_MC_POOL_DPBP, &mdev))) {
		dev_err(&mdev_dpni->dev, "failed to alloc dpbp\n");
		goto err_obj;
	}
	dpbp->mdev = mdev;

	if ((rc = dpbp_open(mdev_dpni->mc_io, 0, mdev->obj_desc.id, &mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to open dpbp\n");
		goto err_open;
	}

	if ((rc = dpbp_reset(mdev_dpni->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to open dpbp\n");
		goto err_reset;
	}

	if ((rc = dpbp_get_attributes(mdev_dpni->mc_io, 0, mdev->mc_handle, &dpbp->attr))) {
		dev_err(&mdev->dev, "failed to get dpbp attrs\n");
		goto err_get_attr;
	}

	if ((rc = dpbp_enable(mdev_dpni->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to enable dpbp\n");
		goto err_enable;
	}

	/*
	 * Drain stale buffers: acquire up to 7 at a time, halving the
	 * request on each short response until a request of 0 ends the loop.
	 */
	swp = this_cpu_ptr(&cpu_dpaa2_io_infos)->swp;
	cnt = 0;
	n = 7;
	while (n) {
		while (!(c_acq = qbman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QBMAN_CR_ACQUIRE;
		c_acq->bpid = cpu_to_le16(dpbp->attr.bpid);
		c_acq->num = n;
		qbman_swp_cr_submit(swp, c_acq, cmd_verb);
		while (!(r = qbman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON(r->rslt != QBMAN_MC_RSLT_OK)) {
			break;
		}
		BUG_ON(r->num > n);
		cnt += r->num;
		n = r->num ? : n / 2;
	}

	if (cnt) {
		dev_warn(&mdev->dev, "dpbp %u: drained %u bufs\n", dpbp->attr.id, cnt);
	}

	/* Seed the pool with @nr buffers, released in chunks of up to 7. */
	ndev = dev_get_drvdata(&mdev_dpni->dev);
	cnt = 0;
	while (nr) {
		n = min(nr, 7u);
		for (i = 0; i < n; i++) {
			if (!(skb = sb_alloc_skb(ndev, sz, GFP_KERNEL | GFP_DMA))
			||  MTRACE_SB_SKB_ADD(skb)) {
				/* free the skbs staged for this chunk, then unwind */
				while (i--) {
					baddr = bufs[i];
					skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
					BUG_ON(!skb);
					MTRACE_SB_SKB_DEL(skb);
					sb_kfree_skb(skb);
				}
				rc = -ENOMEM;
				goto err_alloc;
			}
			baddr = sb_skbh_to_baddr(skb->head);
			bufs[i] = baddr;
		}
		while (!(c_rel = qbman_swp_rcr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QBMAN_RCR_RELEASE | n;
		c_rel->bpid = cpu_to_le16(dpbp->attr.bpid);
		for (i = 0; i < n; i++) {
			c_rel->buf[i] = cpu_to_le64(bufs[i]);
		}
		/* ring the doorbell only on the final chunk */
		rc = qbman_swp_rcr_submit(swp, c_rel, cmd_verb, (n == nr) ? true : false);
		BUG_ON(rc);
		nr -= n;
		cnt += n;
	}

	if (cnt) {
		dev_info(&mdev->dev, "dpbp %u: released %u bufs\n", dpbp->attr.id, cnt);
	}

	return dpbp;

	/* Re-drain the pool to reclaim buffers released before the failure. */
err_alloc:
	n = 7;
	while (n) {
		while (!(c_acq = qbman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QBMAN_CR_ACQUIRE;
		c_acq->bpid = cpu_to_le16(dpbp->attr.bpid);
		c_acq->num = n;
		qbman_swp_cr_submit(swp, c_acq, cmd_verb);
		while (!(r = qbman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON(r->rslt != QBMAN_MC_RSLT_OK)) {
			break;
		}
		BUG_ON(r->num > n);
		for (i = 0; i < r->num; i++) {
			baddr = le64_to_cpu(r->buf[i]);
			skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
			BUG_ON(!skb);
			MTRACE_SB_SKB_DEL(skb);
			sb_kfree_skb(skb);
		}
		n = r->num ? : n / 2;
	}
	dpbp_disable(mdev_dpni->mc_io, 0, mdev->mc_handle);
err_enable:
err_get_attr:
err_reset:
	dpbp_close(mdev_dpni->mc_io, 0, mdev->mc_handle);
err_open:
	fsl_mc_object_free(mdev);
err_obj:
	MTRACE_KMEM_DEL(dpbp);
	kfree(dpbp);
err:
	return ERR_PTR(rc);
}

/*
 * dpaa2_dpbp_clean() - drain a DPBP buffer pool and destroy it.
 * @mdev: DPNI mc device whose mc_io portal issues the DPBP commands
 * @dpbp: pool state created by the matching init routine; freed here
 * @nr:   number of buffers the pool is expected to still hold
 *
 * Pulls buffers out of the pool with QBMan ACQUIRE commands (at most 7
 * per command) and frees the skb backing each one, then disables and
 * closes the DPBP object and releases its bookkeeping.
 */
static void dpaa2_dpbp_clean(struct fsl_mc_device *mdev, dpbp_dev_t *dpbp, unsigned int nr)
{
	u8 cmd_verb;
	dma_addr_t baddr;
	unsigned int i, n, cnt;
	struct sk_buff *skb;
	struct qbman_swp *swp;
	struct net_device *ndev;
	struct qbman_acq_cmd *c;
	const struct qbman_acq_resp __iomem *r;

	swp = this_cpu_ptr(&cpu_dpaa2_io_infos)->swp;
	ndev = dev_get_drvdata(&mdev->dev);
	cnt = 0;
	n = 7;
	while (n) {
		/* Busy-wait for a free management-command slot. */
		while (!(c = qbman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QBMAN_CR_ACQUIRE;
		c->bpid = cpu_to_le16(dpbp->attr.bpid);
		c->num = n;
		qbman_swp_cr_submit(swp, c, cmd_verb);
		/* Busy-wait for the matching response. */
		while (!(r = qbman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON(r->rslt != QBMAN_MC_RSLT_OK)) {
			break;
		}
		BUG_ON(r->num > n);
		/* Free the skb behind every buffer address we got back. */
		for (i = 0; i < r->num; i++) {
			baddr = le64_to_cpu(r->buf[i]);
			skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
			BUG_ON(!skb);
			MTRACE_SB_SKB_DEL(skb);
			sb_kfree_skb(skb);
		}
		cnt += r->num;
		/*
		 * Next request asks for as many as we just received; on an
		 * empty reply, halve the request until it reaches 0, i.e.
		 * the pool is drained.
		 */
		n = r->num ? : n / 2;
	}

	if (cnt) {
		dev_info(&dpbp->mdev->dev, "dpbp %u: drained %u bufs\n", dpbp->attr.id, cnt);
	}
	/* Everything that was seeded at init time should have come back. */
	WARN_ON(cnt != nr);

	dpbp_disable(mdev->mc_io, 0, dpbp->mdev->mc_handle);
	dpbp_close(mdev->mc_io, 0, dpbp->mdev->mc_handle);
	fsl_mc_object_free(dpbp->mdev);
	MTRACE_KMEM_DEL(dpbp);
	kfree(dpbp);
}

/*
 * dpaa2_netdev_dpni_init() - open and configure the DPNI behind @netdev.
 *
 * Opens and resets the DPNI, validates that it has enough queues for the
 * configured tx/rx queue counts, then programs: max frame length, buffer
 * layouts for RX/TX/TX-confirm, the RX buffer pool, the tx confirmation
 * mode, the error-handling behavior, and finally unicast + multicast
 * promiscuous mode. On any failure after the open, the DPNI is closed
 * again (all error labels fall through to dpni_close()).
 *
 * Return: 0 on success (the DPNI handle stays open in mdev->mc_handle),
 * negative errno otherwise.
 */
static int dpaa2_netdev_dpni_init(dpaa2_ndev_t *netdev)
{
	int rc;
	struct net_device *ndev;
	struct fsl_mc_device *mdev;
	struct dpni_error_cfg err_cfg;
	struct dpni_pools_cfg pool_cfg;
	struct dpni_buffer_layout layout;
	enum dpni_confirmation_mode mode;

	ndev = netdev->ndev;
	mdev = to_fsl_mc_device(ndev->dev.parent);

	if ((rc = dpni_open(mdev->mc_io, 0, mdev->obj_desc.id, &mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to open dpni\n");
		goto err;
	}

	if ((rc = dpni_reset(mdev->mc_io, 0, mdev->mc_handle))) {
		dev_err(&mdev->dev, "failed to reset dpni\n");
		goto err_reset;
	}

	if ((rc = dpni_get_attributes(mdev->mc_io, 0, mdev->mc_handle, &netdev->attr))) {
		dev_err(&mdev->dev, "failed to get attrs\n");
		goto err_get_attr;
	}

	/* The hardware must provide at least one queue per SW tx/rx queue. */
	if ((netdev->attr.num_queues < ndev->real_num_tx_queues)
	||  (netdev->attr.num_queues < ndev->real_num_rx_queues)) {
		dev_err(&mdev->dev, "invalid setting of num_queues\n");
		rc = -EINVAL;
		goto err_num_queues;
	}

	if ((rc = dpni_set_max_frame_length(mdev->mc_io, 0, mdev->mc_handle, DPAA2_DPNI_MAX_FRM_SZ))) {
		dev_err(&mdev->dev, "failed to set max frame length\n");
		goto err_max_frm;
	}

	/* RX buffers need the data alignment; TX layouts are left default. */
	memset(&layout, 0, sizeof(layout));
	layout.data_align = DPAA2_DPNI_DATA_ALIGN;
	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
	if ((rc = dpni_set_buffer_layout(mdev->mc_io, 0, mdev->mc_handle, DPNI_QUEUE_RX, &layout))) {
		dev_err(&mdev->dev, "failed to set rx buffer layout\n");
		goto err_buf_layout;
	}

	memset(&layout, 0, sizeof(layout));
	if ((rc = dpni_set_buffer_layout(mdev->mc_io, 0, mdev->mc_handle, DPNI_QUEUE_TX, &layout))) {
		dev_err(&mdev->dev, "failed to set tx buffer layout\n");
		goto err_buf_layout;
	}

	memset(&layout, 0, sizeof(layout));
	if ((rc = dpni_set_buffer_layout(mdev->mc_io, 0, mdev->mc_handle, DPNI_QUEUE_TX_CONFIRM, &layout))) {
		dev_err(&mdev->dev, "failed to set tx confirm buffer layout\n");
		goto err_buf_layout;
	}

	/* Attach the single RX buffer pool created earlier. */
	memset(&pool_cfg, 0, sizeof(pool_cfg));
	pool_cfg.num_dpbp = 1;
	pool_cfg.pools[0].dpbp_id = netdev->rx_dpbp->attr.id;
	pool_cfg.pools[0].backup_pool = 0;
	pool_cfg.pools[0].buffer_size = DPAA2_DPA_BP_SZ;
	if ((rc = dpni_set_pools(mdev->mc_io, 0, mdev->mc_handle, &pool_cfg))) {
		dev_err(&mdev->dev, "failed to set pools\n");
		goto err_set_pool;
	}

	/* TX confirmations are only generated when use_tx_conf is set. */
	mode = use_tx_conf ? DPNI_CONF_AFFINE : DPNI_CONF_DISABLE;
	if ((rc = dpni_set_tx_confirmation_mode(mdev->mc_io, 0, mdev->mc_handle, mode))) {
		dev_err(&mdev->dev, "failed to set tx confirm mode\n");
		goto err_tx_conf_mode;
	}

	/* Frame-list and physical errors: drop the frame. */
	err_cfg.errors = DPNI_ERROR_FLE | DPNI_ERROR_FPE;
	err_cfg.set_frame_annotation = 0;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	if ((rc = dpni_set_errors_behavior(mdev->mc_io, 0, mdev->mc_handle, &err_cfg))) {
		dev_err(&mdev->dev, "failed to set errors behavior\n");
		goto err_behavior;
	}

	/* Parse/checksum errors: deliver the frame anyway. */
	err_cfg.errors = DPNI_ERROR_EOFHE | DPNI_ERROR_PHE | DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
	err_cfg.set_frame_annotation = 0;
	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	if ((rc = dpni_set_errors_behavior(mdev->mc_io, 0, mdev->mc_handle, &err_cfg))) {
		dev_err(&mdev->dev, "failed to set errors behavior\n");
		goto err_behavior;
	}

	if ((rc = dpni_set_unicast_promisc(mdev->mc_io, 0, mdev->mc_handle, 1))) {
		dev_err(&mdev->dev, "failed to enable unicast promiscuous\n");
		goto err_promisc;
	}

	if ((rc = dpni_set_multicast_promisc(mdev->mc_io, 0, mdev->mc_handle, 1))) {
		dev_err(&mdev->dev, "failed to enable multicast promiscuous\n");
		goto err_promisc;
	}

	return 0;

err_promisc:
err_behavior:
err_tx_conf_mode:
err_set_pool:
err_buf_layout:
err_max_frm:
err_num_queues:
err_get_attr:
err_reset:
	dpni_close(mdev->mc_io, 0, mdev->mc_handle);
err:
	return rc;
}

/* Close the DPNI object that backs this network interface. */
static void dpaa2_netdev_dpni_clean(dpaa2_ndev_t *netdev)
{
	struct fsl_mc_device *mdev = to_fsl_mc_device(netdev->ndev->dev.parent);

	dpni_close(mdev->mc_io, 0, mdev->mc_handle);
}

/*
 * dpaa2_rx_dqrr() - dequeue callback for the RX queues.
 *
 * Recovers the skb from the frame descriptor's buffer address, hands the
 * frame to the simplebit stack, then rewinds the skb and releases its
 * buffer straight back to the pool it was drawn from.
 */
static void dpaa2_rx_dqrr(struct qbman_swp *swp, struct dpaa2_fq *fq,
			  const struct dpaa2_dq __iomem *dq)
{
	u8 cmd_verb;
	int rc;
	dma_addr_t baddr;
	struct sk_buff *skb;
	struct net_device *ndev;
	const struct dpaa2_fd __iomem *fd;
	struct qbman_rel_cmd *c;

	ndev = dev_get_drvdata(&fq->mdev->dev);

	fd = (const void __iomem *)dpaa2_dq_fd(dq);
	/* Only single-buffer frames are expected. */
	BUG_ON(dpaa2_fd_get_format(fd) != dpaa2_fd_single);
	baddr = dpaa2_fd_get_addr(fd);
	skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
	BUG_ON(!skb);
	skb_reserve(skb, dpaa2_fd_get_offset(fd));
	skb_put(skb, dpaa2_fd_get_len(fd));
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	sb_netdev_receive(skb, ndev);

	/*
	 * Reset the skb to an empty, headroom-less state so its buffer can
	 * be recycled into the pool as-is.
	 */
	BUG_ON(skb_shared(skb));
	skb_trim(skb, 0);
	skb_reserve(skb, -skb_headroom(skb));
	/* Busy-wait for a free buffer-release slot, then release to the
	 * same pool the frame descriptor named.
	 */
	while (!(c = qbman_swp_rcr_next(swp, &cmd_verb))) {
		continue;
	}
	cmd_verb |= QBMAN_RCR_RELEASE | 1;
	c->bpid = cpu_to_le16(dpaa2_fd_get_bpid(fd));
	c->buf[0] = cpu_to_le64(baddr);
	rc = qbman_swp_rcr_submit(swp, c, cmd_verb, true);
	BUG_ON(rc);
}

/*
 * dpaa2_rx_err_dqrr() - dequeue callback for the RX error queue.
 *
 * The frame is dropped: its buffer is released straight back to the
 * pool named by the frame descriptor, without touching the payload.
 */
static void dpaa2_rx_err_dqrr(struct qbman_swp *swp, struct dpaa2_fq *fq,
			      const struct dpaa2_dq __iomem *dq)
{
	u8 cmd_verb;
	int rc;
	const struct dpaa2_fd __iomem *fd;
	struct qbman_rel_cmd *c;

	fd = (const void __iomem *)dpaa2_dq_fd(dq);
	/* Only single-buffer frames are expected. */
	BUG_ON(dpaa2_fd_get_format(fd) != dpaa2_fd_single);
	/* Busy-wait for a free buffer-release slot. */
	while (!(c = qbman_swp_rcr_next(swp, &cmd_verb))) {
		continue;
	}
	cmd_verb |= QBMAN_RCR_RELEASE | 1;
	c->bpid = cpu_to_le16(dpaa2_fd_get_bpid(fd));
	c->buf[0] = cpu_to_le64(dpaa2_fd_get_addr(fd));
	rc = qbman_swp_rcr_submit(swp, c, cmd_verb, true);
	BUG_ON(rc);
}

/*
 * TX-confirmation callback: a frame has left the hardware, so free the
 * skb that carried it.
 */
static void dpaa2_tx_conf_dqrr(struct qbman_swp *swp, struct dpaa2_fq *fq,
			       const struct dpaa2_dq __iomem *dq)
{
	const struct dpaa2_fd __iomem *fd = (const void __iomem *)dpaa2_dq_fd(dq);
	struct net_device *ndev = dev_get_drvdata(&fq->mdev->dev);
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct sk_buff *skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, addr));

	BUG_ON(!skb);
	sb_kfree_skb(skb);
}

/*
 * dpaa2_dpni_fqs_init() - allocate and configure one set of DPNI frame queues.
 * @mdev:  the DPNI mc device
 * @type:  queue type (RX, RX_ERR, TX, TX_CONFIRM)
 * @count: number of queues of this type
 * @dest:  DPCON channel id to dequeue to (unused when @dqrr is NULL)
 * @dqrr:  dequeue callback, or NULL for enqueue-only queues
 *
 * Each dequeued queue's user context is set to its dpaa2_fq_t so the
 * poll loop can recover the callback from the dequeue entry.
 *
 * Return: the fq array on success, ERR_PTR() on failure. The caller owns
 * the array and frees it via dpaa2_dpni_fqs_clean().
 */
static dpaa2_fq_t *dpaa2_dpni_fqs_init(struct fsl_mc_device *mdev, enum dpni_queue_type type,
				       u32 count, u16 dest, qbman_cb_dqrr_t dqrr)
{
	u8 q_opt;
	int rc;
	dpaa2_fq_t *fqs;
	unsigned int i;
	struct dpni_queue q;

	/* kcalloc() zeroes and checks the count * size multiply for overflow. */
	if (!(fqs = kcalloc(count, sizeof(*fqs), GFP_KERNEL)) || MTRACE_KMEM_ADD(fqs)) {
		dev_err(&mdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < count; i++) {
		fqs[i].mdev = mdev;
		fqs[i].dqrr = dqrr;
		if ((rc = dpni_get_queue(mdev->mc_io, 0, mdev->mc_handle, type, 0, i, &q, &fqs[i].qid))) {
			dev_err(&mdev->dev, "failed to get fq %u\n", i);
			goto err_get_q;
		}
		/* Enqueue-only queues (no callback) need no dequeue setup. */
		if (!dqrr) {
			continue;
		}
		q.destination.id = dest;
		q.destination.type = DPNI_DEST_DPCON;
		q.destination.hold_active = 0;
		q.destination.priority = 0;
		/* The poll loop reads this back as the fq pointer. */
		q.user_context = (u64)&fqs[i];
		q.flc.value = 0;
		q.flc.stash_control = 1;
		q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_HOLD_ACTIVE;
		q_opt |= (type == DPNI_QUEUE_RX) ? DPNI_QUEUE_OPT_FLC : 0;
		if ((rc = dpni_set_queue(mdev->mc_io, 0, mdev->mc_handle, type, 0, i, q_opt, &q))) {
			dev_err(&mdev->dev, "failed to set fq %u\n", i);
			goto err_set_q;
		}
	}

	return fqs;

err_set_q:
err_get_q:
	MTRACE_KMEM_DEL(fqs);
	kfree(fqs);
err:
	return ERR_PTR(rc);
}

/*
 * Detach the dequeue context from every queue that had one, then free
 * the fq array.
 */
static void dpaa2_dpni_fqs_clean(struct fsl_mc_device *mdev, enum dpni_queue_type type,
				 dpaa2_fq_t *fqs, u32 count)
{
	struct dpni_queue q;
	unsigned int i;
	u8 opt;

	for (i = 0; i < count; i++) {
		if (!fqs[i].dqrr) {
			continue;
		}
		if (dpni_get_queue(mdev->mc_io, 0, mdev->mc_handle, type, 0, i, &q, &fqs[i].qid)) {
			dev_err(&mdev->dev, "failed to get fq %u\n", i);
			continue;
		}
		q.user_context = 0;
		opt = DPNI_QUEUE_OPT_USER_CTX;
		if (type == DPNI_QUEUE_RX) {
			opt |= DPNI_QUEUE_OPT_FLC;
		}
		if (dpni_set_queue(mdev->mc_io, 0, mdev->mc_handle, type, 0, i, opt, &q)) {
			dev_err(&mdev->dev, "failed to set fq %u\n", i);
			continue;
		}
	}

	MTRACE_KMEM_DEL(fqs);
	kfree(fqs);
}

/*
 * dpaa2_netdev_fqs_init() - set up all frame queue sets for one interface.
 *
 * RX, RX-error and TX-confirmation queues are bound to the interface's
 * DPCON channel with their respective dequeue callbacks; TX queues get
 * no callback (enqueue only — just their qids are looked up). On failure
 * the sets that were already initialized are unwound in reverse order.
 */
static int dpaa2_netdev_fqs_init(dpaa2_ndev_t *netdev)
{
	int rc;
	struct net_device *ndev;
	struct fsl_mc_device *mdev;

	ndev = netdev->ndev;
	mdev = to_fsl_mc_device(ndev->dev.parent);

	if (IS_ERR(netdev->rx_fqs = dpaa2_dpni_fqs_init(mdev, DPNI_QUEUE_RX, ndev->real_num_rx_queues,
							netdev->dpcon->attr.id, dpaa2_rx_dqrr))) {
		dev_err(&mdev->dev, "failed to init rx_fqs\n");
		rc = PTR_ERR(netdev->rx_fqs);
		goto err;
	}
	dev_dbg(&mdev->dev, "rx_fqs: fqids=%u:%u\n", netdev->rx_fqs[0].qid.fqid, ndev->real_num_rx_queues);

	if (IS_ERR(netdev->rx_err_fq = dpaa2_dpni_fqs_init(mdev, DPNI_QUEUE_RX_ERR, 1,
							   netdev->dpcon->attr.id, dpaa2_rx_err_dqrr))) {
		dev_err(&mdev->dev, "failed to init rx_err_fq\n");
		rc = PTR_ERR(netdev->rx_err_fq);
		goto err_rx_err_fq;
	}
	dev_dbg(&mdev->dev, "rx_err_fq: fqid=%u\n", netdev->rx_err_fq->qid.fqid);

	if (IS_ERR(netdev->tx_conf_fqs = dpaa2_dpni_fqs_init(mdev, DPNI_QUEUE_TX_CONFIRM, ndev->real_num_tx_queues,
							     netdev->dpcon->attr.id, dpaa2_tx_conf_dqrr))) {
		dev_err(&mdev->dev, "failed to init tx_conf_fqs\n");
		rc = PTR_ERR(netdev->tx_conf_fqs);
		goto err_tx_conf_fqs;
	}
	dev_dbg(&mdev->dev, "tx_conf_fqs: fqids=%u:%u\n", netdev->tx_conf_fqs[0].qid.fqid, ndev->real_num_tx_queues);

	if (IS_ERR(netdev->tx_fqs = dpaa2_dpni_fqs_init(mdev, DPNI_QUEUE_TX, ndev->real_num_tx_queues, 0, NULL))) {
		dev_err(&mdev->dev, "failed to init tx_fqs\n");
		rc = PTR_ERR(netdev->tx_fqs);
		goto err_tx_fqs;
	}
	dev_dbg(&mdev->dev, "tx_fqs: fqids=%u:%u\n", netdev->tx_fqs[0].qid.fqid, ndev->real_num_tx_queues);

	return 0;

err_tx_fqs:
	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_TX_CONFIRM, netdev->tx_conf_fqs, ndev->real_num_tx_queues);
err_tx_conf_fqs:
	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_RX_ERR, netdev->rx_err_fq, 1);
err_rx_err_fq:
	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_RX, netdev->rx_fqs, ndev->real_num_rx_queues);
err:
	return rc;
}

/* Release all frame-queue sets in reverse order of their creation. */
static void dpaa2_netdev_fqs_clean(dpaa2_ndev_t *netdev)
{
	struct net_device *ndev = netdev->ndev;
	struct fsl_mc_device *mdev = to_fsl_mc_device(ndev->dev.parent);

	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_TX, netdev->tx_fqs, ndev->real_num_tx_queues);
	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_TX_CONFIRM, netdev->tx_conf_fqs, ndev->real_num_tx_queues);
	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_RX_ERR, netdev->rx_err_fq, 1);
	dpaa2_dpni_fqs_clean(mdev, DPNI_QUEUE_RX, netdev->rx_fqs, ndev->real_num_rx_queues);
}

/*
 * dpaa2_netdev_init() - worker-cpu setup for one dpni object.
 * @arg: the dpni fsl_mc_device
 *
 * Runs via work_on_cpu() on a worker cpu (portal-affine work): allocates
 * the net_device, opens an MC portal, then brings up dpmac, dpcon, the
 * two buffer pools, the DPNI configuration and the frame queues before
 * registering the netdev. Errors unwind everything in reverse order.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static long dpaa2_netdev_init(void *arg)
{
	long rc;
	char name[IFNAMSIZ];
	dpaa2_ndev_t *netdev;
	struct net_device *ndev;
	struct fsl_mc_device *mdev = arg;

	dpaa2_netdev_name(mdev, name);

	if (!sb_netdev_in_filter(name)) {
		rc = -ENODEV;
		goto err;
	}

	if (!(ndev = sb_netdev_alloc(sizeof(*netdev), nr_cpu_ids, 1))) {
		dev_err(&mdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}
	SET_NETDEV_DEV(ndev, &mdev->dev);
	ndev->netdev_ops = &dpaa2_netdev_ops;
	ndev->ethtool_ops = &dpaa2_ethtool_ops;
	dev_set_drvdata(&mdev->dev, ndev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0)
	ndev->features |= NETIF_F_LLTX;
#else
	ndev->lltx = true;
#endif
	ndev->priv_flags |= IFF_TX_SKB_SHARING;
	/*
	 * snprintf() bounds by the destination and always NUL-terminates;
	 * the previous strncpy() was bounded by the source buffer and
	 * could have left ndev->name unterminated.
	 */
	snprintf(ndev->name, sizeof(ndev->name), "%s", name);
	netdev = netdev_priv(ndev);
	netdev->ndev = ndev;
	netdev->flags |= DPAA2_NDEV_F_PAUSE_AUTONEG;

	if ((rc = fsl_mc_portal_allocate(mdev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mdev->mc_io))) {
		dev_err(&mdev->dev, "failed to alloc mc portal\n");
		goto err_mc_io;
	}

	if (IS_ERR(netdev->dpmac = dpaa2_dpmac_init(mdev))) {
		dev_err(&mdev->dev, "failed to init dpmac\n");
		rc = PTR_ERR(netdev->dpmac);
		goto err_dpmac_init;
	}

	if (IS_ERR(netdev->dpcon = dpaa2_dpcon_init(mdev))) {
		dev_err(&mdev->dev, "failed to init dpcon\n");
		rc = PTR_ERR(netdev->dpcon);
		goto err_dpcon_init;
	}

	/* RX pool is pre-seeded with skb_buf_nr buffers; the tx-clean pool
	 * starts empty.
	 */
	if (IS_ERR(netdev->rx_dpbp = dpaa2_dpbp_init(mdev, DPAA2_DPA_BP_SZ, skb_buf_nr))) {
		dev_err(&mdev->dev, "failed to init rx_dpbp\n");
		rc = PTR_ERR(netdev->rx_dpbp);
		goto err_rx_dpbp_init;
	}

	if (IS_ERR(netdev->tx_clean_dpbp = dpaa2_dpbp_init(mdev, 0, 0))) {
		dev_err(&mdev->dev, "failed to init tx_clean_dpbp\n");
		rc = PTR_ERR(netdev->tx_clean_dpbp);
		goto err_tx_clean_bp_init;
	}

	if ((rc = dpaa2_netdev_dpni_init(netdev))) {
		dev_err(&mdev->dev, "failed to init dpni\n");
		goto err_dpni_init;
	}

	if ((rc = dpaa2_netdev_fqs_init(netdev))) {
		dev_err(&mdev->dev, "failed to init fqs\n");
		goto err_fqs_init;
	}

	if ((rc = sb_netdev_register(ndev))) {
		dev_err(&mdev->dev, "failed to register netdev\n");
		goto err_reg;
	}

	return 0;

err_reg:
	dpaa2_netdev_fqs_clean(netdev);
err_fqs_init:
	dpaa2_netdev_dpni_clean(netdev);
err_dpni_init:
	dpaa2_dpbp_clean(mdev, netdev->tx_clean_dpbp, 0);
err_tx_clean_bp_init:
	dpaa2_dpbp_clean(mdev, netdev->rx_dpbp, skb_buf_nr);
err_rx_dpbp_init:
	dpaa2_dpcon_clean(mdev, netdev->dpcon);
err_dpcon_init:
	dpaa2_dpmac_clean(mdev, netdev->dpmac);
err_dpmac_init:
	fsl_mc_portal_free(mdev->mc_io);
err_mc_io:
	sb_netdev_free(ndev);
err:
	return rc;
}

/*
 * Reverse of dpaa2_netdev_init(); runs via work_on_cpu() on a worker cpu
 * so the buffer-pool drain can use that cpu's portal.
 */
static long dpaa2_netdev_clean(void *arg)
{
	struct fsl_mc_device *mdev = arg;
	struct net_device *ndev = dev_get_drvdata(&mdev->dev);
	dpaa2_ndev_t *netdev = netdev_priv(ndev);

	sb_netdev_unregister(ndev);
	dpaa2_netdev_fqs_clean(netdev);
	dpaa2_netdev_dpni_clean(netdev);
	dpaa2_dpbp_clean(mdev, netdev->tx_clean_dpbp, 0);
	dpaa2_dpbp_clean(mdev, netdev->rx_dpbp, skb_buf_nr);
	dpaa2_dpcon_clean(mdev, netdev->dpcon);
	dpaa2_dpmac_clean(mdev, netdev->dpmac);
	fsl_mc_portal_free(mdev->mc_io);
	sb_netdev_free(ndev);

	return 0;
}

/*
 * Per-cpu binding of the interface's DPCON channel: attach the channel
 * to this cpu's DPIO and start pushing its dequeues into the portal.
 */
static long dpaa2_netdev_bind(void *arg)
{
	dpaa2_ndev_t *netdev = arg;
	unsigned int cpu = smp_processor_id();
	dpaa2_io_info_t *info = per_cpu_ptr(&cpu_dpaa2_io_infos, cpu);
	struct fsl_mc_device *mdev = to_fsl_mc_device(netdev->ndev->dev.parent);
	u8 *chid = per_cpu_ptr(netdev->dpcon->pcpu_chid, cpu);
	long rc;

	rc = dpio_add_static_dequeue_channel(mdev->mc_io, 0, info->mdev->mc_handle,
					     netdev->dpcon->attr.id, chid);
	if (rc) {
		dev_err(&mdev->dev, "failed to add static dequeue channel\n");
		return rc;
	}

	qbman_swp_push_set(info->swp, *chid, true);
	dev_dbg(&mdev->dev, "bind to cpu %u\n", cpu);

	return 0;
}

/*
 * Per-cpu teardown of the DPCON binding: stop pushing dequeues from the
 * channel into this cpu's portal, then detach the channel from the DPIO.
 */
static long dpaa2_netdev_unbind(void *arg)
{
	dpaa2_ndev_t *netdev = arg;
	unsigned int cpu = smp_processor_id();
	dpaa2_io_info_t *info = per_cpu_ptr(&cpu_dpaa2_io_infos, cpu);
	struct fsl_mc_device *mdev = to_fsl_mc_device(netdev->ndev->dev.parent);
	u8 *chid = per_cpu_ptr(netdev->dpcon->pcpu_chid, cpu);
	long rc;

	qbman_swp_push_set(info->swp, *chid, false);
	rc = dpio_remove_static_dequeue_channel(mdev->mc_io, 0, info->mdev->mc_handle,
						netdev->dpcon->attr.id);
	if (rc) {
		dev_err(&info->mdev->dev, "failed to remove static dequeue channel\n");
		return rc;
	}
	dev_dbg(&mdev->dev, "unbind from cpu %u\n", cpu);

	return 0;
}

/*
 * dpaa2_fixed_phy_link_update() - link poll callback for the fixed PHY.
 *
 * Reads the DPNI link state at most once per second (cached in
 * netdev->dpni_state) and mirrors it into the fixed PHY status. When the
 * rate or duplex/pause options changed since the last read, the state is
 * reported as down for this pass, so the PHY state machine re-latches
 * the new parameters on the following up transition. On a query failure
 * the link is reported down and the error returned.
 */
static int dpaa2_fixed_phy_link_update(struct net_device *ndev, struct fixed_phy_status *status)
{
	int rc;
	u64 mask;
	dpaa2_ndev_t *netdev;
	unsigned long now;
	struct fsl_mc_device *mdev;
	struct dpni_link_state dpni_state, *state;

	mdev = to_fsl_mc_device(ndev->dev.parent);
	netdev = netdev_priv(ndev);
	state = &netdev->dpni_state;

	now = jiffies;
	/* Refresh the cached state at most once per second. */
	if (now - netdev->dpni_state_jiffies >= msecs_to_jiffies(1000)) {
		if ((rc = dpni_get_link_state(mdev->mc_io, 0, mdev->mc_handle, &dpni_state))) {
			dev_err(&mdev->dev, "failed to get dpni link state\n");
			goto err;
		}
		netdev->dpni_state_jiffies = now;
		/* Force a down report whenever rate or these options moved. */
		mask = (DPNI_LINK_OPT_HALF_DUPLEX | DPNI_LINK_OPT_PAUSE | DPNI_LINK_OPT_ASYM_PAUSE);
		if ((dpni_state.rate != state->rate)
		||  ((dpni_state.options ^ state->options) & mask)) {
			dpni_state.up = 0;
		}
		*state = dpni_state;
	}

	status->link = state->up;
	/* A zero rate while up is reported as 10G; a down link reports 10M. */
	status->speed = state->up ? (state->rate ? : SPEED_10000) : SPEED_10;
	status->duplex = (state->options & DPNI_LINK_OPT_HALF_DUPLEX) ? DUPLEX_HALF : DUPLEX_FULL;
	status->pause = (state->options & DPNI_LINK_OPT_PAUSE) ? 1 : 0;
	status->asym_pause = (state->options & DPNI_LINK_OPT_ASYM_PAUSE) ? 1 : 0;
	return 0;
err:
	status->link = 0;
	status->speed = SPEED_10;
	status->duplex = 0;
	status->pause = 0;
	status->asym_pause = 0;
	return rc;
}

static void dpaa2_netdev_adjust_link(struct net_device *ndev)
{
	u16 adv, lcladv, rmtadv;
	dpaa2_ndev_t *netdev;
	struct phy_device *phydev;
	struct fsl_mc_device *mdev, *mdev_dpmac;
	struct dpni_link_cfg dpni_cfg;
	struct dpmac_link_state dpmac_state;

	mdev = to_fsl_mc_device(ndev->dev.parent);
	phydev = ndev->phydev;
	netdev = netdev_priv(ndev);

	phy_print_status(phydev);

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
	lcladv = ethtool_adv_to_mii_adv_t(phydev->advertising);
	rmtadv = ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
#else
	lcladv = linkmode_adv_to_mii_adv_t(phydev->advertising);
	rmtadv = linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
#endif
	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    (netdev->flags & DPAA2_NDEV_F_PAUSE_AUTONEG)) {
		adv = mii_advertise_flowctrl(mii_resolve_flowctrl_fdx(lcladv, rmtadv));
	} else {
		adv = mii_advertise_flowctrl(mii_resolve_flowctrl_fdx(lcladv, lcladv));
	}

	if (netdev->dpmac) {
		mdev_dpmac = netdev->dpmac->mdev;
		dpmac_state.up = phydev->link;
		dpmac_state.rate = (phydev->link && (phydev->speed != SPEED_UNKNOWN)) ? phydev->speed : 0;
		dpmac_state.options  = DPMAC_LINK_OPT_AUTONEG;
		dpmac_state.options |= (phydev->duplex == DUPLEX_HALF) ? DPMAC_LINK_OPT_HALF_DUPLEX : 0;
		dpmac_state.options |= (adv & ADVERTISE_PAUSE_CAP) ? DPMAC_LINK_OPT_PAUSE : 0;
		dpmac_state.options |= (adv & ADVERTISE_PAUSE_ASYM) ? DPMAC_LINK_OPT_ASYM_PAUSE : 0;
		if (dpmac_set_link_state(mdev->mc_io, 0, mdev_dpmac->mc_handle, &dpmac_state)) {
			dev_err(&mdev_dpmac->dev, "failed to set dpmac link state\n");
			goto err;
		}
	} else {
		dpni_cfg.rate = 0;
		dpni_cfg.options = DPNI_LINK_OPT_AUTONEG;
		dpni_cfg.options |= (phydev->duplex == DUPLEX_HALF) ? DPNI_LINK_OPT_HALF_DUPLEX : 0;
		dpni_cfg.options |= (adv & ADVERTISE_PAUSE_CAP) ? DPNI_LINK_OPT_PAUSE : 0;
		dpni_cfg.options |= (adv & ADVERTISE_PAUSE_ASYM) ? DPNI_LINK_OPT_ASYM_PAUSE : 0;
		if ((dpni_set_link_cfg(mdev->mc_io, 0, mdev->mc_handle, &dpni_cfg))) {
			dev_err(&mdev->dev, "failed to set link cfg\n");
			goto err;
		}
	}

	return;
err:
	return;
}

/*
 * dpaa2_netdev_phy_init() - attach a PHY to the interface.
 *
 * If the DPMAC references a PHY firmware node, the real PHY device is
 * looked up and connected. Otherwise a fixed (emulated) PHY is
 * registered, seeded from the current DPNI link configuration, and wired
 * to dpaa2_fixed_phy_link_update() so its state tracks the DPNI.
 *
 * Return: 0 on success, negative errno on failure (everything acquired
 * so far is released on the error path).
 */
static int dpaa2_netdev_phy_init(dpaa2_ndev_t *netdev)
{
	int rc;
	dpmac_dev_t *dpmac;
	struct phy_device *phydev;
	struct fsl_mc_device *mdev, *mdev_dpmac;
	struct dpni_link_cfg dpni_cfg;
	struct fixed_phy_status status;

	dpmac = netdev->dpmac;
	mdev = to_fsl_mc_device(netdev->ndev->dev.parent);

	if (dpmac && dpmac->fn_phy) {
		/* Real PHY described by firmware. */
		mdev_dpmac = dpmac->mdev;
		if (!(phydev = fwnode_phy_find_device(dpmac->fn_phy))) {
			dev_err(&mdev_dpmac->dev, "failed to find phy\n");
			rc = -ENODEV;
			goto err;
		}
	} else {
		/* No PHY node: emulate one, seeded from the DPNI link cfg. */
		if ((rc = dpni_get_link_cfg(mdev->mc_io, 0, mdev->mc_handle, &dpni_cfg))) {
			dev_err(&mdev->dev, "failed to get link cfg\n");
			goto err;
		}

		status.link = 0;
		status.speed = SPEED_1000;
		status.duplex = (dpni_cfg.options & DPNI_LINK_OPT_HALF_DUPLEX) ? DUPLEX_HALF : DUPLEX_FULL;
		status.pause = (dpni_cfg.options & DPNI_LINK_OPT_PAUSE) ? 1 : 0;
		status.asym_pause = (dpni_cfg.options & DPNI_LINK_OPT_ASYM_PAUSE) ? 1 : 0;
		/* fixed_phy_register() dropped one argument in v5.1. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
		phydev = fixed_phy_register(PHY_POLL, &status, -1, NULL);
#else
		phydev = fixed_phy_register(PHY_POLL, &status, NULL);
#endif
		if (IS_ERR(phydev)) {
			dev_err(&mdev->dev, "failed to register fixed phy\n");
			rc = PTR_ERR(phydev);
			phydev = NULL;
			goto err;
		}
	}

	if ((rc = phy_connect_direct(netdev->ndev, phydev, dpaa2_netdev_adjust_link,
				     dpmac ? dpmac->phy_if : PHY_INTERFACE_MODE_INTERNAL))) {
		dev_err(&netdev->ndev->dev, "failed to connect to phy\n");
		goto err_connect;
	}

	/* Keep the emulated PHY in sync with the DPNI link state. */
	if (!dpmac || !dpmac->fn_phy) {
		if ((rc = fixed_phy_set_link_update(phydev, dpaa2_fixed_phy_link_update))) {
			phydev_err(phydev, "failed to set link update callback\n");
			goto err_fp_set;
		}
	}

	return 0;

err_fp_set:
	phy_disconnect(phydev);
err_connect:
	/* Release whichever kind of PHY was obtained above. */
	if (dpmac && dpmac->fn_phy) {
		phy_device_free(phydev);
	} else {
		fixed_phy_unregister(phydev);
	}
err:
	return rc;
}

/* Detach and release the PHY attached by dpaa2_netdev_phy_init(). */
static void dpaa2_netdev_phy_clean(dpaa2_ndev_t *netdev)
{
	struct phy_device *phydev = netdev->ndev->phydev;
	bool real_phy = netdev->dpmac && netdev->dpmac->fn_phy;

	phy_disconnect(phydev);

	if (real_phy) {
		phy_device_free(phydev);
	} else {
		fixed_phy_set_link_update(phydev, NULL);
		fixed_phy_unregister(phydev);
	}
}

/*
 * Enable the DPNI, polling until the hardware reports it enabled, then
 * start the PHY state machine.
 */
static int dpaa2_netdev_start(dpaa2_ndev_t *netdev)
{
	struct fsl_mc_device *mdev = to_fsl_mc_device(netdev->ndev->dev.parent);
	int en = 0;
	int rc;

	do {
		rc = dpni_enable(mdev->mc_io, 0, mdev->mc_handle);
		if (rc) {
			dev_err(&mdev->dev, "failed to enable dpni\n");
			return rc;
		}
		rc = dpni_is_enabled(mdev->mc_io, 0, mdev->mc_handle, &en);
		if (rc) {
			dev_err(&mdev->dev, "failed to check if dpni enabled\n");
			return rc;
		}
		if (!en) {
			msleep(10);
		}
	} while (!en);

	phy_start(netdev->ndev->phydev);

	return 0;
}

/*
 * dpaa2_netdev_stop() - stop the PHY, force the link configuration down
 * (via the DPMAC when one exists, otherwise via the DPNI link cfg), then
 * disable the DPNI, polling until the hardware reports it disabled.
 *
 * Return: 0 on success, negative errno on the first failing MC command.
 */
static int dpaa2_netdev_stop(dpaa2_ndev_t *netdev)
{
	int rc, en;
	struct fsl_mc_device *mdev, *mdev_dpmac;
	struct dpni_link_cfg dpni_cfg;
	struct dpmac_link_state dpmac_state;

	mdev = to_fsl_mc_device(netdev->ndev->dev.parent);

	phy_stop(netdev->ndev->phydev);

	if (netdev->dpmac) {
		/* Report link down to the MAC. */
		mdev_dpmac = netdev->dpmac->mdev;
		dpmac_state.up = 0;
		dpmac_state.rate = 0;
		dpmac_state.options = DPMAC_LINK_OPT_AUTONEG;
		if ((rc = dpmac_set_link_state(mdev->mc_io, 0, mdev_dpmac->mc_handle, &dpmac_state))) {
			dev_err(&mdev_dpmac->dev, "failed to set dpmac link state\n");
			goto err;
		}
	} else {
		/* No MAC: clear the DPNI link configuration instead. */
		dpni_cfg.rate = 0;
		dpni_cfg.options = DPNI_LINK_OPT_AUTONEG;
		if ((rc = dpni_set_link_cfg(mdev->mc_io, 0, mdev->mc_handle, &dpni_cfg))) {
			dev_err(&mdev->dev, "failed to set link cfg\n");
			goto err;
		}
	}

	/* Keep asking until the DPNI actually reports disabled. */
	while (1) {
		if ((rc = dpni_disable(mdev->mc_io, 0, mdev->mc_handle))) {
			dev_err(&mdev->dev, "failed to disable dpni\n");
			goto err;
		}
		if ((rc = dpni_is_enabled(mdev->mc_io, 0, mdev->mc_handle, &en))) {
			dev_err(&mdev->dev, "failed to check if dpni enabled\n");
			goto err;
		}
		if (!en) {
			break;
		}
		msleep(10);
	}

	return 0;
err:
	return rc;
}

/*
 * dpaa2_netdev_probe() - fsl-mc probe callback for dpni objects.
 *
 * Portal-affine setup runs via work_on_cpu(): dpaa2_netdev_init() on the
 * first worker cpu, then a per-cpu DPCON bind on every worker cpu. After
 * that the PHY is attached, the interface started and the stack
 * notified. Only dpni objects parented by the selected DPRC container
 * are accepted.
 */
static int dpaa2_netdev_probe(struct fsl_mc_device *mdev)
{
	int rc;
	unsigned int cpu, last;
	dpaa2_ndev_t *netdev;
	struct net_device *ndev;

	if (mdev->dev.parent != &dprc_mdev->dev) {
		rc = -ENOTSUPP;
		goto err;
	}

	cpu = cpumask_first(worker_cpumask);
	if ((rc = work_on_cpu(cpu, dpaa2_netdev_init, mdev))) {
		goto err;
	}

	ndev = dev_get_drvdata(&mdev->dev);
	netdev = netdev_priv(ndev);
	/*
	 * 'last' is the first cpu whose bind failed (exclusive bound for
	 * the unwind loop); nr_cpu_ids means all cpus bound successfully.
	 */
	last = nr_cpu_ids;
	for_each_cpu(cpu, worker_cpumask) {
		if ((rc = work_on_cpu(cpu, dpaa2_netdev_bind, netdev))) {
			last = cpu;
			goto err_bind;
		}
	}

	if ((rc = dpaa2_netdev_phy_init(netdev))) {
		dev_err(&ndev->dev, "failed to init phy\n");
		goto err_phy_init;
	}

	if ((rc = dpaa2_netdev_start(netdev))) {
		dev_err(&ndev->dev, "failed to start netdev\n");
		goto err_start;
	}

	if ((rc = sb_netdev_notify(ndev, NETDEV_UP, &dpaa2_ndev_ops))) {
		dev_err(&ndev->dev, "failed to notify netdev\n");
		goto err_notify;
	}

	dev_dbg(&ndev->dev, "probed\n");

	return 0;

err_notify:
	dpaa2_netdev_stop(netdev);
	/* Wait for on-the-fly frames to finish */
	msleep(MSEC_PER_SEC * 3);
err_start:
	dpaa2_netdev_phy_clean(netdev);
err_phy_init:
err_bind:
	/* Unbind only the cpus that were successfully bound. */
	for_each_cpu(cpu, worker_cpumask) {
		if (cpu == last) {
			break;
		}
		work_on_cpu(cpu, dpaa2_netdev_unbind, netdev);
	}
	cpu = cpumask_first(worker_cpumask);
	work_on_cpu(cpu, dpaa2_netdev_clean, mdev);
err:
	return rc;
}

/*
 * dpaa2_netdev_remove() - fsl-mc remove callback: tear everything down
 * in reverse probe order. The fsl-mc remove prototype changed from int
 * to void in v6.5, hence the conditional return type.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
static int
#else
static void
#endif
dpaa2_netdev_remove(struct fsl_mc_device *mdev)
{
	unsigned int cpu;
	dpaa2_ndev_t *netdev;
	struct net_device *ndev;

	ndev = dev_get_drvdata(&mdev->dev);
	netdev = netdev_priv(ndev);

	sb_netdev_notify(ndev, NETDEV_GOING_DOWN, &dpaa2_ndev_ops);

	sb_netdev_notify(ndev, NETDEV_DOWN, NULL);

	dpaa2_netdev_stop(netdev);
	/* Wait for on-the-fly frames to finish */
	msleep(MSEC_PER_SEC * 3);

	dpaa2_netdev_phy_clean(netdev);

	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, dpaa2_netdev_unbind, netdev);
	}

	cpu = cpumask_first(worker_cpumask);
	work_on_cpu(cpu, dpaa2_netdev_clean, mdev);

	dev_dbg(&ndev->dev, "removed\n");

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0)
	return 0;
#endif
}

/* Match every Freescale/NXP "dpni" object on the fsl-mc bus. */
static const struct fsl_mc_device_id dpaa2_dpni_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }	/* sentinel */
};

/* fsl-mc driver binding dpni objects to this netdev implementation. */
static struct fsl_mc_driver dpaa2_netdev_driver = {
	.driver = {
		.name = KBUILD_MODNAME "-dpni",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_netdev_probe,
	.remove = dpaa2_netdev_remove,
	.match_id_table = dpaa2_dpni_match_id_table,
};

/*
 * dpaa2_netdev_poll_dqrr() - worker poll hook: service up to
 * poll_dqrr_budget entries from this cpu's portal DQRR.
 *
 * A valid frame dequeue entry carries its dpaa2_fq_t pointer in the FQD
 * context (stored there at queue setup time); that fq's handler consumes
 * the frame.
 */
static void dpaa2_netdev_poll_dqrr(void *arg)
{
	int rc;
	dpaa2_fq_t *fq;
	unsigned int i;
	struct qbman_swp *swp;
	const struct dpaa2_dq __iomem *dq;

	swp = this_cpu_ptr(&cpu_dpaa2_io_infos)->swp;

	for (i = 0; i < poll_dqrr_budget; i++) {
		if (!(dq = qbman_swp_dqrr_next(swp))) {
			/*
			 * Ring empty: issue a final consume with no entry.
			 * NOTE(review): the 'true' flag appears to mark end
			 * of the poll burst — confirm against qbman.h.
			 */
			rc = qbman_swp_dqrr_consume(swp, NULL, true);
			BUG_ON(rc);
			break;
		}
		/* Only frame dequeue results with a valid frame are dispatched. */
		if ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ) {
			if ((dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME)) {
				fq = (dpaa2_fq_t *)dpaa2_dq_fqd_ctx(dq);
				fq->dqrr(swp, fq, dq);
			}
		}
		/* Flag the last consume of the budgeted burst. */
		rc = qbman_swp_dqrr_consume(swp, dq, ((i + 1) == poll_dqrr_budget) ? true : false);
		BUG_ON(rc);
	}
}

/*
 * bus_find_device() match callback: select devices bound to our dpio
 * driver. The callback's arg became const in v5.3.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
static int dpaa2_dpio_match(struct device *dev, void *arg)
#else
static int dpaa2_dpio_match(struct device *dev, const void *arg)
#endif
{
	return dev->driver == &dpaa2_dpio_driver.driver;
}

/*
 * dpaa2_init() - module init.
 *
 * Locates the DPRC container named by dprc_name on the fsl-mc bus,
 * registers the dpio and dpmac sub-drivers, creates a QBMan software
 * portal on every worker cpu, registers the dequeue poll hook, and
 * finally registers the dpni (netdev) driver. Failures unwind in
 * reverse.
 */
static int __init dpaa2_init(void)
{
	int rc;
	unsigned int cpu;
	struct device *dev;

	if ((rc = mtrace_init())) {
		goto err;
	}

	/* With tx confirmations enabled, per-flow polling is not used. */
	if (use_tx_conf) {
		dpaa2_ndev_ops.pcpu_flow_poll = NULL;
	}

	if (!dprc_name) {
		pr_err("DPRC container not specified\n");
		rc = -ENODEV;
		goto err_dprc;
	}

	if (!(dev = bus_find_device_by_name(&fsl_mc_bus_type, NULL, dprc_name))) {
		pr_err("Failed to find DPRC container \"%s\"\n", dprc_name);
		rc = -ENODEV;
		goto err_find_dprc;
	}
	dprc_mdev = to_fsl_mc_device(dev);

	if ((rc = fsl_mc_driver_register(&dpaa2_dpio_driver))) {
		goto err_dpio_drv_reg;
	}

	if ((rc = fsl_mc_driver_register(&dpaa2_dpmac_driver))) {
		goto err_dpmac_drv_reg;
	}

	if ((rc = mc_get_soc_version(dprc_mdev->mc_io, 0, &soc_ver))) {
		pr_err("Failed to get soc version\n");
		goto err_soc_ver;
	}

	/* Hand one dpio device to each worker cpu and build its portal. */
	dev = NULL;
	for_each_cpu(cpu, worker_cpumask) {
		if (!(dev = bus_find_device(&fsl_mc_bus_type, dev, NULL, dpaa2_dpio_match))) {
			pr_err("CPU %u: failed to get DPIO\n", cpu);
			rc = -ENODEV;
			break;
		}
		if ((rc = work_on_cpu(cpu, cpu_qbman_swp_create, to_fsl_mc_device(dev)))) {
			put_device(dev);
			break;
		}
	}
	/* cpu < nr_cpu_ids iff the loop above broke out early. */
	if (cpu < nr_cpu_ids) {
		goto err_swp_create;
	}

	if ((rc = sb_worker_reg_poll(KBUILD_MODNAME, dpaa2_netdev_poll_dqrr, NULL))) {
		goto err_reg_poll;
	}

	if ((rc = fsl_mc_driver_register(&dpaa2_netdev_driver))) {
		goto err_netdev_drv_reg;
	}

	return 0;

err_netdev_drv_reg:
	sb_worker_unreg_poll(KBUILD_MODNAME, dpaa2_netdev_poll_dqrr, NULL);
err_reg_poll:
err_swp_create:
	/*
	 * NOTE(review): destroys portals on all worker cpus, including any
	 * that never got one — assumes cpu_qbman_swp_destroy tolerates that.
	 */
	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, cpu_qbman_swp_destroy, NULL);
	}
err_soc_ver:
	fsl_mc_driver_unregister(&dpaa2_dpmac_driver);
err_dpmac_drv_reg:
	fsl_mc_driver_unregister(&dpaa2_dpio_driver);
err_dpio_drv_reg:
	put_device(&dprc_mdev->dev);
err_find_dprc:
err_dprc:
	mtrace_finish();
err:
	return rc;
}

/* Module exit: unwind dpaa2_init() in reverse order. */
static void __exit dpaa2_exit(void)
{
	unsigned int cpu;

	fsl_mc_driver_unregister(&dpaa2_netdev_driver);
	sb_worker_unreg_poll(KBUILD_MODNAME, dpaa2_netdev_poll_dqrr, NULL);

	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, cpu_qbman_swp_destroy, NULL);
	}

	fsl_mc_driver_unregister(&dpaa2_dpmac_driver);
	fsl_mc_driver_unregister(&dpaa2_dpio_driver);
	put_device(&dprc_mdev->dev);
	mtrace_finish();
}

/* Module entry points and metadata. */
module_init(dpaa2_init);
module_exit(dpaa2_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DPAA2 driver for simplebits");
