/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2012-2020. All rights reserved.
 * Description:
 * Author: huawei
 * Create: 2018-05-10
 */
#ifdef CONFIG_GENERIC_BUG
#undef CONFIG_GENERIC_BUG
#endif
#ifdef CONFIG_BUG
#undef CONFIG_BUG
#endif

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/platform_device.h>
#include <linux/capability.h>
#include <linux/time.h>
#include <asm/setup.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/version.h>

#include <linux/circ_buf.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>

#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/of_address.h>

#include "util.h"
#include "higmac.h"
#include "drv_profile.h"
#include "drv_log.h"
#include "devdrv_dfm.h"
#include "securec.h"
#include "hns_mdio.h"

#define HIGMAC_LOG_PREFIX "higmac"
#define higmac_err(fmt...) drv_err(HIGMAC_LOG_PREFIX, fmt)
#define higmac_warn(fmt...) drv_warn(HIGMAC_LOG_PREFIX, fmt)
#define higmac_info(fmt...) drv_info(HIGMAC_LOG_PREFIX, fmt)
#define higmac_debug(fmt...) drv_debug(HIGMAC_LOG_PREFIX, fmt)

#define HAS_TSO_CAP(hw_cap) ((((hw_cap) >> 28) & 0x3) == VER_TSO)
#define HAS_RXHASH_CAP(hw_cap) ((hw_cap)&BIT(30))
#define HAS_RSS_CAP(hw_cap) ((hw_cap)&BIT(31))

#define RGMII_SPEED_1000 0x2c
#define RGMII_SPEED_100 0x2f
#define RGMII_SPEED_10 0x2d
#define MII_SPEED_100 0x0f
#define MII_SPEED_10 0x0d
#define RMII_SPEED_100 0x8f
#define RMII_SPEED_10 0x8d
#define GMAC_FULL_DUPLEX BIT(4)

#define HIGMAC_PAUSE_DEFAULT 0

/* Flow-control defaults; the CONFIG_* values are build-time settings. */
static unsigned int flow_ctrl_en = FLOW_OFF;
static int tx_flow_ctrl_pause_time = CONFIG_TX_FLOW_CTRL_PAUSE_TIME;
static int tx_flow_ctrl_pause_interval = CONFIG_TX_FLOW_CTRL_PAUSE_INTERVAL;
static int tx_flow_ctrl_active_threshold = CONFIG_TX_FLOW_CTRL_ACTIVE_THRESHOLD;
static int tx_flow_ctrl_deactive_threshold = CONFIG_TX_FLOW_CTRL_DEACTIVE_THRESHOLD;
/* Forward declaration: the definition lives later in this file. */
static inline void higmac_enable_rxcsum_drop(struct higmac_netdev_local *ld, bool drop);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_LINK) /* NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK */
/* Module parameter: netif message level; -1 means "use driver default". */
static int debug = -1;
module_param(debug, int, 0000);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* Bookkeeping for RDR (reliability data recorder) register dumps. */
struct higmac_rdr_dump_mng rdr_mng = { 0 };

void higmac_dfm_error(const char *err_info, u32 e_id);

/* *********************** rgmii mbist test ********************** */
#define SC_MBIST_CPUI_DATAOUT 0x7500
#define SC_MBIST_CPUI_DATAIN 0x3504
#define SC_MBIST_CPUI_WRITE_EN 0x3508

#define SC_MBIST_CPUI_ENABLE 0x3500
#define SC_MBIST_CPUI_SMS_FUNC_RESET 0x350c

#define SC_MBIST_CPUI_CLK_SEL 0x0108
#define SC_FUNC_MBIST_CLK_EN 0x0620
#define SC_FUNC_MBIST_CLK_DIS 0x0624
#define SC_FUNC_MBIST_RESET_DREQ 0x0d24

static void __iomem *io_sub_sys_base;

/*
 * Push one 32-bit command word to the MBIST controller.
 * Polls SC_MBIST_CPUI_DATAOUT until the ready flag (bit 30) is set,
 * then writes the word to DATAIN and pulses the write-enable register.
 * Gives up with an error log after ~10 seconds of polling.
 */
void CPUWrite(u32 value)
{
    u32 i = 0;
    u32 tmp;

    /* wait for the controller's ready bit (bit 30) */
    tmp = readl_relaxed(io_sub_sys_base + SC_MBIST_CPUI_DATAOUT);
    while ((tmp & 0x40000000L) != 0x40000000L) {
        i++;
        msleep(1);
        if (i >= 10000) {
            higmac_err("[CPUWrite]time out\n");
            return;
        }
        tmp = readl_relaxed(io_sub_sys_base + SC_MBIST_CPUI_DATAOUT);
    }

    writel_relaxed(value, (io_sub_sys_base + SC_MBIST_CPUI_DATAIN));
    msleep(1);
    /* pulse write-enable: assert, hold briefly, then deassert */
    writel_relaxed(0x01, (io_sub_sys_base + SC_MBIST_CPUI_WRITE_EN));
    msleep(3);
    writel_relaxed(0x00, (io_sub_sys_base + SC_MBIST_CPUI_WRITE_EN));
}

/*
 * Read one MBIST result word.  Polls DATAOUT until the ready flag
 * (bit 30) is set; on success returns the low 30 data bits with
 * bit 30 forced on.  On timeout returns 1 — an in-band error value
 * with bit 30 clear, so callers that mask-compare (CPURead_code)
 * will see a timeout as a pattern mismatch.
 */
u32 CPURead(void)
{
    u32 i = 0;
    u32 tmp;

    tmp = readl_relaxed(io_sub_sys_base + SC_MBIST_CPUI_DATAOUT);
    while ((tmp & 0x40000000L) != 0x40000000L) {
        i++;
        msleep(1);
        if (i >= 10000) {
            higmac_err("[CPURead]time out\n");
            return 1;
        }
        tmp = readl_relaxed(io_sub_sys_base + SC_MBIST_CPUI_DATAOUT);
    }

    /* keep the 30 data bits and re-assert the ready bit in the result */
    tmp = tmp & 0x3fffffff;
    tmp = tmp | 0x40000000;

    return tmp;
}

/* Pace the MBIST controller: sleep ~10us per unit, with 10% slack. */
void CPUWait(u32 value)
{
    u32 min_us = 10 * value;

    usleep_range(min_us, min_us + value);
}

/*
 * Encode an ASCII bit-string (e.g. "0100...") into a u32, first
 * character being the most-significant bit, and push it to the
 * MBIST controller via CPUWrite().
 * Characters are expected to be '0' or '1'; patterns longer than
 * 32 bits have their excess leading bits ignored.
 */
void CPUWrite_code(const char *code)
{
    u32 value = 0;
    u32 i, bit, bit_val, len;

    len = strlen(code);
    for (i = 0; i < len; i++) {
        bit_val = code[i] - '0';
        bit = len - 1 - i;
        /* shifting a u32 by >= 32 is undefined behavior; skip bits
         * that do not fit when the pattern string is over-long */
        if (bit >= 32)
            continue;
        value |= (bit_val << bit);
    }

    CPUWrite(value);
}

/*
 * Read one MBIST result word and mask-compare it against an ASCII
 * pattern: '0'/'1' are compared bits, 'x' is a don't-care bit.
 * Returns 0 on match, -1 on mismatch (logged with a running compare
 * counter).  Patterns longer than 32 bits have their excess leading
 * bits ignored (a shift by >= 32 would be undefined behavior).
 */
int CPURead_code(const char *code)
{
    u32 value = 0;
    u32 mask_val = 0;
    u32 i, bit, bit_val, mask_bit_val, len;
    static u32 global_counter; /* total compares performed, for the log */
    u32 actual_val;

    len = strlen(code);
    for (i = 0; i < len; i++) {
        if (code[i] == 'x') {
            /* don't-care bit: excluded from the compare mask */
            bit_val = 0;
            mask_bit_val = 0;
        } else {
            bit_val = code[i] - '0';
            mask_bit_val = 1;
        }

        bit = len - 1 - i;
        if (bit >= 32) /* avoid UB shift on over-long patterns */
            continue;
        value |= (bit_val << bit);
        mask_val |= (mask_bit_val << bit);
    }

    global_counter++;
    actual_val = CPURead();
    if ((actual_val & mask_val) != value) {
        /* %u: global_counter is u32, %d would mismatch the type */
        higmac_warn("compare time %u, value 0x%x:0x%x mask 0x%x is not expect.\n", global_counter, actual_val, value,
            mask_val);
        return -1;
    } else {
        return 0;
    }
}

/* Write rst (1 = released / 0 = asserted per mbist_setup's usage)
 * to the MBIST SMS function-reset register. */
void mbist_rst(u32 rst)
{
    writel_relaxed(rst, (io_sub_sys_base + SC_MBIST_CPUI_SMS_FUNC_RESET));
}

/*
 * Prepare the MBIST controller: enable the CPUI interface, select its
 * clock, gate the function MBIST clock on, release the reset request,
 * then toggle the SMS function reset with settling delays.
 * Register order matters; do not reorder these writes.
 */
void mbist_setup(void)
{
    writel_relaxed(0x1, (io_sub_sys_base + SC_MBIST_CPUI_ENABLE));
    writel_relaxed(0x0, (io_sub_sys_base + SC_MBIST_CPUI_CLK_SEL));
    writel_relaxed(0x1, (io_sub_sys_base + SC_FUNC_MBIST_CLK_EN));
    writel_relaxed(0x0, (io_sub_sys_base + SC_FUNC_MBIST_CLK_DIS));
    writel_relaxed(0x1, (io_sub_sys_base + SC_FUNC_MBIST_RESET_DREQ));

    /* pulse the SMS function reset: release, assert, release */
    mbist_rst(1);
    CPUWait(20);
    mbist_rst(0);
    CPUWait(20);
    mbist_rst(1);
    CPUWait(20);
}

/*
 * Run the full RGMII MBIST sequence (PHY present).
 * Each CPUWrite_code() pushes one 32-bit command/pattern word (see
 * CPUWrite_code for the MSB-first bit-string encoding); CPUWait(32)
 * paces the controller between words.  CPURead_code() mask-compares
 * the controller's result words ('x' = don't care).
 * Returns 0 when every compare matches; negative otherwise (each
 * mismatch contributes -1 to the return value).
 * The pattern words are opaque test vectors — do not reorder or edit.
 */
int rgmii_mbist(void)
{
    int ret = 0;

    /* controller configuration phase */
    CPUWrite_code("00000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10001000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10101000010000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10111111110000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100100010000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10111111111111011111111111111100");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100011110000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000010000100000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10101000000000000001000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000001000100000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10101000000000011111110000100000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000111100000000000");
    CPUWait(32);

    /* kick the test and give the BIST engine time to run */
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("01010100010100010000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000000000100010100");
    CPUWait(32);
    CPUWait(7990);

    /* collect and check first result group */
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    ret += CPURead_code("01xx101xxxx101xxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    ret += CPURead_code("01xxxxxxxxxxxxxxxxxx101xxxx101xx");
    CPUWrite_code("10100000000000010000000000000000");
    CPUWait(32);
    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");

    /* second test pass */
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("01100110100110100000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000000000110100110");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("0100xxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxx00xxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("010000xxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("0100000000000000xxxxxxxxxxxxxxxx");
    CPUWrite_code("10100000000000010000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");

    /* wind the controller down */
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("10010000000000000000000000000000");
    CPUWait(32);

    higmac_info("rgmii mbist finish\n");
    return ret;
}

/*
 * RGMII MBIST sequence for the bypass (no-PHY) configuration.
 * Same command/pattern protocol as rgmii_mbist(); only the test
 * vectors and expected result masks differ.  Returns 0 when every
 * compare matches, negative otherwise.
 * The pattern words are opaque test vectors — do not reorder or edit.
 */
int rgmii_mbist_bypass(void)
{
    int ret = 0;

    /* controller configuration phase */
    CPUWrite_code("00000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10001000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10101000010000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10111111110000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100100010000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10111111111111011111111111111100");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100011110000000000000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000010000100000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10101000000000000001000000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000001000100000000000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10101000000000011111110010100000");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000111100000000000");
    CPUWait(32);

    /* kick the test and give the BIST engine time to run */
    CPUWrite_code("01010010001000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000000000100010100");
    CPUWait(32);
    CPUWait(7990);

    /* collect and check first result group */
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    ret += CPURead_code("01x101xxxxx101xxxxxxxxxxxxxxxxxx");
    CPUWrite_code("10100000000000010000000000000000");
    CPUWait(32);
    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxx101xxx");

    /* second test pass */
    CPUWrite_code("01100011010000000000000000000000");
    CPUWait(32);
    CPUWrite_code("10100000000000000000000110100110");
    CPUWait(32);

    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);
    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("0100xxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxx0000xxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    ret += CPURead_code("01xxxxxxxxxxxxxx00000000000000xx");
    CPUWrite_code("10100000000000010000000000000000");
    CPUWait(32);
    ret += CPURead_code("01xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");

    /* wind the controller down */
    CPUWrite_code("01000000000000000000000000000000");
    CPUWait(32);

    CPUWrite_code("10010000000000000000000000000000");
    CPUWait(32);

    higmac_info("rgmii mbist bypass finish\n");
    return ret;
}

int start_mbist(struct higmac_netdev_local *priv, int has_phy)
{
    int ret;

    io_sub_sys_base = priv->sysctrl_base;
    if (io_sub_sys_base) {
        mbist_rst(0);
        mbist_setup();

        if (has_phy) {
            ret = rgmii_mbist();
        } else {
            ret = rgmii_mbist_bypass();
        }

        if (ret) {
            higmac_warn("rgmii mbist failed\n");
            return 0; /* return 0 also */
        }

        higmac_info("rgmii mbist success\n");
    }

    return 0;
}


/* *********************** rgmii mbist test end ********************** */

/* Enable all RGMII clocks via the system-control clock-enable register. */
static void higmac_clk_enable(struct higmac_netdev_local *priv)
{
    writel_relaxed(BITS_SC_RGMII_CLK_ALL, priv->sysctrl_base + SC_RGMII_CLK_EN);
}

/* Enable the RGMII clocks only if any of them is currently gated off. */
static void higmac_clk_test_and_enable(struct higmac_netdev_local *priv)
{
    u32 val = readl_relaxed(priv->sysctrl_base + SC_MII_CLK_ST);
    /* some clk is disabled */
    if ((val & BITS_SC_RGMII_CLK_ALL) != BITS_SC_RGMII_CLK_ALL)
        higmac_clk_enable(priv);
}

/* Gate off all RGMII clocks via the clock-disable register. */
static void higmac_clk_disable(struct higmac_netdev_local *priv)
{
    writel_relaxed(BITS_SC_RGMII_CLK_ALL, priv->sysctrl_base + SC_RGMII_CLK_DIS);
}

/* Assert the MII reset for all MAC blocks. */
static void higmac_reset_all(struct higmac_netdev_local *priv)
{
    /* reset */
    writel_relaxed(BITS_SC_MII_RESET_ALL, priv->sysctrl_base + SC_MII_RESET_REQ);
}

/* Deassert the MII reset for all MAC blocks. */
static void higmac_unreset_all(struct higmac_netdev_local *priv)
{
    /* unreset */
    writel_relaxed(BITS_SC_MII_RESET_ALL, priv->sysctrl_base + SC_MII_RESET_DREQ);
}

/* Assert the MAC interface (macif) reset only. */
static void higmac_reset_macif(struct higmac_netdev_local *priv)
{
    /* SC_MII_RESET_REQ write 1 to reset, write 0 not available */
    writel_relaxed(BIT_SC_MII_RESET_MACIF, priv->sysctrl_base + SC_MII_RESET_REQ);
}

/* Deassert the MAC interface (macif) reset only. */
static void higmac_unreset_macif(struct higmac_netdev_local *priv)
{
    /* SC_MII_RESET_DREQ write 1 to unreset, write 0 not available */
    writel_relaxed(BIT_SC_MII_RESET_MACIF, priv->sysctrl_base + SC_MII_RESET_DREQ);
}

/*
 * Full MAC core reset: make sure the block is out of reset, pulse the
 * reset, then release it again, with settling delays between steps.
 */
void higmac_mac_core_reset(struct higmac_netdev_local *priv)
{
    /* undo reset */
    higmac_unreset_all(priv);
    usleep_range(50, 60);

    /* soft reset mac port */
    higmac_reset_all(priv);
    usleep_range(50, 60);

    /* undo reset */
    higmac_unreset_all(priv);
    usleep_range(50, 60);
}

/* Intentionally empty: no hardware PHY reset on this platform. */
void higmac_hw_phy_reset(struct higmac_netdev_local *priv)
{
}

void higmac_hw_all_clk_disable(struct higmac_netdev_local *priv)
{
    /* If macif clock is enabled when suspend, we should
     * disable it here.
     * Because when resume, PHY will link up again and
     * macif clock will be enabled too. If we don't disable
     * macif clock in suspend, macif clock will be enabled twice.
     * This is called in suspend, when net device is down,
     * MAC clk is disabled.
     * So we need to judge whether MAC clk is enabled,
     * otherwise kernel will WARNING if clk disable twice.
     */
    if (priv->netdev->flags & IFF_UP)
        higmac_clk_disable(priv);
}

STATIC void higmac_hw_all_clk_enable(struct higmac_netdev_local *priv)
{
    /* If net device is down when suspend, we should not enable MAC clk. */
    if (priv->netdev->flags & IFF_UP)
        higmac_clk_enable(priv);
}

/* No PHY attached: link state is always reported as up. */
STATIC unsigned int higmac_auto_nego_link(struct higmac_netdev_local *priv)
{
    return HIGMAC_LINKED;
}

/* No PHY attached: speed comes from user configuration. */
static unsigned int higmac_auto_nego_speed(struct higmac_netdev_local *priv)
{
    return (unsigned int)priv->cfg_speed; /* No phy, fix user config */
}

/* No PHY attached: duplex comes from user configuration. */
static unsigned int higmac_auto_nego_duplex(struct higmac_netdev_local *priv)
{
    return (unsigned int)priv->cfg_duplex; /* No phy, fix user config */
}

/*
 * Translate (interface mode, link speed) into the MAC_IF_STAT_CTRL0
 * speed/interface configuration value.  Unsupported modes log a
 * warning and fall back to MII 10M.
 */
STATIC u32 higmac_config_val(phy_interface_t phy_mode, u32 speed)
{
    if (phy_mode == PHY_INTERFACE_MODE_RGMII) {
        if (speed == SPEED_1000)
            return RGMII_SPEED_1000;
        return (speed == SPEED_100) ? RGMII_SPEED_100 : RGMII_SPEED_10;
    }

    if (phy_mode == PHY_INTERFACE_MODE_MII)
        return (speed == SPEED_100) ? MII_SPEED_100 : MII_SPEED_10;

    if (phy_mode == PHY_INTERFACE_MODE_RMII)
        return (speed == SPEED_100) ? RMII_SPEED_100 : RMII_SPEED_10;

    higmac_warn("not supported mode\n");
    return MII_SPEED_10;
}

/*
 * Apply speed/duplex to the MAC: program the interface-control value
 * inside a macif reset window, then switch the port mode with the
 * mode-change enable asserted, and finally set the duplex register.
 */
static void higmac_config_port(struct net_device *dev, u32 speed, u32 duplex)
{
    struct higmac_netdev_local *priv = netdev_priv(dev);
    u32 val;

    val = higmac_config_val(priv->phy_mode, speed);

    if (duplex)
        val |= GMAC_FULL_DUPLEX;

    /* interface config must be written while macif is held in reset */
    higmac_reset_macif(priv);
    writel_relaxed(val, priv->gmac_iobase + MAC_IF_STAT_CTRL0);
    higmac_unreset_macif(priv);

    /* port-mode change is gated by MODE_CHANGE_EN */
    writel_relaxed(BIT_MODE_CHANGE_EN, priv->gmac_iobase + MODE_CHANGE_EN);
    if (speed == SPEED_1000)
        val = GMAC_SPEED_1000;
    else if (speed == SPEED_100)
        val = GMAC_SPEED_100;
    else
        val = GMAC_SPEED_10;
    writel_relaxed(val, priv->gmac_iobase + PORT_MODE);
    writel_relaxed(0, priv->gmac_iobase + MODE_CHANGE_EN);
    writel_relaxed(duplex, priv->gmac_iobase + MAC_DUPLEX_HALF_CTRL);
}

/*
 * Program the RX/TX descriptor queue depths.  Each depth register may
 * only be written while its *_REG_EN enable window is open.
 * NOTE(review): the read-modify-write clears only the Q_ADDR_HI8
 * field and ORs the new depth in without clearing old depth bits —
 * presumably the depth field is zero at init time; confirm against
 * the register reset values.
 */
static void higmac_set_desc_depth(struct higmac_netdev_local *priv, u32 rx, u32 tx)
{
    u32 reg;
    int i;
    u32 val;

    writel(BITS_RX_FQ_DEPTH_EN, priv->gmac_iobase + RX_FQ_REG_EN);
    val = readl(priv->gmac_iobase + RX_FQ_DEPTH);
    val &= ~Q_ADDR_HI8_MASK;
    val |= rx << DESC_WORD_SHIFT;
    writel(val, priv->gmac_iobase + RX_FQ_DEPTH);
    writel(0, priv->gmac_iobase + RX_FQ_REG_EN);

    writel(BITS_RX_BQ_DEPTH_EN, priv->gmac_iobase + RX_BQ_REG_EN);
    val = readl(priv->gmac_iobase + RX_BQ_DEPTH);
    val &= ~Q_ADDR_HI8_MASK;
    val |= rx << DESC_WORD_SHIFT;
    writel(val, priv->gmac_iobase + RX_BQ_DEPTH);
    /* RSS: the extra RX back queues share one enable window */
    for (i = 1; i < priv->num_rxqs; i++) {
        reg = RX_BQ_DEPTH_QUEUE(i);
        val = readl(priv->gmac_iobase + reg);
        val &= ~Q_ADDR_HI8_MASK;
        val |= rx << DESC_WORD_SHIFT;
        writel(val, priv->gmac_iobase + reg);
    }
    writel(0, priv->gmac_iobase + RX_BQ_REG_EN);

    writel(BITS_TX_BQ_DEPTH_EN, priv->gmac_iobase + TX_BQ_REG_EN);
    val = readl(priv->gmac_iobase + TX_BQ_DEPTH);
    val &= ~Q_ADDR_HI8_MASK;
    val |= tx << DESC_WORD_SHIFT;
    writel(val, priv->gmac_iobase + TX_BQ_DEPTH);
    writel(0, priv->gmac_iobase + TX_BQ_REG_EN);

    writel(BITS_TX_RQ_DEPTH_EN, priv->gmac_iobase + TX_RQ_REG_EN);
    val = readl(priv->gmac_iobase + TX_RQ_DEPTH);
    val &= ~Q_ADDR_HI8_MASK;
    val |= tx << DESC_WORD_SHIFT;
    writel(val, priv->gmac_iobase + TX_RQ_DEPTH);
    writel(0, priv->gmac_iobase + TX_RQ_REG_EN);
}

/*
 * Program the RX free-queue descriptor base address.  On 64-bit DDR
 * builds the upper 8 address bits live in the queue's DEPTH register.
 * NOTE(review): the mask here is `val &= Q_ADDR_HI8_MASK` (keep only
 * the HI8 field), the opposite polarity of higmac_set_desc_depth();
 * one of the two presumably intends the inverse — confirm against the
 * register layout before changing either.
 */
static void higmac_set_rx_fq(struct higmac_netdev_local *priv, dma_addr_t phy_addr)
{
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    u32 val;
#endif
    writel(BITS_RX_FQ_START_ADDR_EN, priv->gmac_iobase + RX_FQ_REG_EN);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    val = readl(priv->gmac_iobase + RX_FQ_DEPTH);
    val &= Q_ADDR_HI8_MASK;
    val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET;
    writel(val, priv->gmac_iobase + RX_FQ_DEPTH);
#endif
    writel((u32)phy_addr, priv->gmac_iobase + RX_FQ_START_ADDR);
    writel(0, priv->gmac_iobase + RX_FQ_REG_EN);
}

/* Program the RX back-queue descriptor base address (see higmac_set_rx_fq). */
static void higmac_set_rx_bq(struct higmac_netdev_local *priv, dma_addr_t phy_addr)
{
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    u32 val;
#endif
    writel(BITS_RX_BQ_START_ADDR_EN, priv->gmac_iobase + RX_BQ_REG_EN);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    val = readl(priv->gmac_iobase + RX_BQ_DEPTH);
    val &= Q_ADDR_HI8_MASK;
    val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET;
    writel(val, priv->gmac_iobase + RX_BQ_DEPTH);
#endif
    writel((u32)phy_addr, priv->gmac_iobase + RX_BQ_START_ADDR);
    writel(0, priv->gmac_iobase + RX_BQ_REG_EN);
}

/* Program the TX back-queue descriptor base address (see higmac_set_rx_fq). */
static void higmac_set_tx_bq(struct higmac_netdev_local *priv, dma_addr_t phy_addr)
{
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    u32 val;
#endif
    writel(BITS_TX_BQ_START_ADDR_EN, priv->gmac_iobase + TX_BQ_REG_EN);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    val = readl(priv->gmac_iobase + TX_BQ_DEPTH);
    val &= Q_ADDR_HI8_MASK;
    val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET;
    writel(val, priv->gmac_iobase + TX_BQ_DEPTH);
#endif
    writel((u32)phy_addr, priv->gmac_iobase + TX_BQ_START_ADDR);
    writel(0, priv->gmac_iobase + TX_BQ_REG_EN);
}

/* Program the TX return-queue descriptor base address (see higmac_set_rx_fq). */
static void higmac_set_tx_rq(struct higmac_netdev_local *priv, dma_addr_t phy_addr)
{
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    u32 val;
#endif
    writel(BITS_TX_RQ_START_ADDR_EN, priv->gmac_iobase + TX_RQ_REG_EN);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    val = readl(priv->gmac_iobase + TX_RQ_DEPTH);
    val &= Q_ADDR_HI8_MASK;
    val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET;
    writel(val, priv->gmac_iobase + TX_RQ_DEPTH);
#endif
    writel((u32)phy_addr, priv->gmac_iobase + TX_RQ_START_ADDR);
    writel(0, priv->gmac_iobase + TX_RQ_REG_EN);
}

/*
 * Program descriptor ring base addresses for all queues, including the
 * extra RSS RX back queues (pool slots 3+i hold their rings).
 * NOTE(review): in the 64-bit branch of the loop, both the HI8-bits
 * word and the low 32-bit address are written to the same register
 * `reg`, so the second write overwrites the first — the other queue
 * setters put the HI8 bits in the queue's DEPTH register instead.
 * Looks like the HI8 write should target RX_BQ_DEPTH_QUEUE(i); confirm
 * against the register map.
 */
static void higmac_hw_set_desc_addr(struct higmac_netdev_local *priv)
{
    u32 reg;
    int i;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    u32 val;
#endif

    higmac_set_rx_fq(priv, priv->rx_fq.phys_addr);
    higmac_set_rx_bq(priv, priv->rx_bq.phys_addr);
    higmac_set_tx_rq(priv, priv->tx_rq.phys_addr);
    higmac_set_tx_bq(priv, priv->tx_bq.phys_addr);

    for (i = 1; i < priv->num_rxqs; i++) {
        reg = RX_BQ_START_ADDR_QUEUE(i);
        writel(BITS_RX_BQ_START_ADDR_EN, priv->gmac_iobase + RX_BQ_REG_EN);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
        val = readl(priv->gmac_iobase + reg);
        val &= Q_ADDR_HI8_MASK;
        val |= ((priv->pool[3 + i].phys_addr) >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET;
        writel(val, priv->gmac_iobase + reg);
#endif
        writel((u32)(priv->pool[3 + i].phys_addr), priv->gmac_iobase + reg);
        writel(0, priv->gmac_iobase + RX_BQ_REG_EN);
    }
}

/* Advertise the RX-hash / RSS hardware capabilities to the MAC. */
static void higmac_set_rss_cap(struct higmac_netdev_local *priv)
{
    u32 cap_bits;

    cap_bits = priv->has_rxhash_cap ? BIT_RXHASH_CAP : 0;
    if (priv->has_rss_cap)
        cap_bits |= BIT_RSS_CAP;
    writel(cap_bits, priv->gmac_iobase + HW_CAP_EN);
}

/* Update the queue-threshold field of the register at @offset,
 * preserving the other bits. */
static void higmac_set_queue_th(struct higmac_netdev_local *ld, unsigned int threshold, unsigned int offset)
{
    unsigned int reg_val = readl(ld->gmac_iobase + offset);

    reg_val &= ~(BITS_QUEUE_TH_MASK << BITS_QUEUE_TH_OFFSET);
    reg_val |= (threshold & BITS_QUEUE_TH_MASK) << BITS_QUEUE_TH_OFFSET;
    writel(reg_val, ld->gmac_iobase + offset);
}

/* Program the almost-empty (low) and almost-full (high) thresholds
 * for every descriptor queue. */
static void higmac_queue_threshold(struct higmac_netdev_local *ld)
{
    static const unsigned int alempty_regs[] = {
        RX_FQ_ALEMPTY_TH, RX_BQ_ALEMPTY_TH, TX_BQ_ALEMPTY_TH, TX_RQ_ALEMPTY_TH,
    };
    static const unsigned int alfull_regs[] = {
        RX_FQ_ALFULL_TH, RX_BQ_ALFULL_TH, TX_BQ_ALFULL_TH, TX_RQ_ALFULL_TH,
    };
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(alempty_regs); i++) {
        higmac_set_queue_th(ld, QUEUE_THRESHOLD_LOW, alempty_regs[i]);
        higmac_set_queue_th(ld, QUEUE_THRESHOLD_HIGH, alfull_regs[i]);
    }
}

/* Turn on the AWUSER/ARUSER enables for the RGMII AXI master in the
 * system controller. */
static void higmac_sc_user_init(struct higmac_netdev_local *ld)
{
    unsigned int reg_val;

    reg_val = readl(ld->sysctrl_base + SC_CFG_AWUSER_M_RGMII);
    writel(reg_val | BIT_SC_AWRUSER_EN, ld->sysctrl_base + SC_CFG_AWUSER_M_RGMII);

    reg_val = readl(ld->sysctrl_base + SC_CFG_ARUSER_M_RGMII);
    writel(reg_val | BIT_SC_AWRUSER_EN, ld->sysctrl_base + SC_CFG_ARUSER_M_RGMII);
}

/*
 * One-time MAC hardware initialisation: bus/cache settings, interrupt
 * masking, packet filters, timing thresholds, and descriptor depths.
 * Called before the port is enabled.
 */
static void higmac_hw_init(struct higmac_netdev_local *priv)
{
    u32 val;
    u32 reg;
    int i;

    /* enable read/write channel cache */
    val = readl(priv->gmac_iobase + BURST_OUTSTANDING_REG);
    val |= (WR_CHANNEL_CACHE_MASK << WR_CHANNEL_CACHE_OFFSET);
    writel(val, priv->gmac_iobase + BURST_OUTSTANDING_REG);

    /* disable and clear all interrupts */
    writel(0, priv->gmac_iobase + ENA_PMU_INT);
    writel(~0, priv->gmac_iobase + RAW_PMU_INT);

    /* same for the per-queue RSS interrupts */
    for (i = 1; i < priv->num_rxqs; i++) {
        reg = RSS_ENA_INT_QUEUE(i);
        writel(0, priv->gmac_iobase + reg);
    }
    writel(~0, priv->gmac_iobase + RSS_RAW_PMU_INT);

    /* enable CRC error packets filter */
    val = readl(priv->gmac_iobase + REC_FILT_CONTROL);
    val |= BIT_CRC_ERR_PASS;
    writel(val, priv->gmac_iobase + REC_FILT_CONTROL);

    /* set tx min packet length */
    val = readl(priv->gmac_iobase + CRF_MIN_PACKET);
    val &= ~BIT_MASK_TX_MIN_LEN;
    val |= ETH_HLEN << BIT_OFFSET_TX_MIN_LEN;
    writel(val, priv->gmac_iobase + CRF_MIN_PACKET);

    /* fix bug for udp and ip error check */
    writel(CONTROL_WORD_CONFIG, priv->gmac_iobase + CONTROL_WORD);

    writel(0, priv->gmac_iobase + COL_SLOT_TIME);

    writel(DUPLEX_HALF, priv->gmac_iobase + MAC_DUPLEX_HALF_CTRL);

    /* interrupt coalescing: packet-count thresholds ... */
    val = RX_BQ_INT_THRESHOLD | (TX_RQ_INT_THRESHOLD << BITS_OFFSET_TX_RQ_IN_TH);
    writel(val, priv->gmac_iobase + IN_QUEUE_TH);

    /* ... and timeout thresholds */
    writel(RX_RQ_IN_TIMEOUT_CNT, priv->gmac_iobase + RX_BQ_IN_TIMEOUT_TH);

    writel(TX_RQ_IN_TIMEOUT_CNT, priv->gmac_iobase + TX_RQ_IN_TIMEOUT_TH);

    higmac_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);

    higmac_queue_threshold(priv);

    higmac_sc_user_init(priv);
}

/* Enable the default (queue-0) MAC interrupt sources.
 * NOTE(review): BIT(16) | BIT(18) are unnamed sources — presumably
 * error/status interrupts; confirm against the ENA_PMU_INT layout. */
static inline void higmac_irq_enable(struct higmac_netdev_local *ld)
{
    writel(BIT(16) | BIT(18) | RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT,
        ld->gmac_iobase + ENA_PMU_INT);
}

/* Enable interrupts for one RX queue (queue 0 uses the PMU register,
 * other queues the per-queue RSS registers). */
static inline void higmac_irq_enable_queue(struct higmac_netdev_local *ld, int rxq_id)
{
    if (rxq_id) {
        u32 reg;

        reg = RSS_ENA_INT_QUEUE(rxq_id);
        writel(~0, ld->gmac_iobase + reg);
    } else {
        higmac_irq_enable(ld);
    }
}

/* Enable interrupts on every RX queue. */
static inline void higmac_irq_enable_all_queue(struct higmac_netdev_local *ld)
{
    int i;

    for (i = 0; i < ld->num_rxqs; i++)
        higmac_irq_enable_queue(ld, i);
}

/* Mask all default (queue-0) MAC interrupt sources. */
static inline void higmac_irq_disable(struct higmac_netdev_local *ld)
{
    writel(0, ld->gmac_iobase + ENA_PMU_INT);
}

/* Mask interrupts for one RX queue. */
static inline void higmac_irq_disable_queue(struct higmac_netdev_local *ld, int rxq_id)
{
    if (rxq_id) {
        u32 reg;

        reg = RSS_ENA_INT_QUEUE(rxq_id);
        writel(0, ld->gmac_iobase + reg);
    } else {
        higmac_irq_disable(ld);
    }
}

/* Mask interrupts on every RX queue. */
static inline void higmac_irq_disable_all_queue(struct higmac_netdev_local *ld)
{
    int i;

    for (i = 0; i < ld->num_rxqs; i++)
        higmac_irq_disable_queue(ld, i);
}

/* Return true when a queue has all of its interrupt sources masked. */
static inline bool higmac_queue_irq_disabled(struct higmac_netdev_local *ld, int rxq_id)
{
    u32 reg, val;

    if (rxq_id)
        reg = RSS_ENA_INT_QUEUE(rxq_id);
    else
        reg = ENA_PMU_INT;
    val = readl(ld->gmac_iobase + reg);

    return !val;
}

/* Enable descriptor read/write DMA for all four queues (one bit each). */
static inline void higmac_hw_desc_enable(struct higmac_netdev_local *ld)
{
    writel(0xF, ld->gmac_iobase + DESC_WR_RD_ENA);
}

/* Stop descriptor read/write DMA. */
static inline void higmac_hw_desc_disable(struct higmac_netdev_local *ld)
{
    writel(0, ld->gmac_iobase + DESC_WR_RD_ENA);
}

/* Enable the MAC port in both directions. */
static inline void higmac_port_enable(struct higmac_netdev_local *ld)
{
    writel(BITS_TX_EN | BITS_RX_EN, ld->gmac_iobase + PORT_EN);
}

/* Disable the MAC port in both directions. */
static inline void higmac_port_disable(struct higmac_netdev_local *ld)
{
    writel(0, ld->gmac_iobase + PORT_EN);
}

/* Update the pause-threshold field of the register at @offset,
 * preserving the other bits. */
static void higmac_set_flow_threshold(struct higmac_netdev_local *ld, unsigned int threshold, unsigned int offset)
{
    unsigned int reg_val;

    reg_val = readl(ld->gmac_iobase + offset);
    reg_val = (reg_val & ~(BITS_Q_PAUSE_TH_MASK << BITS_Q_PAUSE_TH_OFFSET)) |
              ((threshold & BITS_Q_PAUSE_TH_MASK) << BITS_Q_PAUSE_TH_OFFSET);
    writel(reg_val, ld->gmac_iobase + offset);
}

/*
 * Program flow-control hardware parameters: pause-frame timer and
 * interval, per-queue activation/deactivation thresholds, and let
 * received pause frames pass the receive filter.
 */
void higmac_set_flow_ctrl_params(struct higmac_netdev_local *ld)
{
    unsigned int empty_th = ld->flow_ctrl_active_threshold;
    unsigned int full_th = ld->flow_ctrl_deactive_threshold;
    unsigned int rec_filter;

    writel(ld->pause_time, ld->gmac_iobase + FC_TX_TIMER);
    writel(ld->pause_interval, ld->gmac_iobase + PAUSE_THR);

    higmac_set_flow_threshold(ld, empty_th, RX_FQ_ALEMPTY_TH);
    higmac_set_flow_threshold(ld, full_th, RX_FQ_ALFULL_TH);
    higmac_set_flow_threshold(ld, empty_th, RX_BQ_ALEMPTY_TH);
    higmac_set_flow_threshold(ld, full_th, RX_BQ_ALFULL_TH);

    /* no software-forced pause frame */
    writel(0, ld->gmac_iobase + CRF_TX_PAUSE);

    /* let pause frames through the receive filter */
    rec_filter = readl(ld->gmac_iobase + REC_FILT_CONTROL);
    rec_filter |= BIT_PAUSE_FRM_PASS;
    writel(rec_filter, ld->gmac_iobase + REC_FILT_CONTROL);
}

/*
 * Enable or disable pause-frame flow control according to
 * ld->flow_ctrl (FLOW_RX: honour received pause frames;
 * FLOW_TX: generate pause frames / per-queue RX pause).
 */
void higmac_set_flow_ctrl_state(struct higmac_netdev_local *ld, int pause)
{
    unsigned int rx_q_en = readl(ld->gmac_iobase + RX_PAUSE_EN) & ~BIT_RX_PAUSE_EN;
    unsigned int pause_en = readl(ld->gmac_iobase + PAUSE_EN) & ~(BIT_RX_FDFC | BIT_TX_FDFC);

    if (pause) {
        if (ld->flow_ctrl & FLOW_TX) {
            rx_q_en |= BIT_RX_PAUSE_EN;
            pause_en |= BIT_TX_FDFC;
        }
        if (ld->flow_ctrl & FLOW_RX)
            pause_en |= BIT_RX_FDFC;
    }

    writel(rx_q_en, ld->gmac_iobase + RX_PAUSE_EN);
    writel(pause_en, ld->gmac_iobase + PAUSE_EN);

    higmac_info("pause frame is %s\n", pause ? "enabled" : "disabled");
}

/* Seed the per-device flow-control state from the module-level
 * defaults (see the flow_ctrl_* statics at the top of this file). */
static void higmac_set_flow_ctrl_args(struct higmac_netdev_local *ld)
{
    ld->old_pause = HIGMAC_PAUSE_DEFAULT;
    ld->flow_ctrl = flow_ctrl_en;
    ld->pause_time = tx_flow_ctrl_pause_time;
    ld->pause_interval = tx_flow_ctrl_pause_interval;
    ld->flow_ctrl_active_threshold = tx_flow_ctrl_active_threshold;
    ld->flow_ctrl_deactive_threshold = tx_flow_ctrl_deactive_threshold;
}

/* set gmac's multicast list, here we setup gmac's mc filter */
/* set gmac's multicast list, here we setup gmac's mc filter */
STATIC void higmac_gmac_multicast_list(struct net_device *dev)
{
    struct higmac_netdev_local *ld = netdev_priv(dev);
    unsigned int rec_filter;

    rec_filter = readl(ld->gmac_iobase + REC_FILT_CONTROL);
    /* when set gmac in promisc mode
     * a. dev in IFF_PROMISC mode
     */
    if ((dev->flags & IFF_PROMISC)) {
        /* promisc mode.received all pkgs. */
        rec_filter &= ~(BIT_BC_DROP_EN | BIT_MC_MATCH_EN | BIT_UC_MATCH_EN);
    } else {
        /* drop uc pkgs with field 'DA' not match our's */
        rec_filter |= BIT_UC_MATCH_EN;

        if (dev->flags & IFF_BROADCAST) /* broadcast allowed: don't drop */
            rec_filter &= ~BIT_BC_DROP_EN;
        else
            rec_filter |= BIT_BC_DROP_EN;

        if (netdev_mc_empty(dev) || !(dev->flags & IFF_MULTICAST)) {
            /* haven't join any mc group: match against all-zero address,
             * effectively blocking multicast */
            writel(0, ld->gmac_iobase + PORT_MC_ADDR_LOW);
            writel(0, ld->gmac_iobase + PORT_MC_ADDR_HIGH);
            rec_filter |= BIT_MC_MATCH_EN;
        } else if (netdev_mc_count(dev) == 1 && (dev->flags & IFF_MULTICAST)) {
            /* exactly one group: hardware can exact-match a single
             * multicast address (bytes 0-1 high reg, bytes 2-5 low reg) */
            struct netdev_hw_addr *ha = NULL;
            unsigned int d = 0;

            netdev_for_each_mc_addr(ha, dev)
            {
                if (ha == NULL)
                    return;
                d = (ha->addr[0] << 8) | (ha->addr[1]);
                writel(d, ld->gmac_iobase + PORT_MC_ADDR_HIGH);

                d = (ha->addr[2] << 24) | (ha->addr[3] << 16) | (ha->addr[4] << 8) | (ha->addr[5]);
                writel(d, ld->gmac_iobase + PORT_MC_ADDR_LOW);
            }
            rec_filter |= BIT_MC_MATCH_EN;
        } else {
            /* more than one group: pass all multicast */
            rec_filter &= ~BIT_MC_MATCH_EN;
        }
    }
    writel(rec_filter, ld->gmac_iobase + REC_FILT_CONTROL);
}

/* Free every pending skb tracked in the rx queue map and drop its DMA
 * mapping.  Called with the hw rx path already stopped (see
 * higmac_reclaim_rx_tx_resource); takes rxlock to serialize against the
 * rx receive/refill paths.
 *
 * Fix: unmap the buffer BEFORE freeing the skb that owns it, so the
 * mapping is never released after the memory backing it is gone.
 */
static void higmac_reclaim_rx_queue_skb(struct higmac_netdev_local *ld)
{
    struct sk_buff *skb = NULL;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&ld->rxlock, flags);
    for (i = 0; i < RX_DESC_NUM; i++) {
        skb = ld->rx_qmap[i].skb;
        if (skb == NULL)
            continue;

        /* non-coherent hw: release the streaming mapping first */
        if (!HAS_CAP_CCI(ld->hw_cap))
            dma_unmap_single(ld->dev, ld->rx_qmap[i].dma_addr, ld->rx_qmap[i].len, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);

        ld->rx_fq.skb[i] = NULL;
        ld->rx_qmap[i].skb = NULL;
    }
    spin_unlock_irqrestore(&ld->rxlock, flags);
}

/* Free every pending skb tracked in the tx queue map, dropping the DMA
 * mappings of the linear buffer and of all fragments.  Called with the
 * hw tx path already stopped (see higmac_reclaim_rx_tx_resource).
 *
 * Fix: unmap the buffers BEFORE freeing the skb that owns them, so no
 * mapping is released after its backing memory has been freed.
 */
static void higmac_reclaim_tx_queue_skb(struct higmac_netdev_local *ld)
{
    struct sk_buff *skb = NULL;
    int i, j;

    for (i = 0; i < TX_DESC_NUM; i++) {
        skb = ld->tx_qmap[i].skb;
        if (skb == NULL)
            continue;

        if (!HAS_CAP_CCI(ld->hw_cap)) {
            /* linear part, then every recorded fragment */
            dma_unmap_single(ld->dev, ld->tx_qmap[i].dma_addr, ld->tx_qmap[i].len, DMA_TO_DEVICE);

            for (j = 0; j < ld->tx_qmap[i].nfrags; j++) {
                dma_unmap_single(ld->dev, ld->tx_qmap[i].frags_dma[j], ld->tx_qmap[i].frags_len[j], DMA_TO_DEVICE);
            }
        }

        dev_kfree_skb_any(skb);

        ld->tx_bq.skb[i] = NULL;
        ld->tx_qmap[i].skb = NULL;
    }
}

/* the func stop the hw desc and relaim the software skb resource
 * before reusing the gmac, you'd better reset the gmac
 */
void higmac_reclaim_rx_tx_resource(struct higmac_netdev_local *ld)
{
    unsigned long rxflags, txflags;
    int rd_offset, wr_offset;
    int i;

    higmac_irq_disable_all_queue(ld);
    higmac_port_disable(ld);
    higmac_hw_desc_disable(ld);

    /* tell the logic to stop both rx and tx before draining the rings */
    writel(STOP_RX_TX, ld->gmac_iobase + STOP_CMD);

    higmac_reclaim_rx_queue_skb(ld);
    higmac_reclaim_tx_queue_skb(ld);

    netdev_reset_queue(ld->netdev);

    spin_lock_irqsave(&ld->rxlock, rxflags);
    /* rx_bq: logic write pointer */
    wr_offset = readl(ld->gmac_iobase + RX_BQ_WR_ADDR);
    /* rx_bq: software read pointer */
    rd_offset = readl(ld->gmac_iobase + RX_BQ_RD_ADDR);

    /* catch the read pointer up to the write pointer: queue 0 rx_bq drained */
    writel(wr_offset, ld->gmac_iobase + RX_BQ_RD_ADDR);

    /* drain the remaining RSS rx queues the same way */
    for (i = 1; i < ld->num_rxqs; i++) {
        u32 rx_bq_wr_reg, rx_bq_rd_reg;

        rx_bq_wr_reg = RX_BQ_WR_ADDR_QUEUE(i);
        rx_bq_rd_reg = RX_BQ_RD_ADDR_QUEUE(i);

        wr_offset = readl(ld->gmac_iobase + rx_bq_wr_reg);
        writel(wr_offset, ld->gmac_iobase + rx_bq_rd_reg);
    }

    /* rx_fq: software write pointer */
    wr_offset = readl(ld->gmac_iobase + RX_FQ_WR_ADDR);
    /* rx_fq: logic read pointer */
    rd_offset = readl(ld->gmac_iobase + RX_FQ_RD_ADDR);

    /* pull the write pointer back to the read pointer: rx_fq now empty */
    writel(rd_offset, ld->gmac_iobase + RX_FQ_WR_ADDR);

    spin_unlock_irqrestore(&ld->rxlock, rxflags);

    /* no need to wait pkts in tx_rq finish to free all skb,
     * because higmac_xmit_reclaim is in the tx_lock,
     */
    spin_lock_irqsave(&ld->txlock, txflags);
    /* tx_rq: logic write */
    wr_offset = readl(ld->gmac_iobase + TX_RQ_WR_ADDR);
    /* tx_rq: software read */
    rd_offset = readl(ld->gmac_iobase + TX_RQ_RD_ADDR);

    /* mark all reclaimable tx_rq entries as consumed */
    writel(wr_offset, ld->gmac_iobase + TX_RQ_RD_ADDR);

    /* tx_bq: logic read */
    rd_offset = readl(ld->gmac_iobase + TX_BQ_RD_ADDR);

    /* pull tx_bq write pointer back to the logic read pointer: queue empty */
    writel(rd_offset, ld->gmac_iobase + TX_BQ_WR_ADDR);

    spin_unlock_irqrestore(&ld->txlock, txflags);
}

static void higmac_set_multicast_list(struct net_device *dev);

static void higmac_hw_set_mac_addr(struct net_device *dev)
{
    struct higmac_netdev_local *priv = netdev_priv(dev);
    unsigned char *mac = dev->dev_addr;
    u32 val;

    val = mac[1] | (mac[0] << 8);
    writel(val, priv->gmac_iobase + STATION_ADDR_HIGH);

    val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
    writel(val, priv->gmac_iobase + STATION_ADDR_LOW);
}

static void higmac_rx_refill(struct higmac_netdev_local *priv);

/* reset and re-config gmac: full core reset followed by re-programming
 * of MAC address, descriptor rings and flow control; re-enables the
 * datapath only if the netdev was up.  Used on resume/error recovery.
 */
void higmac_restart(struct higmac_netdev_local *ld)
{
    unsigned long rxflags, txflags;

    /* restart hw engine now */
    higmac_mac_core_reset(ld);

    /* lock order: rxlock then txlock; released in reverse order below */
    spin_lock_irqsave(&ld->rxlock, rxflags);
    spin_lock_irqsave(&ld->txlock, txflags);

    higmac_hw_init(ld);
    higmac_hw_set_mac_addr(ld->netdev);
    higmac_hw_set_desc_addr(ld);

    higmac_set_flow_ctrl_params(ld);
    higmac_set_flow_ctrl_state(ld, ld->old_pause);

    /* rxcsum drop is disabled when gmac reset */
    if (ld->netdev->features & NETIF_F_RXCSUM) {
        higmac_enable_rxcsum_drop(ld, true);
    }

    /* we don't set macif here, it will be set in adjust_link */
    if (ld->netdev->flags & IFF_UP) {
        /* when resume, only do the following operations
         * when dev is up before suspend.
         */
        higmac_rx_refill(ld);
        higmac_set_multicast_list(ld->netdev);

        higmac_hw_desc_enable(ld);
        higmac_port_enable(ld);
        higmac_irq_enable_all_queue(ld);
    }
    spin_unlock_irqrestore(&ld->txlock, txflags);
    spin_unlock_irqrestore(&ld->rxlock, rxflags);
}

/* ndo_set_mac_address hook: validate and store the new address via
 * eth_mac_addr(), then mirror it into the hardware registers.
 * Returns 0 on success or the eth_mac_addr() error code.
 */
static int higmac_net_set_mac_address(struct net_device *dev, void *p)
{
    int ret = eth_mac_addr(dev, p);

    if (ret == 0)
        higmac_hw_set_mac_addr(dev);

    return ret;
}

/* Map an ethtool SPEED_* value to a human readable string for logging. */
static const char *higmac_speed_to_str(int speed)
{
    if (speed == SPEED_10)
        return "10Mbps";
    if (speed == SPEED_100)
        return "100Mbps";
    if (speed == SPEED_1000)
        return "1Gbps";
    if (speed == SPEED_UNKNOWN)
        return "Unknown";

    return "Unsupported";
}

/* Map a FLOW_* flow-control mode to a human readable string for logging. */
static const char *higmac_pause_to_str(int pause)
{
    if (pause == FLOW_OFF)
        return "off";
    if (pause == FLOW_RX)
        return "rx";
    if (pause == FLOW_TX)
        return "tx";
    if (pause == FLOW_AUTO)
        return "rx/tx";

    return "Unsupported";
}

STATIC void higmac_print_status(struct higmac_netdev_local *priv)
{
    if (priv->old_link) {
        higmac_info("Link is Up - %s/%s - flow control %s\n", higmac_speed_to_str(priv->old_speed),
            DUPLEX_FULL == priv->old_duplex ? "Full" : "Half", higmac_pause_to_str(priv->flow_ctrl));
    } else {
        higmac_info("Link is Down\n");
    }
}

/* Apply a new link state (link/speed/duplex/pause) to the MAC, caching it
 * in priv->old_* so the hardware is only reprogrammed on real changes.
 * @pause is only acted upon while the link is up.  Logs the resulting
 * state when anything changed and link messages are enabled.
 */
STATIC void higmac_link_deal(struct higmac_netdev_local *priv, unsigned int link, unsigned int speed,
    unsigned int duplex, unsigned int pause)
{
    int status_changed = 0;

    if (link) {
        if ((duplex != priv->old_duplex) || (speed != priv->old_speed)) {
            /* change duplex and speed */
            higmac_config_port(priv->netdev, speed, duplex);

            priv->old_speed = speed;
            priv->old_duplex = duplex;
            status_changed = 1;
        }

        if (pause != priv->old_pause) {
            /* priv->old_pause changed by two ways:
             * 1.Init by higmac_set_flow_ctrl_args() with default value;
             * 2.Here, when pause != old_pause
             */
            priv->old_pause = pause;
            higmac_set_flow_ctrl_state(priv, priv->old_pause);
        }

        if (link != priv->old_link) {
            /* if the port has phy, netif_carrier_on will be called
             * in phy_state_machine.
             */
            if (priv->phy_node == NULL)
                netif_carrier_on(priv->netdev);

            priv->old_link = link;
            status_changed = 1;
        }
    } else {
        if (link != priv->old_link) {
            if (priv->phy_node == NULL)
                netif_carrier_off(priv->netdev);

            /* link lost: speed/duplex are unknown until renegotiated */
            priv->old_link = link;
            priv->old_speed = SPEED_UNKNOWN;
            priv->old_duplex = DUPLEX_UNKNOWN;
            status_changed = 1;
        }
    }

    if ((status_changed) && netif_msg_link(priv))
        higmac_print_status(priv);
}

/* PHY adjust_link callback: forward the phylib-reported state to
 * higmac_link_deal().  Pause is taken from the PHY only when
 * autonegotiation is active; otherwise the driver default is used.
 */
STATIC void higmac_adjust_link(struct net_device *dev)
{
    struct higmac_netdev_local *priv = netdev_priv(dev);
    struct phy_device *phy = priv->phy;
    u32 pause = HIGMAC_PAUSE_DEFAULT;

    if (phy->autoneg)
        pause = phy->pause;

    higmac_link_deal(priv, phy->link, phy->speed, phy->duplex, pause);
}

/* Poll the autonegotiation results for a PHY-less port and feed them to
 * higmac_link_deal().  Does nothing while the link service is closed.
 */
STATIC void higmac_update_link(struct higmac_netdev_local *priv)
{
    u32 link, speed, duplex;

    if (test_bit(SERVICE_STATE_LINK_CLOSE, &priv->service_state))
        return;

    link = higmac_auto_nego_link(priv);
    speed = higmac_auto_nego_speed(priv);
    duplex = higmac_auto_nego_duplex(priv);

    /* no phy means no negotiated pause: use the driver default */
    higmac_link_deal(priv, link, speed, duplex, HIGMAC_PAUSE_DEFAULT);
}

/* Return the number of free tx_bq descriptors (the ring always keeps
 * one slot empty to distinguish full from empty).
 *
 * Fix: the hardware read pointer can sit AHEAD of the software write
 * pointer once the write pointer has wrapped; without reducing the
 * result modulo TX_DESC_NUM the old formula over-reported free space
 * by a full ring in that case.  The result now always lies in
 * [0, TX_DESC_NUM - 1].
 */
int higmac_tx_avail(struct higmac_netdev_local *ld)
{
    unsigned int tx_bq_wr_offset;
    unsigned int tx_bq_rd_offset;
    unsigned int space;

    tx_bq_wr_offset = readl(ld->gmac_iobase + TX_BQ_WR_ADDR);
    tx_bq_rd_offset = readl(ld->gmac_iobase + TX_BQ_RD_ADDR);

    space = (tx_bq_rd_offset >> DESC_BYTE_SHIFT) + TX_DESC_NUM - (tx_bq_wr_offset >> DESC_BYTE_SHIFT) - 1;

    return space % TX_DESC_NUM;
}

static int higmac_init_sg_desc_queue(struct higmac_netdev_local *ld)
{
    ld->sg_count = ld->tx_bq.count + HIGMAC_SG_DESC_ADD;
    if (HAS_CAP_CCI(ld->hw_cap)) {
        ld->dma_sg_desc = kmalloc_array(ld->sg_count, sizeof(struct sg_desc), GFP_KERNEL);
        if (ld->dma_sg_desc)
            ld->dma_sg_phy = virt_to_phys(ld->dma_sg_desc);
    } else {
        ld->dma_sg_desc = (struct sg_desc *)dma_alloc_coherent(ld->dev, ld->sg_count * sizeof(struct sg_desc),
            &ld->dma_sg_phy, GFP_KERNEL);
    }

    if (!ld->dma_sg_desc) {
        higmac_err("alloc sg desc dma error!\n");
        return -ENOMEM;
    }

    ld->sg_head = 0;
    ld->sg_tail = 0;

    return 0;
}

/* Free the scatter-gather descriptor ring allocated by
 * higmac_init_sg_desc_queue(), using the matching free routine for the
 * allocation path, and clear the pointer.  Safe to call twice.
 */
static void higmac_destroy_sg_desc_queue(struct higmac_netdev_local *ld)
{
    if (ld->dma_sg_desc == NULL)
        return;

    if (HAS_CAP_CCI(ld->hw_cap))
        kfree(ld->dma_sg_desc);
    else
        dma_free_coherent(ld->dev, ld->sg_count * sizeof(struct sg_desc), ld->dma_sg_desc, ld->dma_sg_phy);

    ld->dma_sg_desc = NULL;
}

/* Periodic maintenance hook: top up the rx free queue while the device
 * is running.  Skipped entirely when the refill service is closed.
 */
static void higmac_monitor_func(struct higmac_netdev_local *priv)
{
    struct net_device *ndev = priv->netdev;
    unsigned long flags;

    if (test_bit(SERVICE_STATE_REFILL_CLOSE, &priv->service_state))
        return;

    if (netif_running(ndev)) {
        /* refill races with the rx interrupt path, so take rxlock
         * with interrupts disabled
         */
        spin_lock_irqsave(&priv->rxlock, flags);
        higmac_rx_refill(priv);
        spin_unlock_irqrestore(&priv->rxlock, flags);
    } else {
        higmac_trace(7, "network driver is stopped.");
    }
}

/* Replenish the rx free queue (rx_fq) with freshly allocated skbs,
 * filling every free slot between the software write pointer and the
 * logic read pointer.  Caller must hold rxlock.
 */
static void higmac_rx_refill(struct higmac_netdev_local *priv)
{
    struct higmac_desc *desc = NULL;
    struct sk_buff *skb = NULL;
    u32 start, end, num, pos, i;
    u32 len = HIETH_MAX_FRAME_SIZE;
    dma_addr_t addr;

    /* software write pointer */
    start = dma_cnt((u32)readl(priv->gmac_iobase + RX_FQ_WR_ADDR));
    /* logic read pointer */
    end = dma_cnt((u32)readl(priv->gmac_iobase + RX_FQ_RD_ADDR));
    num = CIRC_SPACE(start, end, RX_DESC_NUM);

    for (i = 0, pos = start; i < num; i++) {
        /* slot still owned by an earlier skb: stop refilling here */
        if (priv->rx_fq.skb[pos] || priv->rx_qmap[pos].skb)
            break;

        skb = netdev_alloc_skb_ip_align(priv->netdev, len);
        if (unlikely(skb == NULL)) {
            higmac_err("alloc skb is null!\n");
            break;
        }

        if (!HAS_CAP_CCI(priv->hw_cap)) {
            addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(priv->dev, addr)) {
                higmac_err("dma_map_single failed\n");
                dev_kfree_skb_any(skb);
                break;
            }
        } else {
            /* cache-coherent interconnect: physical address is enough */
            addr = virt_to_phys(skb->data);
        }

        desc = priv->rx_fq.desc + pos;
        desc->data_buff_addr = (u32)addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
        /* upper bits of the 64-bit dma address go into the reserve field */
        desc->reserve31 = addr >> REG_BIT_WIDTH;
#endif
        /* track the skb in both the free-queue array and the queue map */
        priv->rx_fq.skb[pos] = skb;
        priv->rx_qmap[pos].skb = skb;
        priv->rx_qmap[pos].dma_addr = addr;
        priv->rx_qmap[pos].len = len;

        desc->buffer_len = len - 1;
        desc->data_len = 0;
        desc->fl = 0;
        desc->descvid = DESC_VLD_FREE;
        desc->skb_id = pos;

        pos = dma_ring_incr(pos, RX_DESC_NUM);
    }

    /* This barrier is important here.  It is required to ensure
     * the ARM CPU flushes it's DMA write buffers before proceeding
     * to the next instruction, to ensure that GMAC will see
     * our descriptor changes in memory
     */
    HIGMAC_SYNC_BARRIER();

    /* publish the new write pointer only if anything was refilled */
    if (pos != start)
        writel(dma_byte(pos), priv->gmac_iobase + RX_FQ_WR_ADDR);
}

/* Drop the DMA mapping of the received buffer described by @desc.
 * The descriptor's address is cross-checked against the driver's
 * bookkeeping for @skb_id; a mismatch is logged, reported via DFM and
 * returns -1.  On cache-coherent hardware there is nothing to unmap.
 * Returns 0 on success.
 */
STATIC int higmac_rx_unmap(struct higmac_netdev_local *ld, struct higmac_desc *desc, struct sk_buff *skb, u16 skb_id)
{
    dma_addr_t addr;
    int ret;

    if (!HAS_CAP_CCI(ld->hw_cap)) {
        addr = desc->data_buff_addr;

#if defined(CONFIG_HIGMAC_DDR_64BIT)
        /* rebuild the full 64-bit dma address from the reserve field */
        addr |= (dma_addr_t)(desc->reserve31) << REG_BIT_WIDTH;
#endif
        if (addr == ld->rx_qmap[skb_id].dma_addr) {
            dma_unmap_single(ld->dev, addr, HIETH_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
        } else {
            higmac_err("inconsistent! desc->skb(0x%pK),rx_fq.skb[%d](0x%pK)\n", skb, skb_id, ld->rx_fq.skb[skb_id]);

            /* raise a DFM maintenance event with full descriptor detail */
            ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1,
                "%s desc->skb(0x%p),rx_fq.skb[%d](0x%p)"
                "data_buff_addr(0x%x),reserve31(0x%x),addr(0x%lx),dma_addr(0x%lx)\n",
                HIGMAC_E_DESC_RX_INCONST, skb, skb_id, ld->rx_fq.skb[skb_id], desc->data_buff_addr, desc->reserve31,
                (unsigned long)addr, (unsigned long)ld->rx_qmap[skb_id].dma_addr);
            if (ret > 0) {
                higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_RX_INCONST);
            }

            return -1;
        }
    }

    return 0;
}

/* Evaluate the hardware rx checksum result for @skb.
 * When both header and payload were verified and clean, mark the skb
 * CHECKSUM_UNNECESSARY.  On a checksum error the skb is freed, error
 * stats are bumped and -1 is returned; otherwise returns 0.
 */
STATIC int higmac_rx_csum_set(struct net_device *dev, struct sk_buff *skb, struct higmac_desc *desc)
{
    int hdr_done, payload_done, hdr_err, payload_err;

    if (!(dev->features & NETIF_F_RXCSUM))
        return 0;

    hdr_done = desc->header_csum_done;
    payload_done = desc->payload_csum_done;
    /* hw only vouches for the packet when it checked both parts */
    if (!hdr_done || !payload_done)
        return 0;

    hdr_err = desc->header_csum_err;
    payload_err = desc->payload_csum_err;
    if (unlikely(hdr_err || payload_err)) {
        higmac_err("csum err, hdr_csum_err=%d,payload_csum_err=%d\n", hdr_err, payload_err);
        dev->stats.rx_errors++;
        dev->stats.rx_crc_errors++;
        dev_kfree_skb_any(skb);
        return -1;
    }

    skb->ip_summed = CHECKSUM_UNNECESSARY;
    return 0;
}

/* Look up and claim the skb attached to rx descriptor @skb_id.
 * Cross-checks rx_qmap against rx_fq; any inconsistency is logged,
 * reported via DFM and makes the function return NULL.  On success the
 * rx_fq slot is cleared and the skb is handed to the caller.
 * Takes rxlock internally, so the caller must not hold it.
 */
struct sk_buff *higmac_rx_get_skb(struct higmac_netdev_local *ld, u16 skb_id)
{
    struct sk_buff *skb = NULL;
    int ret;

    spin_lock(&ld->rxlock);

    skb = ld->rx_qmap[skb_id].skb;
    if (unlikely(skb == NULL)) {
        spin_unlock(&ld->rxlock);
        higmac_err("inconsistent rx_qmap skb\n");

        ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1, "%s ld->rx_qmap[%d].skb is NULL!\n",
            HIGMAC_E_DESC_RX_INCONST, skb_id);
        if (ret > 0) {
            higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_RX_INCONST);
        }

        return NULL;
    }

    /* data consistent check */
    if (unlikely(skb != ld->rx_fq.skb[skb_id])) {
        spin_unlock(&ld->rxlock);
        higmac_err("inconsistent! desc->skb(0x%pK),rx_fq.skb[%d](0x%pK)\n", skb, skb_id, ld->rx_fq.skb[skb_id]);

        ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1,
            "%s desc->skb(0x%pK),rx_fq.skb[%d](0x%pK)\n", HIGMAC_E_DESC_RX_INCONST, skb, skb_id, ld->rx_fq.skb[skb_id]);
        if (ret > 0) {
            higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_RX_INCONST);
        }

        return NULL;
    } else {
        /* claimed: the free-queue slot no longer owns this skb */
        ld->rx_fq.skb[skb_id] = NULL;
    }

    spin_unlock(&ld->rxlock);

    return skb;
}

/* Receive up to @limit packets from rx queue @rxq_id and hand them to
 * the stack via GRO.  Runs in napi poll context; returns the number of
 * descriptors consumed (counting error drops).
 */
STATIC int higmac_rx(struct net_device *dev, int limit, int rxq_id)
{
    struct higmac_netdev_local *ld = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    struct higmac_desc *desc = NULL;
    u32 start, end, num, pos, i, len;
    u32 rx_bq_rd_reg, rx_bq_wr_reg;
    u16 skb_id;

    rx_bq_rd_reg = RX_BQ_RD_ADDR_QUEUE(rxq_id);
    rx_bq_wr_reg = RX_BQ_WR_ADDR_QUEUE(rxq_id);

    /* software read pointer */
    start = dma_cnt((u32)readl(ld->gmac_iobase + rx_bq_rd_reg));
    /* logic write pointer */
    end = dma_cnt((u32)readl(ld->gmac_iobase + rx_bq_wr_reg));
    num = CIRC_CNT(end, start, RX_DESC_NUM);
    if (num > limit)
        num = limit;

    /* ensure get updated desc */
    rmb();
    for (i = 0, pos = start; i < num; i++) {
        /* queue 0 uses rx_bq; RSS queues use the extra descriptor pools */
        if (rxq_id)
            desc = ld->pool[3 + rxq_id].desc + pos;
        else
            desc = ld->rx_bq.desc + pos;
        skb_id = desc->skb_id;

        if ((skb = higmac_rx_get_skb(ld, skb_id)) == NULL)
            break;

        len = desc->data_len;

        if (0 != higmac_rx_unmap(ld, desc, skb, skb_id))
            break;

        skb_put(skb, len);
        if (skb->len > HIETH_MAX_FRAME_SIZE) {
            higmac_err("rcv len err, len = %d\n", skb->len);
            dev->stats.rx_errors++;
            dev->stats.rx_length_errors++;
            dev_kfree_skb_any(skb);
            goto next;
        }

        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_NONE;

#if defined(CONFIG_HIGMAC_RXCSUM)
        /* on checksum error the helper has already freed the skb */
        if (0 != higmac_rx_csum_set(dev, skb, desc))
            goto next;
#endif
        if ((dev->features & NETIF_F_RXHASH) && desc->has_hash)
            skb_set_hash(skb, desc->rxhash, desc->l3_hash ? PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4);
        skb_record_rx_queue(skb, rxq_id);

        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;

        napi_gro_receive(&ld->q_napi[rxq_id].napi, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
        /* cloud, 4.11 no need do this */
#else
        /* mini */
        dev->last_rx = jiffies;
#endif

    next:
        /* descriptor consumed: release the queue-map slot */
        spin_lock(&ld->rxlock);
        ld->rx_qmap[skb_id].skb = NULL;
        ld->rx_qmap[skb_id].dma_addr = 0;
        spin_unlock(&ld->rxlock);
        pos = dma_ring_incr(pos, RX_DESC_NUM);
    }

    /* hand the consumed descriptors back to the logic */
    if (pos != start)
        writel(dma_byte(pos), ld->gmac_iobase + rx_bq_rd_reg);

    spin_lock(&ld->rxlock);
    higmac_rx_refill(ld);
    spin_unlock(&ld->rxlock);

    return num;
}

/* Inspect the tx_err bits of a completed tso descriptor.  On any error
 * the four descriptor words and the matching sg_desc contents are
 * dumped to the log and a DFM event is raised; returns -1 on error,
 * 0 when the descriptor completed cleanly.
 */
STATIC int higmac_check_tx_err(struct higmac_netdev_local *ld, struct higmac_tso_desc *tx_bq_desc,
    unsigned int desc_pos)
{
    unsigned int tx_err = tx_bq_desc->tx_err;
    int ret;

    if (unlikely(tx_err & ERR_ALL)) {
        struct sg_desc *desc_cur = NULL;
        int *sg_word = NULL;
        int i;

        WARN((tx_err & ERR_ALL), "TX ERR: desc1=0x%x, desc2=0x%x, desc3=0x%x, desc4=0x%x\n", tx_bq_desc->data_buff_addr,
            tx_bq_desc->desc1.val, tx_bq_desc->reserve_desc2, tx_bq_desc->tx_err);

        /* dump the related sg descriptor word by word for post-mortem */
        desc_cur = ld->dma_sg_desc + ld->tx_bq.sg_desc_offset[desc_pos];
        sg_word = (int *)desc_cur;
        for (i = 0; i < sizeof(struct sg_desc) / sizeof(int); i++) {
            higmac_err("sg_desc word[%d]=0x%x\n", i, sg_word[i]);
        }

        ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1,
            "%s desc1=0x%x, desc2=0x%x, desc3=0x%x, desc4=0x%x\n", HIGMAC_E_DESC_TX_BD_ERR, tx_bq_desc->data_buff_addr,
            tx_bq_desc->desc1.val, tx_bq_desc->reserve_desc2, tx_bq_desc->tx_err);
        if (ret > 0) {
            higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_TX_BD_ERROR);
        }
        return -1;
    }

    return 0;
}

/* Release the DMA mappings of one completed tx packet during reclaim.
 * Normal packets unmap the single linear buffer; SG/GSO packets unmap
 * the linear part plus every fragment recorded in the sg_desc ring and
 * retire the sg slot.  On a hardware-reported tx error the whole port
 * is shut down and -1 returned; otherwise returns 0.
 * Called from higmac_xmit_reclaim() under txlock.
 */
static int higmac_xmit_release_gso(struct higmac_netdev_local *ld, struct higmac_tso_desc *tx_rq_desc,
    unsigned int desc_pos)
{
    struct sk_buff *skb = ld->tx_qmap[desc_pos].skb;
    int pkt_type;
    int nfrags = skb_shinfo(skb)->nr_frags;
    dma_addr_t addr;
    size_t len;

    if (unlikely(higmac_check_tx_err(ld, tx_rq_desc, desc_pos) < 0)) {
        /* unrecoverable tx error: stop the port (mirror of dev_close) */
        higmac_irq_disable_all_queue(ld);
        higmac_hw_desc_disable(ld);

        netif_carrier_off(ld->netdev);
        netif_stop_queue(ld->netdev);

        if (ld->phy != NULL)
            phy_stop(ld->phy);

        set_bit(SERVICE_STATE_REFILL_CLOSE, &ld->service_state);
        return -1;
    }

    if (skb_is_gso(skb) || nfrags)
        pkt_type = PKT_SG;
    else
        pkt_type = PKT_NORMAL;

    if (pkt_type == PKT_NORMAL) {
        if (!HAS_CAP_CCI(ld->hw_cap)) {
            addr = tx_rq_desc->data_buff_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
            /* high address bits live in the descriptor reserve field */
            addr |= (dma_addr_t)(tx_rq_desc->reserve_desc2 & TX_DESC_HI8_MASK) << REG_BIT_WIDTH;
#endif
            dma_unmap_single(ld->dev, addr, skb->len, DMA_TO_DEVICE);
        }
    } else {
        if (!HAS_CAP_CCI(ld->hw_cap)) {
            struct sg_desc *desc_cur = NULL;
            unsigned int desc_offset;
            int i;

            /* sg descriptors complete in order, so this must be sg_tail */
            desc_offset = ld->tx_bq.sg_desc_offset[desc_pos];
            WARN_ON(desc_offset != ld->sg_tail);
            desc_cur = ld->dma_sg_desc + desc_offset;

            addr = desc_cur->linear_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
            addr |= (dma_addr_t)(desc_cur->reserv3 >> SG_DESC_HI8_OFFSET) << REG_BIT_WIDTH;
#endif
            len = desc_cur->linear_len;
            dma_unmap_single(ld->dev, addr, len, DMA_TO_DEVICE);
            /* then every page fragment recorded in the sg descriptor */
            for (i = 0; i < nfrags; i++) {
                addr = desc_cur->frags[i].addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
                addr |= (dma_addr_t)(desc_cur->frags[i].reserved >> SG_DESC_HI8_OFFSET) << REG_BIT_WIDTH;
#endif
                len = desc_cur->frags[i].size;
                dma_unmap_page(ld->dev, addr, len, DMA_TO_DEVICE);
            }
        }

        /* retire this sg descriptor slot */
        ld->sg_tail = (ld->sg_tail + 1) % ld->sg_count;
    }

    return 0;
}

/* Unmap the linear tx buffer of one completed packet, cross-checking the
 * descriptor's dma address and length against the tx_qmap bookkeeping.
 * Returns 0 on success, -1 (after logging and a DFM report) on mismatch.
 * NOTE(review): despite the name, this runs on the NON-cache-coherent
 * path - the caller only invokes it when !HAS_CAP_CCI (see
 * higmac_xmit_reclaim).
 */
static int higmac_xmit_release_cci(struct higmac_netdev_local *priv, struct higmac_desc *desc, struct sk_buff *skb,
    int pos)
{
    dma_addr_t addr;
    int ret;

    addr = desc->data_buff_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    /* on tx, the rxhash field carries the high bits of the dma address */
    addr |= (dma_addr_t)(desc->rxhash & TX_DESC_HI8_MASK) << REG_BIT_WIDTH;
#endif
    if ((addr == priv->tx_qmap[pos].dma_addr) && (skb->len == priv->tx_qmap[pos].len)) {
        dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
    } else {
        higmac_err("inconsistent! wired, tx skb[%d](%pK) != skb(%pK)\n", pos, priv->tx_bq.skb[pos], skb);

        /* raise a DFM maintenance event with full descriptor detail */
        ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1,
            "%s! wired, tx skb[%d](%p) != skb(%p),"
            "data_buff_addr(0x%x),rxhash(0x%x),addr(0x%lx),dma_addr(0x%lx)\n",
            HIGMAC_E_DESC_TX_INCONST, pos, priv->tx_bq.skb[pos], skb, desc->data_buff_addr, desc->rxhash,
            (unsigned long)addr, (unsigned long)priv->tx_qmap[pos].dma_addr);
        if (ret > 0) {
            higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_TX_INCONST);
        }

        return -1;
    }

    return 0;
}

/* Reclaim transmitted descriptors from tx_rq: verify bookkeeping,
 * release DMA mappings, free the skbs, report completions to the BQL
 * layer and wake the tx queue if it was stopped.  Runs from queue 0's
 * napi poll context and takes txlock internally.
 */
STATIC void higmac_xmit_reclaim(struct net_device *dev)
{
    struct sk_buff *skb = NULL;
    struct higmac_desc *desc = NULL;
    struct higmac_tso_desc *tso_desc = NULL;
    struct higmac_netdev_local *priv = netdev_priv(dev);
    unsigned int bytes_compl = 0;
    unsigned int pkts_compl = 0;
    u32 start, end, num, pos, i;
    int ret;

    spin_lock(&priv->txlock);

    /* software read */
    start = dma_cnt((u32)readl(priv->gmac_iobase + TX_RQ_RD_ADDR));
    /* logic write */
    end = dma_cnt((u32)readl(priv->gmac_iobase + TX_RQ_WR_ADDR));
    num = CIRC_CNT(end, start, TX_DESC_NUM);

    for (i = 0, pos = start; i < num; i++) {
        skb = priv->tx_qmap[pos].skb;
        if (unlikely(skb == NULL)) {
            higmac_err("inconsistent tx_qmap skb\n");

            ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1, "%s skb is NULL!\n",
                HIGMAC_E_DESC_TX_INCONST);
            if (ret > 0) {
                higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_TX_INCONST);
            }
            break;
        }

        /* bookkeeping cross-check: queue map and tx_bq must agree */
        if (skb != priv->tx_bq.skb[pos]) {
            higmac_err("inconsistent! wired, tx skb[%d](%pK) != skb(%pK)\n", pos, priv->tx_bq.skb[pos], skb);

            ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1,
                "%s wired, tx skb[%d](%pK) != skb(%pK)\n", HIGMAC_E_DESC_TX_INCONST, pos, priv->tx_bq.skb[pos], skb);
            if (ret > 0) {
                higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_TX_INCONST);
            }
            break;
        }

        /* release the DMA mappings; TSO hw uses the tso descriptor layout */
        desc = priv->tx_rq.desc + pos;
        if (priv->tso_supported) {
            tso_desc = (struct higmac_tso_desc *)desc;
            ret = higmac_xmit_release_gso(priv, tso_desc, pos);
            if (ret < 0)
                break;
        } else if (!HAS_CAP_CCI(priv->hw_cap)) {
            ret = higmac_xmit_release_cci(priv, desc, skb, pos);
            if (ret < 0)
                break;
        }

        pkts_compl++;
        bytes_compl += skb->len;
        priv->tx_bq.skb[pos] = NULL;
        priv->tx_qmap[pos].skb = NULL;
        priv->tx_qmap[pos].dma_addr = 0;
        priv->tx_qmap[pos].len = 0;
        dev_consume_skb_any(skb);
        pos = dma_ring_incr(pos, TX_DESC_NUM);
    }

    /* return the reclaimed descriptors to the logic */
    if (pos != start)
        writel(dma_byte(pos), priv->gmac_iobase + TX_RQ_RD_ADDR);

    if (pkts_compl || bytes_compl)
        netdev_completed_queue(dev, pkts_compl, bytes_compl);

    if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
        netif_wake_queue(priv->netdev);

    spin_unlock(&priv->txlock);
}

/* NAPI poll handler for one rx queue.  Queue 0 additionally reclaims
 * finished tx descriptors.  Loops while new raw interrupts keep
 * arriving; completes napi and re-enables the queue interrupt once
 * fewer than @budget packets were processed.
 */
STATIC int higmac_poll(struct napi_struct *napi, int budget)
{
    struct higmac_napi *q_napi = container_of(napi, struct higmac_napi, napi);
    struct higmac_netdev_local *priv = q_napi->ndev_priv;
    struct net_device *dev = priv->netdev;
    int work_done = 0;
    int task = budget;
    u32 ints, num;
    u32 raw_int_reg, raw_int_mask;

    /* RSS queues have their own raw interrupt register and per-queue mask */
    if (q_napi->rxq_id) {
        raw_int_reg = RSS_RAW_PMU_INT;
        raw_int_mask = DEF_INT_MASK_QUEUE((unsigned int)q_napi->rxq_id);
    } else {
        raw_int_reg = RAW_PMU_INT;
        raw_int_mask = DEF_INT_MASK;
    }

    do {
        /* tx reclaim only runs in queue 0's napi context */
        if (!q_napi->rxq_id)
            higmac_xmit_reclaim(dev);
        num = higmac_rx(dev, task, q_napi->rxq_id);
        work_done += num;
        task -= num;
        if (work_done >= budget)
            break;

        /* ack the raw interrupts; loop again if new work arrived meanwhile */
        ints = readl(priv->gmac_iobase + raw_int_reg);
        ints &= raw_int_mask;
        writel(ints, priv->gmac_iobase + raw_int_reg);
    } while (ints);

    if (work_done < budget) {
        napi_complete(napi);
        higmac_irq_enable_queue(priv, q_napi->rxq_id);
    }

    return work_done;
}

/* Per-queue interrupt handler: acknowledge the queue's raw interrupt
 * bits, mask the queue interrupt and schedule napi to do the real work.
 */
STATIC irqreturn_t higmac_interrupt(int irq, void *dev_id)
{
    struct higmac_napi *q_napi = (struct higmac_napi *)dev_id;
    struct higmac_netdev_local *ld = q_napi->ndev_priv;
    u32 ints;
    u32 raw_int_reg, raw_int_mask;

    /* not ours: this queue's interrupt is currently masked */
    if (higmac_queue_irq_disabled(ld, q_napi->rxq_id))
        return IRQ_NONE;

    /* RSS queues have their own raw interrupt register and per-queue mask */
    if (q_napi->rxq_id) {
        raw_int_reg = RSS_RAW_PMU_INT;
        raw_int_mask = DEF_INT_MASK_QUEUE((unsigned int)q_napi->rxq_id);
    } else {
        raw_int_reg = RAW_PMU_INT;
        raw_int_mask = DEF_INT_MASK;
    }

    /* read and acknowledge the pending raw interrupt bits */
    ints = readl(ld->gmac_iobase + raw_int_reg);
    ints &= raw_int_mask;
    writel(ints, ld->gmac_iobase + raw_int_reg);

    if (likely(ints)) {
        /* keep the queue irq masked until the napi poll completes */
        higmac_irq_disable_queue(ld, q_napi->rxq_id);
        napi_schedule(&q_napi->napi);
    }

    return IRQ_HANDLED;
}

/* Return the L3 protocol of @skb, looking past an 802.1Q VLAN tag when
 * present so the real network-layer protocol is reported.
 */
static inline __be16 higmac_get_l3_proto(struct sk_buff *skb)
{
    if (skb->protocol == htons(ETH_P_8021Q))
        return vlan_get_protocol(skb);

    return skb->protocol;
}

/* Return the L4 protocol number of @skb for IPv4/IPv6 packets, or
 * IPPROTO_MAX (an out-of-range sentinel) for anything else.
 */
static inline unsigned int higmac_get_l4_proto(struct sk_buff *skb)
{
    __be16 l3 = higmac_get_l3_proto(skb);

    if (l3 == htons(ETH_P_IP))
        return ip_hdr(skb)->protocol;
    if (l3 == htons(ETH_P_IPV6))
        return ipv6_hdr(skb)->nexthdr;

    return IPPROTO_MAX;
}

/* True when @skb carries an IPv6 packet (VLAN-aware). */
static inline bool higmac_skb_is_ipv6(struct sk_buff *skb)
{
    return higmac_get_l3_proto(skb) == htons(ETH_P_IPV6);
}

/* True when @skb carries a UDP segment (VLAN-aware, IPv4 or IPv6). */
static inline bool higmac_skb_is_udp(struct sk_buff *skb)
{
    return higmac_get_l4_proto(skb) == IPPROTO_UDP;
}

/* The hardware can't deal with UFO broadcast packets: reject them with
 * -ENOTSUPP so the stack falls back to software GSO; otherwise return 0.
 */
STATIC int higmac_check_hw_capability_for_udp(struct sk_buff *skb)
{
    struct ethhdr *eth = (struct ethhdr *)(skb->data);

    if (!skb_is_gso(skb))
        return 0;

    if (is_broadcast_ether_addr(eth->h_dest))
        return -ENOTSUPP;

    return 0;
}

/* For IPv6 packets whose next header is not plain TCP/UDP (i.e. an
 * extension header follows), the hardware can't do GSO or checksum
 * offload.  Reject GSO with -ENOTSUPP, or checksum in software for
 * CHECKSUM_PARTIAL packets (-EFAULT if that fails).  Returns 0 when the
 * packet is offloadable.
 */
static int higmac_check_hw_capability_for_ipv6(struct sk_buff *skb)
{
    unsigned int nexthdr = ipv6_hdr(skb)->nexthdr;

    if (nexthdr == IPPROTO_TCP || nexthdr == IPPROTO_UDP)
        return 0;

    /* extension header present: hardware offload unusable */
    if (skb_is_gso(skb))
        return -ENOTSUPP;

    if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
        return -EFAULT;

    return 0;
}

static inline bool higmac_skb_is_ipv4_with_options(struct sk_buff *skb)
{
    return ((higmac_get_l3_proto(skb) == htons(ETH_P_IP)) && (ip_hdr(skb)->ihl > 5));
}

/* Decide whether the MAC's checksum/GSO offload can handle @skb.
 * Returns 0 when it can; -ENOTSUPP (fall back to software) or -EFAULT
 * (software checksumming failed) otherwise.
 */
static int higmac_check_hw_capability(struct sk_buff *skb)
{
    int ret;

    /* if tcp_mtu_probe() use (2 * tp->mss_cache) as probe_size,
     * the linear data length will be larger than 2048,
     * the MAC can't handle it, so let the software do it.
     */
    if (skb_is_gso(skb) && skb_headlen(skb) > 2048)
        return -ENOTSUPP;

    if (higmac_skb_is_ipv6(skb)) {
        ret = higmac_check_hw_capability_for_ipv6(skb);
        if (ret != 0)
            return ret;
    }

    if (higmac_skb_is_udp(skb)) {
        ret = higmac_check_hw_capability_for_udp(skb);
        if (ret != 0)
            return ret;
    }

    /* offload can't cope with IPv4 options */
    if ((skb->ip_summed == CHECKSUM_PARTIAL || skb_is_gso(skb)) && higmac_skb_is_ipv4_with_options(skb))
        return -ENOTSUPP;

    return 0;
}

/*
 * Compute the UDP checksum in software and write it into the packet,
 * then mark the skb so the hardware does not touch the checksum again.
 * Used when UFO is requested but the hardware can't produce the
 * checksum itself (see higmac_get_l4_info()).
 */
static void higmac_do_udp_checksum(struct sk_buff *skb)
{
    int offset;
    __wsum csum;
    __sum16 udp_csum;

    offset = skb_checksum_start_offset(skb);
    WARN_ON(offset >= skb_headlen(skb));
    csum = skb_checksum(skb, offset, skb->len - offset, 0);

    offset += skb->csum_offset;
    WARN_ON(offset + sizeof(__sum16) > skb_headlen(skb));
    udp_csum = csum_fold(csum);
    /* 0 means "no checksum" for UDP; substitute the mangled form */
    if (udp_csum == 0)
        udp_csum = CSUM_MANGLED_0;

    *(__sum16 *)(skb->data + offset) = udp_csum;

    skb->ip_summed = CHECKSUM_NONE;
}

/*
 * Fill the layer-3 fields of the TX descriptor and return the MSS
 * budget left after the L3 header is accounted for.
 *
 * @l3_proto:   out: layer-3 protocol (looked through a VLAN tag)
 * @l4_proto:   out: layer-4 protocol; only set for IPv4/IPv6
 * @coe_enable: out: cleared when the L3 protocol cannot be offloaded
 * Returns the remaining MSS in bytes.
 */
STATIC unsigned int higmac_get_l3_info(struct higmac_netdev_local *ld, struct sk_buff *skb,
    struct higmac_tso_desc *tx_bq_desc, __be16 *l3_proto, unsigned int *l4_proto, unsigned char *coe_enable)
{
    int max_data_len = skb->len - ETH_HLEN;
    unsigned int max_mss = ETH_DATA_LEN;

    *l3_proto = skb->protocol;
    if (skb->protocol == htons(ETH_P_8021Q)) {
        /* VLAN-tagged: resolve the encapsulated protocol */
        *l3_proto = vlan_get_protocol(skb);
        tx_bq_desc->desc1.tx.vlan_flag = 1;
        max_data_len -= VLAN_HLEN;
    }

    if (*l3_proto == htons(ETH_P_IP)) {
        struct iphdr *iph;

        iph = ip_hdr(skb);
        tx_bq_desc->desc1.tx.ip_ver = PKT_IPV4;
        tx_bq_desc->desc1.tx.ip_hdr_len = iph->ihl;

        /* NOTE(review): for huge packets whose tot_len is missing/short,
         * tot_len is rewritten to GSO_MAX_SIZE-1 — presumably a hardware
         * workaround; confirm against the MAC errata. */
        if ((max_data_len >= GSO_MAX_SIZE) && (ntohs(iph->tot_len) <= (iph->ihl << 2)))
            iph->tot_len = htons(GSO_MAX_SIZE - 1);

        max_mss -= iph->ihl * WORD_TO_BYTE;
        *l4_proto = iph->protocol;
    } else if (*l3_proto == htons(ETH_P_IPV6)) {
        tx_bq_desc->desc1.tx.ip_ver = PKT_IPV6;
        tx_bq_desc->desc1.tx.ip_hdr_len = PKT_IPV6_HDR_LEN;
        max_mss -= PKT_IPV6_HDR_LEN * WORD_TO_BYTE;
        *l4_proto = ipv6_hdr(skb)->nexthdr;
    } else {
        /* neither IPv4 nor IPv6: checksum offload is impossible */
        *coe_enable = 0;
    }

    return max_mss;
}

/*
 * Fill the layer-4 fields of the TX descriptor and derive the final
 * per-segment data length.
 *
 * @l3_proto:   layer-3 protocol from higmac_get_l3_info()
 * @l4_proto:   layer-4 protocol from higmac_get_l3_info()
 * @max_mss:    MSS budget remaining after the L3 header
 * @coe_enable: checksum-offload request; passed by value and cleared
 *              locally when L4 is neither TCP nor UDP
 */
STATIC void higmac_get_l4_info(struct higmac_netdev_local *ld, struct sk_buff *skb, struct higmac_tso_desc *tx_bq_desc,
    __be16 l3_proto, unsigned char l4_proto, unsigned int max_mss, unsigned char coe_enable)
{
    if (l4_proto == IPPROTO_TCP) {
        tx_bq_desc->desc1.tx.prot_type = PKT_TCP;
        /* doff is a 4-bit unsigned bitfield, so the previous
         * "doff >= 0" guard was always true; it has been removed. */
        tx_bq_desc->desc1.tx.prot_hdr_len = tcp_hdr(skb)->doff;
        max_mss -= tcp_hdr(skb)->doff * WORD_TO_BYTE;
    } else if (l4_proto == IPPROTO_UDP) {
        tx_bq_desc->desc1.tx.prot_type = PKT_UDP;
        tx_bq_desc->desc1.tx.prot_hdr_len = PKT_UDP_HDR_LEN;
        /* UFO over IPv6 inserts a fragment header into every segment */
        if (l3_proto == htons(ETH_P_IPV6))
            max_mss -= sizeof(struct frag_hdr);
    } else {
        coe_enable = 0;
    }

    /* per-segment length for GSO, whole-frame length otherwise */
    if (skb_is_gso(skb))
        tx_bq_desc->desc1.tx.data_len = (skb_shinfo(skb)->gso_size > max_mss) ? max_mss : skb_shinfo(skb)->gso_size;
    else
        tx_bq_desc->desc1.tx.data_len = skb->len;

    /* hardware cannot checksum UFO frames; do it in software */
    if (coe_enable && skb_is_gso(skb) && (l4_proto == IPPROTO_UDP))
        higmac_do_udp_checksum(skb);

    if (coe_enable)
        tx_bq_desc->desc1.tx.coe_flag = 1;
}

/*
 * Build the first TX descriptor word for an outgoing packet:
 * TSO/SG flags, L3/L4 header info, fragment count, and ownership bit.
 */
STATIC void higmac_get_pkt_info(struct higmac_netdev_local *ld, struct sk_buff *skb, struct higmac_tso_desc *tx_bq_desc)
{
    int nfrags = skb_shinfo(skb)->nr_frags;
    __be16 l3_proto = 0; /* level 3 protocol */
    unsigned int l4_proto = IPPROTO_MAX;
    unsigned char coe_enable = 0;
    unsigned int max_mss;

    if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
        coe_enable = 1;

    tx_bq_desc->desc1.val = 0;

    /* GSO packets always use scatter-gather; plain frag lists too */
    if (skb_is_gso(skb)) {
        tx_bq_desc->desc1.tx.tso_flag = 1;
        tx_bq_desc->desc1.tx.sg_flag = 1;
    } else if (nfrags) {
        tx_bq_desc->desc1.tx.sg_flag = 1;
    }

    max_mss = higmac_get_l3_info(ld, skb, tx_bq_desc, &l3_proto, &l4_proto, &coe_enable);

    higmac_get_l4_info(ld, skb, tx_bq_desc, l3_proto, l4_proto, max_mss, coe_enable);

    tx_bq_desc->desc1.tx.nfrags_num = nfrags;
    /* hand the descriptor to hardware last */
    tx_bq_desc->desc1.tx.hw_own = DESC_VLD_BUSY;
}

/*
 * Map a linear (non-SG) packet for transmission and record it in the
 * per-descriptor bookkeeping.  With a cache-coherent interconnect
 * (CCI) the physical address is used directly; otherwise the buffer
 * is DMA-mapped.  Returns 0 on success or -EFAULT on mapping failure.
 *
 * The descriptor address writes were duplicated in both branches of
 * the original; they are hoisted below the branch here.
 */
STATIC int higmac_xmit_gso_normal(struct higmac_netdev_local *ld, struct sk_buff *skb,
    struct higmac_tso_desc *tx_bq_desc, unsigned int desc_pos)
{
    dma_addr_t addr;
    int ret;

    if (!HAS_CAP_CCI(ld->hw_cap)) {
        addr = dma_map_single(ld->dev, skb->data, skb->len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ld->dev, addr);
        if (unlikely(ret)) {
            higmac_err("Normal Packet DMA Mapping fail.\n");
            return -EFAULT;
        }
    } else {
        addr = virt_to_phys(skb->data);
    }

    tx_bq_desc->data_buff_addr = (u32)addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    /* upper address bits live in the reserved descriptor word */
    tx_bq_desc->reserve_desc2 = (addr >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK;
#endif

    ld->tx_qmap[desc_pos].dma_addr = addr;
    ld->tx_qmap[desc_pos].len = skb->len;
    ld->tx_qmap[desc_pos].nfrags = 0;

    return 0;
}

/*
 * Map every skb fragment and fill the scatter-gather descriptor's
 * fragment entries.  Returns 0 on success or -EFAULT when a mapping
 * fails.
 *
 * NOTE(review): if mapping fragment i fails, fragments 0..i-1 remain
 * DMA-mapped here — presumably the TX reclaim path unmaps them via
 * tx_qmap; verify, otherwise this leaks mappings on failure.
 */
STATIC int higmac_xmit_nfrages_set_bd(struct higmac_netdev_local *ld, struct sk_buff *skb, struct sg_desc *desc_cur,
    unsigned int desc_pos)
{
    int nfrags = skb_shinfo(skb)->nr_frags;
    dma_addr_t dma_addr;
    phys_addr_t phys_addr;
    int ret;
    int i;

    ld->tx_qmap[desc_pos].nfrags = nfrags;

    for (i = 0; i < nfrags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        int len = frag->bv_len;

        if (!HAS_CAP_CCI(ld->hw_cap)) {
            dma_addr = skb_frag_dma_map(ld->dev, frag, 0, len, DMA_TO_DEVICE);
            ret = dma_mapping_error(ld->dev, dma_addr);
            if (unlikely(ret)) {
                higmac_err("skb frag DMA Mapping fail.");
                return -EFAULT;
            }
            desc_cur->frags[i].addr = (u32)dma_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
            /* upper address bits go into the reserved field */
            desc_cur->frags[i].reserved = (dma_addr >> REG_BIT_WIDTH) << SG_DESC_HI8_OFFSET;
#endif

            ld->tx_qmap[desc_pos].frags_dma[i] = dma_addr;
            ld->tx_qmap[desc_pos].frags_len[i] = len;
        } else {
            /* cache-coherent path: use the physical address directly */
            phys_addr = page_to_phys(skb_frag_page(frag)) + frag->bv_offset;
            desc_cur->frags[i].addr = (u32)phys_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
            desc_cur->frags[i].reserved = (phys_addr >> REG_BIT_WIDTH) << SG_DESC_HI8_OFFSET;
#endif
        }
        desc_cur->frags[i].size = len;
    }

    return 0;
}


/*
 * Transmit a scatter-gather (TSO or multi-fragment) packet: claim a
 * slot in the SG descriptor ring, map the linear part and all
 * fragments, then point the TX descriptor at the SG descriptor.
 * Returns 0 on success, -EBUSY when the SG ring is full, -EFAULT on
 * a DMA mapping failure.
 *
 * NOTE(review): on -EFAULT from the fragment helper the linear
 * mapping made above is not unwound here — presumably the reclaim
 * path handles it via tx_qmap; verify.
 */
STATIC int higmac_xmit_gso_nfrages(struct higmac_netdev_local *ld, struct sk_buff *skb,
    struct higmac_tso_desc *tx_bq_desc, unsigned int desc_pos)
{
    struct sg_desc *desc_cur = NULL;
    phys_addr_t phys_addr;
    dma_addr_t dma_addr;
    dma_addr_t addr;
    int ret;

    if (unlikely(((ld->sg_head + 1) % ld->sg_count) == ld->sg_tail)) {
        /* SG pkt, but sg desc all used */
        higmac_err("WARNING: sg desc all used.\n");
        return -EBUSY;
    }

    desc_cur = ld->dma_sg_desc + ld->sg_head;

    /* UFO over IPv6 needs the fragment-id; it was removed from
     * shared_info in kernel 4.15, so write 0 on newer kernels. */
    if (tx_bq_desc->desc1.tx.tso_flag && tx_bq_desc->desc1.tx.ip_ver == PKT_IPV6 &&
        tx_bq_desc->desc1.tx.prot_type == PKT_UDP) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
        desc_cur->ipv6_id = ntohl(skb_shinfo(skb)->ip6_frag_id);
#else
        desc_cur->ipv6_id = 0;
#endif
    }

    desc_cur->total_len = skb->len;
    desc_cur->linear_len = skb_headlen(skb);

    if (!HAS_CAP_CCI(ld->hw_cap)) {
        dma_addr = dma_map_single(ld->dev, skb->data, desc_cur->linear_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ld->dev, dma_addr);
        if (unlikely(ret)) {
            higmac_err("DMA Mapping fail.");
            return -EFAULT;
        }
        desc_cur->linear_addr = (u32)dma_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
        desc_cur->reserv3 = (dma_addr >> REG_BIT_WIDTH) << SG_DESC_HI8_OFFSET;
#endif
        ld->tx_qmap[desc_pos].dma_addr = dma_addr;
        ld->tx_qmap[desc_pos].len = desc_cur->linear_len;
    } else {
        phys_addr = virt_to_phys(skb->data);
        desc_cur->linear_addr = (u32)phys_addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
        desc_cur->reserv3 = (phys_addr >> REG_BIT_WIDTH) << SG_DESC_HI8_OFFSET;
#endif
    }
    if ((ret = higmac_xmit_nfrages_set_bd(ld, skb, desc_cur, desc_pos)) != 0)
        return ret;

    /* the TX descriptor points at the SG descriptor, not the data */
    addr = ld->dma_sg_phy + ld->sg_head * sizeof(struct sg_desc);
    tx_bq_desc->data_buff_addr = (u32)addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    tx_bq_desc->reserve_desc2 = (addr >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK;
#endif

    ld->tx_bq.sg_desc_offset[desc_pos] = ld->sg_head;

    /* advance the SG ring head only after the slot is fully filled */
    ld->sg_head = (ld->sg_head + 1) % ld->sg_count;

    return 0;
}


/*
 * Top-level TSO/SG transmit: validate hardware support, fill the
 * descriptor, then dispatch to the linear or scatter-gather path.
 * Returns 0 on success or a negative error (-ENOTSUPP triggers the
 * software GSO fallback in the caller).
 */
static int higmac_xmit_gso(struct higmac_netdev_local *ld, struct sk_buff *skb, struct higmac_tso_desc *tx_bq_desc,
    unsigned int desc_pos)
{
    int nfrags = skb_shinfo(skb)->nr_frags;
    int pkt_type;
    int ret;

    /* TSO and fragmented packets go through the SG path */
    pkt_type = (skb_is_gso(skb) || nfrags) ? PKT_SG : PKT_NORMAL;

    ret = higmac_check_hw_capability(skb);
    if (unlikely(ret)) {
        higmac_err("hw capability check failed! ret=%d\n", ret);
        return ret;
    }

    higmac_get_pkt_info(ld, skb, tx_bq_desc);

    if (pkt_type == PKT_SG)
        return higmac_xmit_gso_nfrages(ld, skb, tx_bq_desc, desc_pos);

    return higmac_xmit_gso_normal(ld, skb, tx_bq_desc, desc_pos);
}

STATIC netdev_tx_t higmac_net_xmit(struct sk_buff *skb, struct net_device *dev);

/*
 * Software GSO fallback: segment the skb in software and transmit
 * each resulting segment through higmac_net_xmit().  Used when the
 * hardware cannot offload the packet (see higmac_check_hw_capability).
 * Always consumes the original skb.
 */
STATIC netdev_tx_t higmac_sw_gso(struct higmac_netdev_local *ld, struct sk_buff *skb)
{
    struct sk_buff *segs = NULL, *curr_skb = NULL;

    int gso_segs = skb_shinfo(skb)->gso_segs;
    /* gso_segs may be unset; estimate from the total length */
    if (gso_segs == 0 && skb_shinfo(skb)->gso_size != 0)
        gso_segs = DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
    /* Estimate the number of fragments in the worst case */
    if (unlikely(higmac_tx_avail(ld) < gso_segs)) {
        netif_stop_queue(ld->netdev);
        if (higmac_tx_avail(ld) < gso_segs) {
            ld->netdev->stats.tx_dropped++;
            ld->netdev->stats.tx_fifo_errors++;
            return NETDEV_TX_BUSY;
        }
        netif_wake_queue(ld->netdev);
    }

    /* strip offload features so the core fully segments and checksums */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
    /* cloud */
    segs = skb_gso_segment(skb, ld->netdev->features & ~(NETIF_F_CSUM_MASK | NETIF_F_SG | NETIF_F_GSO_SOFTWARE));
#else
    /* mini */
    segs = skb_gso_segment(skb, ld->netdev->features & ~(NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_GSO_SOFTWARE));
#endif
    if (IS_ERR_OR_NULL(segs))
        goto drop;
    /* walk the segment list, detaching each before transmit */
    do {
        curr_skb = segs;
        segs = segs->next;
        curr_skb->next = NULL;
        higmac_net_xmit(curr_skb, ld->netdev);
    } while (segs != NULL);

    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;

drop:
    dev_kfree_skb_any(skb);
    ld->netdev->stats.tx_dropped++;
    return NETDEV_TX_OK;
}

/*
 * .ndo_start_xmit handler: place one skb into the TX BQ ring.
 * Runt frames are dropped; TSO-capable hardware goes through
 * higmac_xmit_gso() (with a software-GSO fallback on -ENOTSUPP);
 * otherwise the packet is mapped and described inline.  The write
 * pointer is advanced under ld->txlock.
 */
STATIC netdev_tx_t higmac_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct higmac_netdev_local *ld = netdev_priv(dev);
    struct higmac_desc *desc = NULL;
    dma_addr_t addr;
    unsigned long txflags;
    int ret;
    u32 pos;

    /* drop frames shorter than an Ethernet header */
    if (skb->len < ETH_HLEN) {
        dev_kfree_skb_any(skb);
        dev->stats.tx_errors++;
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    /* if adding higmac_xmit_reclaim here, iperf tcp client
     * performance will be affected, from 550M(avg) to 513M~300M
     */
    /* software write pointer */
    pos = dma_cnt((u32)readl(ld->gmac_iobase + TX_BQ_WR_ADDR));

    spin_lock_irqsave(&ld->txlock, txflags);

    /* slot still owned by an in-flight skb: ring is full */
    if (unlikely(ld->tx_qmap[pos].skb || ld->tx_bq.skb[pos])) {
        dev->stats.tx_dropped++;
        dev->stats.tx_fifo_errors++;
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&ld->txlock, txflags);

        return NETDEV_TX_BUSY;
    }

    ld->tx_bq.skb[pos] = skb;
    ld->tx_qmap[pos].skb = skb;

    desc = ld->tx_bq.desc + pos;

    if (ld->tso_supported) {
        ret = higmac_xmit_gso(ld, skb, (struct higmac_tso_desc *)desc, pos);
        if (unlikely(ret < 0)) {
            /* undo the slot claim before falling back or dropping */
            ld->tx_qmap[pos].skb = NULL;
            ld->tx_bq.skb[pos] = NULL;
            spin_unlock_irqrestore(&ld->txlock, txflags);

            if (ret == -ENOTSUPP)
                return higmac_sw_gso(ld, skb);

            dev_kfree_skb_any(skb);
            dev->stats.tx_dropped++;
            return NETDEV_TX_OK;
        }
    } else {
        if (!HAS_CAP_CCI(ld->hw_cap)) {
            addr = dma_map_single(ld->dev, skb->data, skb->len, DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(ld->dev, addr))) {
                higmac_err("dma_map_single failed!\n");
                dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                ld->tx_qmap[pos].skb = NULL;
                ld->tx_bq.skb[pos] = NULL;
                spin_unlock_irqrestore(&ld->txlock, txflags);
                return NETDEV_TX_OK;
            }
            desc->data_buff_addr = (u32)addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
            /* high address bits are stored in the rxhash field on TX */
            desc->rxhash = (addr >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK;
#endif
        } else {
            addr = virt_to_phys(skb->data);
            desc->data_buff_addr = (u32)addr;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
            desc->rxhash = (addr >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK;
#endif
        }

        ld->tx_qmap[pos].dma_addr = addr;
        ld->tx_qmap[pos].len = skb->len;
        ld->tx_qmap[pos].nfrags = 0;

        desc->buffer_len = HIETH_MAX_FRAME_SIZE - 1;
        desc->data_len = skb->len;
        desc->fl = DESC_FL_FULL;
        desc->descvid = DESC_VLD_BUSY;
    }

    /* This barrier is important here.  It is required to ensure
     * the ARM CPU flushes it's DMA write buffers before proceeding
     * to the next instruction, to ensure that GMAC will see
     * our descriptor changes in memory
     */
    HIGMAC_SYNC_BARRIER();

    /* kick the hardware by advancing the write pointer */
    pos = dma_ring_incr(pos, TX_DESC_NUM);
    writel(dma_byte(pos), ld->gmac_iobase + TX_BQ_WR_ADDR);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
    /* cloud */
    netif_trans_update(dev);
#else
    dev->trans_start = jiffies;
#endif
    dev->stats.tx_packets++;
    dev->stats.tx_bytes += skb->len;
    netdev_sent_queue(dev, skb->len);

    spin_unlock_irqrestore(&ld->txlock, txflags);

    return NETDEV_TX_OK;
}

/* Enable NAPI polling on every RX queue of the device. */
void higmac_enable_napi(struct higmac_netdev_local *priv)
{
    int q;

    for (q = 0; q < priv->num_rxqs; q++)
        napi_enable(&priv->q_napi[q].napi);
}

/* Disable NAPI polling on every RX queue of the device. */
void higmac_disable_napi(struct higmac_netdev_local *priv)
{
    int q;

    for (q = 0; q < priv->num_rxqs; q++)
        napi_disable(&priv->q_napi[q].napi);
}

/*
 * .ndo_open handler: bring the interface up — enable clocks, restore
 * the MAC address, start NAPI/PHY/hardware, refill the RX ring and
 * start the TX queue and service timer.  Always returns 0.
 */
STATIC int higmac_net_open(struct net_device *ndev)
{
    struct higmac_netdev_local *ld = netdev_priv(ndev);
    unsigned long flags;

    higmac_clk_enable(ld);

    /* If we configure mac address by
     * "ifconfig ethX hw ether XX:XX:XX:XX:XX:XX",
     * the ethX must be down state and mac core clock is disabled
     * which results the mac address has not been configured
     * in mac core register.
     * So we must set mac address again here,
     * because mac core clock is enabled at this time
     * and we can configure mac address to mac core register.
     */
    higmac_hw_set_mac_addr(ndev);

    /* We should use netif_carrier_off() here,
     * because the default state should be off.
     * And this call should before phy_start().
     */
    ld->old_link = 0;
    netif_carrier_off(ndev);

    higmac_enable_napi(ld);

    higmac_info("napi enabled.\n");

    if (ld->phy != NULL)
        phy_start(ld->phy);

    higmac_hw_desc_enable(ld);
    higmac_port_enable(ld);
    higmac_irq_enable_all_queue(ld);

    higmac_info("hardware enabled.\n");

    /* RX refill shares ld->rxlock with the NAPI poll path */
    spin_lock_irqsave(&ld->rxlock, flags);
    higmac_rx_refill(ld);
    spin_unlock_irqrestore(&ld->rxlock, flags);

    higmac_info("rx queue fill ok.\n");

    netif_start_queue(ndev);

    mod_timer(&ld->service_timer, jiffies + SERVICE_TIMER_HZ);

    higmac_info("higmac open successful.\n");

    return 0;
}

/*
 * .ndo_stop handler: tear down in the reverse order of open — stop
 * the service timer, quiesce the hardware, stop NAPI, queues, the
 * PHY, and finally gate the clocks.  Always returns 0.
 */
STATIC int higmac_net_close(struct net_device *dev)
{
    struct higmac_netdev_local *ld = netdev_priv(dev);

    del_timer_sync(&ld->service_timer);

    higmac_irq_disable_all_queue(ld);
    higmac_port_disable(ld);
    higmac_hw_desc_disable(ld);

    higmac_info("hardware disabled.\n");

    higmac_disable_napi(ld);

    netif_carrier_off(dev);
    netif_stop_queue(dev);

    /* fixed log typo: "stoped" -> "stopped" */
    higmac_info("netdev queue stopped.\n");

    if (ld->phy != NULL)
        phy_stop(ld->phy);

    higmac_clk_disable(ld);

    higmac_info("higmac close successful.\n");

    return 0;
}

/*
 * .ndo_tx_timeout handler: bump the error counter, report the event
 * through the DFM/RDR facility (rdr_mng is a file-scope reporting
 * context), then restart the queue so transmission can resume.
 */
STATIC void higmac_net_timeout(struct net_device *dev, unsigned int txqueue)
{
    int ret;

    dev->stats.tx_errors++;

    ret = snprintf_s(rdr_mng.buffer, rdr_mng.buf_len, rdr_mng.buf_len - 1, "%s tx_errors = %lx\n",
        HIGMAC_E_DESC_TIMEOUT, dev->stats.tx_errors);
    /* snprintf_s returns the number of characters written, <= 0 on error */
    if (ret > 0) {
        higmac_dfm_error(rdr_mng.buffer, HIGMAC_E_TX_TIMEOUT);
    }

    netif_wake_queue(dev);
    netdev_reset_queue(dev);
}

/* .ndo_set_rx_mode handler: delegate multicast filtering to the MAC helper. */
static void higmac_set_multicast_list(struct net_device *dev)
{
    higmac_gmac_multicast_list(dev);
}

/*
 * Toggle the "drop frames with bad RX checksum" control bit.
 * Bit 0 of TSO_COE_CTRL is cleared unconditionally, matching the
 * original behavior.
 */
static inline void higmac_enable_rxcsum_drop(struct higmac_netdev_local *ld, bool drop)
{
    unsigned int ctrl = readl(ld->gmac_iobase + TSO_COE_CTRL);

    if (drop)
        ctrl |= COE_ERR_DROP;
    else
        ctrl &= ~COE_ERR_DROP;

    ctrl &= ~BIT(0);
    writel(ctrl, ld->gmac_iobase + TSO_COE_CTRL);
}

/*
 * .ndo_set_features handler: react to NETIF_F_RXCSUM toggles by
 * enabling or disabling the hardware checksum-error drop.
 */
static int higmac_set_features(struct net_device *dev, netdev_features_t features)
{
    struct higmac_netdev_local *ld = netdev_priv(dev);
    netdev_features_t changed = dev->features ^ features;

    if (changed & NETIF_F_RXCSUM)
        higmac_enable_rxcsum_drop(ld, (features & NETIF_F_RXCSUM) != 0);

    return 0;
}

/* .ndo_get_stats handler: statistics live directly in dev->stats. */
static struct net_device_stats *higmac_net_get_stats(struct net_device *dev)
{
    return &dev->stats;
}

/*
 * .ndo_do_ioctl handler: forward MII ioctls to the attached PHY.
 * Returns -EINVAL when the interface is down, -ENODEV without a PHY.
 */
STATIC int higmac_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct higmac_netdev_local *priv = netdev_priv(dev);

    if (!netif_running(dev))
        return -EINVAL;

    if (priv->phy == NULL)
        return -ENODEV;

    return phy_mii_ioctl(priv->phy, rq, cmd);
}

/*
 * ethtool .get_drvinfo: report driver name, version and bus info.
 * Copy failures are logged but otherwise ignored (best effort).
 */
STATIC void higmac_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info)
{
    int err;

    err = strncpy_s(info->driver, ETHTOOL_BUF_MAX_LEN, HIGMAC_DRIVER_NAME, sizeof(HIGMAC_DRIVER_NAME));
    if (err != 0) {
        higmac_err("strncpy_s driver name failed. ret %d\n", err);
    }

    err = strncpy_s(info->version, ETHTOOL_BUF_MAX_LEN, HIGMAC_DRIVER_SERSION, sizeof(HIGMAC_DRIVER_SERSION));
    if (err != 0) {
        higmac_err("strncpy_s driver sersion failed. ret %d\n", err);
    }

    err = strncpy_s(info->bus_info, ETHTOOL_BUSINFO_LEN, HIGMAC_BUS_NAME, sizeof(HIGMAC_BUS_NAME));
    if (err != 0) {
        higmac_err("strncpy_s bus name failed. ret %d\n", err);
    }
}

/*
 * ethtool .get_link: report link state from the PHY when present,
 * otherwise fall back to the driver's cached link state.
 */
STATIC unsigned int higmac_get_link(struct net_device *net_dev)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);

    if (ld->phy == NULL)
        return ld->old_link;

    return ld->phy->link ? HIGMAC_LINKED : 0;
}

/*
 * ethtool speed/duplex get/set: the legacy ethtool_cmd API was
 * replaced by ethtool_link_ksettings in kernel 4.13, so both
 * variants are kept behind a version gate.  Without a PHY the
 * driver reports/accepts forced settings only (autoneg disabled).
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
STATIC int higmac_get_settings(struct net_device *net_dev, struct ethtool_cmd *cmd)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);

    if (ld->phy != NULL) {
        return phy_ethtool_gset(ld->phy, cmd);
    } else {
        /* no PHY: report the cached forced settings */
        cmd->autoneg = AUTONEG_DISABLE;
        cmd->duplex = ld->old_duplex;
        ethtool_cmd_speed_set(cmd, ld->old_speed);
        return 0;
    }
}

STATIC int higmac_set_settings(struct net_device *net_dev, struct ethtool_cmd *cmd)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    if (ld->phy != NULL) {
        return phy_ethtool_sset(ld->phy, cmd);
    } else {
        /* no PHY: only forced (non-autoneg) settings are accepted */
        if (cmd->autoneg != AUTONEG_DISABLE)
            return -EINVAL;
        ld->cfg_speed = ethtool_cmd_speed(cmd);
        ld->cfg_duplex = cmd->duplex;

        return 0;
    }
}
#else
STATIC int higmac_get_link_settings(struct net_device *net_dev, struct ethtool_link_ksettings *cmd)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);

    if (ld->phy != NULL) {
        return phy_ethtool_get_link_ksettings(net_dev, cmd);
    } else {
        /* no PHY: report the cached forced settings */
        cmd->base.autoneg = AUTONEG_DISABLE;
        cmd->base.speed = ld->old_speed;
        cmd->base.duplex = ld->old_duplex;
        return 0;
    }
}

STATIC int higmac_set_link_settings(struct net_device *net_dev, const struct ethtool_link_ksettings *cmd)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);

    if (ld->phy != NULL) {
        return phy_ethtool_set_link_ksettings(net_dev, cmd);
    } else {
        /* no PHY: only forced (non-autoneg) settings are accepted */
        if (cmd->base.autoneg != AUTONEG_DISABLE)
            return -EINVAL;
        ld->cfg_speed = cmd->base.speed;
        ld->cfg_duplex = cmd->base.duplex;
        return 0;
    }
}
#endif

/* Names of the ethtool self-tests (ETH_SS_TEST string set). */
static struct {
    const char str[ETH_GSTRING_LEN];
} higmac_gstrings_test[] = {
    { "Mac    Loopback test" },
    { "Serdes Loopback test" },
    { "Phy    Loopback test" }
};

/*
 * Names of the ethtool statistics (ETH_SS_STATS string set).
 * The order MUST match the readl() sequence in
 * higmac_get_ethtool_stats() exactly.
 */
static struct {
    const char str[ETH_GSTRING_LEN];
} higmac_ethtool_stats[] = {
    { "RX_OCTETS_OK_CNT" },
    { "RX_OCTETS_BAD_CNT" },
    { "RX_UC_PKTS" },
    { "RX_MC_PKTS" },
    { "RX_BC_PKTS" },
    { "RX_PKTS_64OCTETS" },
    { "RX_PKTS_65TO127OCTETS" },
    { "RX_PKTS_128TO255OCTETS" },
    { "RX_PKTS_256TO511OCTETS" },
    { "RX_PKTS_512TO1023OCTETS" },
    { "RX_PKTS_1024TO1518OCTETS" },
    { "RX_PKTS_1519TOMAXOCTETS" },
    { "RX_FCS_ERRORS" },
    { "RX_TAGGED" },
    { "RX_DATA_ERR" },
    { "RX_ALIGN_ERRORS" },
    { "RX_LONG_ERRORS" },
    { "RX_JABBER_ERRORS" },
    { "RX_PAUSE_MACCTRL_FRAMCNT" },
    { "RX_UNKNOWN_MACCTRL_FRAMCNT" },
    { "RX_VERY_LONG_ERR_CNT" },
    { "RX_RUNT_ERR_CNT" },
    { "RX_SHORT_ERR_CNT" },
    { "OCTETS_TRANSMITTED_OK" },
    { "OCTETS_TRANSMITTED_BAD" },
    { "TX_UC_PKTS" },
    { "TX_MC_PKTS" },
    { "TX_BC_PKTS" },
    { "TX_PKTS_64OCTETS" },
    { "TX_PKTS_65TO127OCTETS" },
    { "TX_PKTS_128TO255OCTETS" },
    { "TX_PKTS_256TO511OCTETS" },
    { "TX_PKTS_512TO1023OCTETS" },
    { "TX_PKTS_1024TO1518OCTETS" },
    { "TX_PKTS_1519TOMAXOCTETS" },
    { "TX_LATE_COL_CNT" },
    { "TX_EXC_COL_ERR_CNT" },
    { "TX_EXCESSIVE_LENGTH_DROP" },
    { "TX_UNDERRUN" },
    { "TX_TAGGED" },
    { "TX_CRC_ERROR" },
    { "TX_PAUSE_FRAMES" },
    { "RX_OVERRUN_CNT" },
    { "RX_LATE_COL_CNT" },
    { "RX_LENGTHFIELD_ERR_CNT" },
    { "ERR_GIVEN_PKG_CNT" },
    { "SHORT_ERR_PKT_CNT" },
    { "OVER_FLOW_CNT" },
    { "OVER_LENGTH_CNT" },
    { "RX_COE_DROP_CNT" },
    { "RX_COE_PKT_CNT" },
    { "TSO_PKT_CNT" },
    { "TSO_PKT_EXC_CNT" }
};

/*
 * ethtool .get_sset_count: number of strings in the requested
 * string set, or -EOPNOTSUPP for unsupported sets.
 */
STATIC int higmac_get_sset_count(struct net_device *net_dev, int sset)
{
    if (sset == ETH_SS_TEST)
        return ARRAY_SIZE(higmac_gstrings_test);

    if (sset == ETH_SS_STATS)
        return ARRAY_SIZE(higmac_ethtool_stats);

    return -EOPNOTSUPP;
}

/*
 * ethtool .get_strings: copy the requested string set into @buf.
 * The original treated every non-TEST set as stats; handle the two
 * supported sets explicitly and ignore anything else (the core only
 * calls this for sets that get_sset_count() reported).
 */
STATIC void higmac_get_strings(struct net_device *net_dev, u32 strset, u8 *buf)
{
    switch (strset) {
        case ETH_SS_TEST:
            if (memcpy_s(buf, sizeof(higmac_gstrings_test), &higmac_gstrings_test,
                sizeof(higmac_gstrings_test)) != EOK)
                higmac_err("memcpy_s failed.\n");
            break;
        case ETH_SS_STATS:
            if (memcpy_s(buf, sizeof(higmac_ethtool_stats), &higmac_ethtool_stats,
                sizeof(higmac_ethtool_stats)) != EOK)
                higmac_err("memcpy_s failed.\n");
            break;
        default:
            /* unsupported set: nothing to copy */
            break;
    }
}

/*
 * ethtool .get_ethtool_stats: read every MAC statistics register.
 * The read order MUST match higmac_ethtool_stats[] entry for entry.
 */
STATIC void higmac_get_ethtool_stats(struct net_device *net_dev, struct ethtool_stats *estats, u64 *stats)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);
    int i = 0;

    stats[i++] = readl(ld->gmac_iobase + RX_OCTETS_OK_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_OCTETS_BAD_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_UC_PKTS);
    stats[i++] = readl(ld->gmac_iobase + RX_MC_PKTS);
    stats[i++] = readl(ld->gmac_iobase + RX_BC_PKTS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_64OCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_65TO127OCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_128TO255OCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_256TO511OCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_512TO1023OCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_1024TO1518OCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_PKTS_1519TOMAXOCTETS);
    stats[i++] = readl(ld->gmac_iobase + RX_FCS_ERRORS);
    stats[i++] = readl(ld->gmac_iobase + RX_TAGGED);
    stats[i++] = readl(ld->gmac_iobase + RX_DATA_ERR);
    stats[i++] = readl(ld->gmac_iobase + RX_ALIGN_ERRORS);
    stats[i++] = readl(ld->gmac_iobase + RX_LONG_ERRORS);
    stats[i++] = readl(ld->gmac_iobase + RX_JABBER_ERRORS);
    stats[i++] = readl(ld->gmac_iobase + RX_PAUSE_MACCTRL_FRAMCNT);
    stats[i++] = readl(ld->gmac_iobase + RX_UNKNOWN_MACCTRL_FRAMCNT);
    stats[i++] = readl(ld->gmac_iobase + RX_VERY_LONG_ERR_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_RUNT_ERR_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_SHORT_ERR_CNT);
    stats[i++] = readl(ld->gmac_iobase + OCTETS_TRANSMITTED_OK);
    stats[i++] = readl(ld->gmac_iobase + OCTETS_TRANSMITTED_BAD);
    stats[i++] = readl(ld->gmac_iobase + TX_UC_PKTS);
    stats[i++] = readl(ld->gmac_iobase + TX_MC_PKTS);
    stats[i++] = readl(ld->gmac_iobase + TX_BC_PKTS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_64OCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_65TO127OCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_128TO255OCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_256TO511OCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_512TO1023OCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_1024TO1518OCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_PKTS_1519TOMAXOCTETS);
    stats[i++] = readl(ld->gmac_iobase + TX_LATE_COL_CNT);
    stats[i++] = readl(ld->gmac_iobase + TX_EXC_COL_ERR_CNT);
    stats[i++] = readl(ld->gmac_iobase + TX_EXCESSIVE_LENGTH_DROP);
    stats[i++] = readl(ld->gmac_iobase + TX_UNDERRUN);
    stats[i++] = readl(ld->gmac_iobase + TX_TAGGED);
    stats[i++] = readl(ld->gmac_iobase + TX_CRC_ERROR);
    stats[i++] = readl(ld->gmac_iobase + TX_PAUSE_FRAMES);
    stats[i++] = readl(ld->gmac_iobase + RX_OVERRUN_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_LATE_COL_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_LENGTHFIELD_ERR_CNT);
    stats[i++] = readl(ld->gmac_iobase + ERR_GIVEN_PKG_CNT);
    stats[i++] = readl(ld->gmac_iobase + SHORT_ERR_PKT_CNT);
    stats[i++] = readl(ld->gmac_iobase + OVER_FLOW_CNT);
    stats[i++] = readl(ld->gmac_iobase + OVER_LENGTH_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_COE_DROP_CNT);
    stats[i++] = readl(ld->gmac_iobase + RX_COE_PKT_CNT);
    stats[i++] = readl(ld->gmac_iobase + TSO_PKT_CNT);
    stats[i++] = readl(ld->gmac_iobase + TSO_PKT_EXC_CNT);

    return;
}

/*
 * ethtool .get_pauseparam: report autoneg from the PHY (when present)
 * and the RX/TX pause flags only while pause is actually negotiated.
 */
STATIC void higmac_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);

    pause->rx_pause = 0;
    pause->tx_pause = 0;

    if (ld->phy != NULL)
        pause->autoneg = ld->phy->autoneg;

    if (!ld->old_pause)
        return;

    if (ld->flow_ctrl & FLOW_RX)
        pause->rx_pause = 1;
    if (ld->flow_ctrl & FLOW_TX)
        pause->tx_pause = 1;
}

/*
 * ethtool .set_pauseparam: record the requested RX/TX pause flags,
 * reprogram the flow-control hardware, and restart autonegotiation
 * when a PHY is attached, autoneg is on and the link is running.
 */
STATIC int higmac_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause)
{
    struct higmac_netdev_local *ld = netdev_priv(net_dev);
    struct phy_device *phy = ld->phy;
    unsigned int requested = FLOW_OFF;

    if (pause->rx_pause)
        requested |= FLOW_RX;
    if (pause->tx_pause)
        requested |= FLOW_TX;

    ld->flow_ctrl = requested;

    higmac_set_flow_ctrl_state(ld, ld->old_pause);

    if (phy != NULL && phy->autoneg && netif_running(net_dev))
        return phy_start_aneg(phy);

    return 0;
}

/* ethtool .get_msglevel: return the driver's message-enable bitmap. */
STATIC u32 higmac_ethtool_getmsglevel(struct net_device *ndev)
{
    struct higmac_netdev_local *priv = netdev_priv(ndev);

    return priv->msg_enable;
}

/* ethtool .set_msglevel: store the new message-enable bitmap. */
STATIC void higmac_ethtool_setmsglevel(struct net_device *ndev, u32 level)
{
    struct higmac_netdev_local *priv = netdev_priv(ndev);

    priv->msg_enable = level;
}


/*
 * Read the RSS hash key register into the cached key.
 * NOTE(review): only the first 4 bytes of rss->key are written —
 * presumably the hardware exposes a single 32-bit key register;
 * confirm key length against the register spec.
 */
static void higmac_get_rss_key(struct higmac_netdev_local *priv)
{
    struct higmac_rss_info *rss = &priv->rss_info;
    u32 hkey;

    hkey = readl(priv->gmac_iobase + RSS_HASH_KEY);
    *((u32 *)rss->key) = hkey;
}

/*
 * Poll the RSS indirection-table register until its READY bit is set.
 * Gives up after 10000 polls (10-20us apart) with -ETIMEDOUT.
 */
static int higmac_wait_rss_ready(struct higmac_netdev_local *priv)
{
    const int max_tries = 10000;
    void __iomem *base = priv->gmac_iobase;
    int tries = 0;

    while (!((u32)readl(base + RSS_IND_TBL) & BIT_IND_TBL_READY)) {
        if (tries == max_tries) {
            higmac_err("wait rss ready timeout!\n");
            return -ETIMEDOUT;
        }
        tries++;
        usleep_range(10, 20);
    }

    return 0;
}

/*
 * Read back the whole RSS indirection table into the cached copy.
 * Each entry is selected by writing its index, then read back.
 * NOTE(review): the ">> 10 & 0x3" extraction presumably picks the
 * 2-bit queue field out of the readback value — confirm against
 * the RSS_IND_TBL register layout.
 */
static void higmac_get_rss(struct higmac_netdev_local *priv)
{
    struct higmac_rss_info *rss = &priv->rss_info;
    u32 rss_val;
    int i;

    for (i = 0; i < rss->ind_tbl_size; i++) {
        if (higmac_wait_rss_ready(priv))
            break;
        writel(i, priv->gmac_iobase + RSS_IND_TBL);
        if (higmac_wait_rss_ready(priv))
            break;
        rss_val = readl(priv->gmac_iobase + RSS_IND_TBL);
        rss->ind_tbl[i] = (rss_val >> 10) & 0x3;
    }
}

/* Program the cached RSS hash-field configuration into the MAC. */
static void higmac_config_hash_policy(struct higmac_netdev_local *priv)
{
    writel(priv->rss_info.hash_cfg, priv->gmac_iobase + RSS_HASH_CONFIG);
}

/* ethtool operations table; the settings callbacks are version-gated
 * because ethtool_cmd was replaced by link_ksettings in kernel 4.13. */
static const struct ethtool_ops hieth_ethtools_ops = {
    .get_drvinfo = higmac_get_drvinfo,
    .get_link = higmac_get_link,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
    .get_settings = higmac_get_settings,
    .set_settings = higmac_set_settings,
#else
    .get_link_ksettings = higmac_get_link_settings,
    .set_link_ksettings = higmac_set_link_settings,
#endif
    .get_strings = higmac_get_strings,
    .get_sset_count = higmac_get_sset_count,
    .get_ethtool_stats = higmac_get_ethtool_stats,
    .get_pauseparam = higmac_get_pauseparam,
    .set_pauseparam = higmac_set_pauseparam,
    .get_msglevel = higmac_ethtool_getmsglevel,
    .set_msglevel = higmac_ethtool_setmsglevel,
};

/*
 * .ndo_change_mtu handler: accept any MTU the core passes (range
 * checking is done by the networking core) while warning that this
 * path is deprecated.
 */
static int higmac_eth_change_mtu(struct net_device *dev, int new_mtu)
{
    netdev_warn(dev, "%s is deprecated\n", __func__);
    dev->mtu = new_mtu;
    return 0;
}

/* Netdevice operations table wired up at probe time. */
static const struct net_device_ops hieth_netdev_ops = {
    .ndo_open = higmac_net_open,
    .ndo_stop = higmac_net_close,
    .ndo_start_xmit = higmac_net_xmit,
    .ndo_tx_timeout = higmac_net_timeout,
    .ndo_set_rx_mode = higmac_set_multicast_list,
    .ndo_set_features = higmac_set_features,
    .ndo_set_mac_address = higmac_net_set_mac_address,
    .ndo_change_mtu = higmac_eth_change_mtu,
    .ndo_get_stats = higmac_net_get_stats,
    .ndo_do_ioctl = higmac_net_ioctl,
};

/*
 * Read the device-tree boolean properties this driver cares about:
 * "autoeee" and "internal-phy".  Always returns 0.
 */
static int higmac_of_get_param(struct higmac_netdev_local *ld, struct device_node *node)
{
    ld->autoeee = of_property_read_bool(node, "autoeee");
    ld->internal_phy = of_property_read_bool(node, "internal-phy");

    return 0;
}

/*
 * Sanitize the module-level flow-control parameters (declared
 * elsewhere in this file): clamp thresholds, pause time and pause
 * interval to their valid ranges, falling back to defaults whenever
 * a value is out of range or internally inconsistent.
 */
static void higmac_verify_flow_ctrl_args(void)
{
#if defined(CONFIG_TX_FLOW_CTRL_SUPPORT)
    flow_ctrl_en |= FLOW_TX;
#endif
#if defined(CONFIG_RX_FLOW_CTRL_SUPPORT)
    flow_ctrl_en |= FLOW_RX;
#endif
    if (tx_flow_ctrl_active_threshold < FC_ACTIVE_MIN || tx_flow_ctrl_active_threshold > FC_ACTIVE_MAX)
        tx_flow_ctrl_active_threshold = FC_ACTIVE_DEFAULT;

    if (tx_flow_ctrl_deactive_threshold < FC_DEACTIVE_MIN || tx_flow_ctrl_deactive_threshold > FC_DEACTIVE_MAX)
        tx_flow_ctrl_deactive_threshold = FC_DEACTIVE_DEFAULT;

    /* activate threshold must stay strictly below deactivate */
    if (tx_flow_ctrl_active_threshold >= tx_flow_ctrl_deactive_threshold) {
        tx_flow_ctrl_active_threshold = FC_ACTIVE_DEFAULT;
        tx_flow_ctrl_deactive_threshold = FC_DEACTIVE_DEFAULT;
    }

    if (tx_flow_ctrl_pause_time < 0 || tx_flow_ctrl_pause_time > FC_PAUSE_TIME_MAX)
        tx_flow_ctrl_pause_time = FC_PAUSE_TIME_DEFAULT;

    if (tx_flow_ctrl_pause_interval < 0 || tx_flow_ctrl_pause_interval > FC_PAUSE_TIME_MAX)
        tx_flow_ctrl_pause_interval = FC_PAUSE_INTERVAL_DEFAULT;

    /* pause interval should not bigger than pause time,
     * but should not too smaller to avoid sending too many pause frame.
     */
    if ((tx_flow_ctrl_pause_interval > tx_flow_ctrl_pause_time) ||
        (tx_flow_ctrl_pause_interval < ((unsigned int)tx_flow_ctrl_pause_time >> 1)))
        tx_flow_ctrl_pause_interval = tx_flow_ctrl_pause_time;
}

/*
 * Release all descriptor rings and the per-queue bookkeeping arrays allocated
 * by higmac_init_hw_desc_queue(). Safe to call on a partially-initialized
 * state (used as the error-unwind path): kfree(NULL) is a no-op, so the
 * previous explicit NULL guards around kfree() were redundant and removed.
 */
STATIC void higmac_destroy_hw_desc_queue(struct higmac_netdev_local *priv)
{
    int i;

    /* four base queues plus the extra RSS RX queues share the pool[] array */
    for (i = 0; i < QUEUE_NUMS + RSS_NUM_RXQS - 1; i++) {
        if (priv->pool[i].desc) {
            /* CCI-capable MACs used plain kzalloc memory, others DMA-coherent */
            if (HAS_CAP_CCI(priv->hw_cap))
                kfree(priv->pool[i].desc);
            else
                dma_free_coherent(priv->dev, priv->pool[i].size, priv->pool[i].desc, priv->pool[i].phys_addr);
            priv->pool[i].desc = NULL;
        }
    }

    kfree(priv->rx_fq.skb);
    priv->rx_fq.skb = NULL;

    kfree(priv->tx_bq.skb);
    priv->tx_bq.skb = NULL;

    /* sg_desc_offset is only ever allocated when TSO is supported */
    if (priv->tso_supported) {
        kfree(priv->tx_bq.sg_desc_offset);
        priv->tx_bq.sg_desc_offset = NULL;
    }

    kfree(priv->tx_qmap);
    priv->tx_qmap = NULL;

    kfree(priv->rx_qmap);
    priv->rx_qmap = NULL;
}

STATIC int higmac_init_hw_desc_queue(struct higmac_netdev_local *priv)
{
    struct device *dev = priv->dev;
    struct higmac_desc *virt_addr = NULL;
    dma_addr_t phys_addr = 0;
    int size, i;

    priv->rx_fq.count = RX_DESC_NUM;
    priv->rx_bq.count = RX_DESC_NUM;
    priv->tx_bq.count = TX_DESC_NUM;
    priv->tx_rq.count = TX_DESC_NUM;

    for (i = 1; i < RSS_NUM_RXQS; i++)
        priv->pool[3 + i].count = RX_DESC_NUM;

    for (i = 0; i < (QUEUE_NUMS + RSS_NUM_RXQS - 1); i++) {
        size = priv->pool[i].count * sizeof(struct higmac_desc);
        if (HAS_CAP_CCI(priv->hw_cap)) {
            virt_addr = kzalloc(size, GFP_KERNEL);
            if (virt_addr != NULL)
                phys_addr = virt_to_phys(virt_addr);
        } else {
            virt_addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
        }
        if (virt_addr == NULL)
            goto error_free_pool;

        priv->pool[i].size = size;
        priv->pool[i].desc = virt_addr;
        priv->pool[i].phys_addr = phys_addr;
    }

    priv->rx_fq.skb = kzalloc(priv->rx_fq.count * sizeof(char *), GFP_KERNEL);
    if (priv->rx_fq.skb == NULL)
        goto error_free_pool;

    priv->rx_qmap = kzalloc(priv->rx_fq.count * sizeof(struct higmac_rx_queue_map), GFP_KERNEL);
    if (priv->rx_qmap == NULL)
        goto error_free_pool;

    priv->tx_bq.skb = kzalloc(priv->tx_bq.count * sizeof(char *), GFP_KERNEL);
    if (priv->tx_bq.skb == NULL)
        goto error_free_pool;

    priv->tx_qmap = kzalloc(priv->tx_bq.count * sizeof(struct higmac_tx_queue_map), GFP_KERNEL);
    if (priv->tx_qmap == NULL)
        goto error_free_pool;

    if (priv->tso_supported) {
        priv->tx_bq.sg_desc_offset = kzalloc(priv->tx_bq.count * sizeof(int), GFP_KERNEL);
        if (!priv->tx_bq.sg_desc_offset)
            goto error_free_pool;
    }

    higmac_hw_set_desc_addr(priv);
    if (HAS_CAP_CCI(priv->hw_cap))
        higmac_info("higmac: ETH MAC supporte CCI.\n");

    return 0;

error_free_pool:
    higmac_destroy_hw_desc_queue(priv);

    return -ENOMEM;
}

/* Register one NAPI context per hardware RX queue; each context records its
 * queue index and a back-pointer to the device private data for the poll fn.
 */
void higmac_init_napi(struct higmac_netdev_local *priv)
{
    int i;

    for (i = 0; i < priv->num_rxqs; i++) {
        struct higmac_napi *q_napi = &priv->q_napi[i];

        q_napi->rxq_id = i;
        q_napi->ndev_priv = priv;
        netif_napi_add(priv->netdev, &q_napi->napi, higmac_poll, NAPI_POLL_WEIGHT);
    }
}

/* Unregister the per-RX-queue NAPI contexts added by higmac_init_napi(). */
void higmac_destroy_napi(struct higmac_netdev_local *priv)
{
    int i;

    for (i = 0; i < priv->num_rxqs; i++)
        netif_napi_del(&priv->q_napi[i].napi);
}

/*
 * Look up and request one shared interrupt per RX queue; each IRQ is handed
 * the matching q_napi context as its dev_id. devm_* ownership means the IRQs
 * are released automatically when the device detaches.
 * Returns 0 on success or a negative errno on the first failure.
 */
int higmac_request_irqs(struct platform_device *pdev, struct higmac_netdev_local *priv)
{
    int i;

    for (i = 0; i < priv->num_rxqs; i++) {
        int err;
        int irq = platform_get_irq(pdev, i);

        if (irq < 0) {
            higmac_err("No irq[%d] resource, ret=%d\n", i, irq);
            return irq;
        }
        priv->irq[i] = irq;

        err = devm_request_irq(priv->dev, irq, higmac_interrupt, IRQF_SHARED, pdev->name, &priv->q_napi[i]);
        if (err) {
            higmac_err("devm_request_irq failed, ret=%d\n", err);
            return err;
        }
    }

    return 0;
}

/* Map a platform identifier to its human-readable name for log output. */
static const char *higmac_platform_to_str(int platform)
{
    if (platform == PLATFORM_FPGA)
        return "FPGA";
    if (platform == PLATFORM_EMU)
        return "EMU";
    if (platform == PLATFORM_ESL)
        return "ESL";

    return "Unknown";
}

/* Advertise the netdev feature flags supported by this MAC revision.
 * ESL (simulation) platforms get no offloads at all; otherwise RX hashing,
 * n-tuple filtering, checksum offload and TSO are enabled per hardware
 * capability bits. NETIF_F_UFO was removed from the kernel in 4.14, hence
 * the version split.
 */
static void higmac_nic_features(struct higmac_netdev_local *priv)
{
    struct net_device *ndev = priv->netdev;

    if ((priv->version & BIT_PLATFORM_MASK) == PLATFORM_ESL)
        return;

    if (priv->has_rxhash_cap)
        ndev->hw_features |= NETIF_F_RXHASH;
    if (priv->has_rss_cap)
        ndev->hw_features |= NETIF_F_NTUPLE;
    if (priv->tso_supported) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
        ndev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
#else
        ndev->hw_features |=
            NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO;
#endif
    }
#if defined(CONFIG_HIGMAC_RXCSUM)
    ndev->hw_features |= NETIF_F_RXCSUM;
    higmac_enable_rxcsum_drop(priv, true);
#endif

    /* everything toggleable is also enabled by default */
    ndev->features |= ndev->hw_features;
    ndev->features |= NETIF_F_HIGHDMA | NETIF_F_GSO;
    ndev->vlan_features |= ndev->features;

    return;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/* Periodic service timer (4.15+ timer_list API): recover priv via from_timer. */
STATIC void higmac_service_timer(struct timer_list *t)
{
    struct higmac_netdev_local *priv = from_timer(priv, t, service_timer);
#else
/* Pre-4.15 timer callback: priv pointer arrives packed in 'data'. */
STATIC void higmac_service_timer(unsigned long data)
{
    struct higmac_netdev_local *priv = (struct higmac_netdev_local *)(uintptr_t)data;
#endif

    /* re-arm for the next period, then defer the real work to process context */
    (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

    (void)schedule_work(&priv->service_task);
}

/* Workqueue body scheduled by higmac_service_timer(): runs the periodic
 * monitor and refreshes the link state in process context.
 */
STATIC void higmac_service_task(struct work_struct *work)
{
    struct higmac_netdev_local *priv = container_of(work, struct higmac_netdev_local, service_task);

    higmac_monitor_func(priv);
    higmac_update_link(priv);
}

STATIC int marvell_88e1510_phy_delay_fix(struct phy_device *phy_dev)
{
    u32 v;
    int ret;
    int page;
    int reg;

    /* fixup rx/tx delay */
    page = 2;
    reg = 21;

    /* select page 2 */
    phy_write(phy_dev, 22, page);

    /* config RGMII rx/tx timing(page 21 bit 4/5) */
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v = (v & 0xffcf) | 0x10;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 2.21 : %x -> %x\n", ret, v);

    /* select to page 0 */
    phy_write(phy_dev, 22, 0);

    return 0;
}

STATIC int marvell_88e1510_phy_led_fix(struct phy_device *phy_dev)
{
    u32 v;
    int ret;
    int page;
    int reg;

    page = 3;

    /* select page 3 */
    phy_write(phy_dev, 22, page);

    /* 1��fixup register 3.16 phy led */
    reg = 16;

    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v = (v & 0xff00) | 0x30;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 3.16 : %x -> %x\n", ret, v);

    /* 2��fixup register 3.17 phy led */
    reg = 17;

    /* led polarity */
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v = (v & 0xff00) | 0x15;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 3.17 : %x -> %x\n", ret, v);

    /* 3��fixup register 3.18 phy led */
    reg = 18;

    /* led polarity */
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v |= 0x80;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 3.18 : %x -> %x\n", ret, v);

    /* select to page 0 */
    phy_write(phy_dev, 22, 0);

    return 0;
}

/*
 * Force the 88E1510 pad drive configuration (page 2, register 24) to 0x5f4f.
 * Bug fixes: the old comment claimed "page 3" while the code selects page 2,
 * and the error path returned with the PHY still parked on page 2; the page
 * register is now restored to 0 on every exit.
 * Returns 0 on success or the negative phy_read() error.
 */
STATIC int marvell_88e1510_phy_drive_capacity_fix(struct phy_device *phy_dev)
{
    u32 v;
    int ret;
    const int reg = 24;

    /* select page 2 via the page-select register (22) */
    phy_write(phy_dev, 22, 2);

    /* read back the old value only for logging purposes */
    ret = phy_read(phy_dev, reg);
    if (ret < 0) {
        /* restore page 0 so a failed fixup does not leave the PHY mis-paged */
        phy_write(phy_dev, 22, 0);
        return ret;
    }

    v = 0x5f4f;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 2.24 : %x -> %x\n", ret, v);

    /* select to page 0 */
    phy_write(phy_dev, 22, 0);

    return 0;
}

/* LPI : EEE operation or Low Power Idle */
STATIC int marvell_88e1510_phy_LPI_fix(struct phy_device *phy_dev)
{
    u32 v;
    int ret;
    int page;
    int reg;

    page = 18;

    /* select page */
    phy_write(phy_dev, 22, page);

    /* set phy LPI, register 18.0 */
    reg = 0;

    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v |= 0x01;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 18.0 : %x -> %x\n", ret, v);

    /* select to page 0 */
    phy_write(phy_dev, 22, 0);

    return 0;
}

/* FPGA-board 88E1510 fixup: apply the RGMII delay tweak, then the LED setup.
 * Returns 0 on success or the first negative error encountered.
 */
STATIC int marvell_88e1510_phy_fix(struct phy_device *phy_dev)
{
    int ret = marvell_88e1510_phy_delay_fix(phy_dev);

    if (ret < 0)
        return ret;

    return marvell_88e1510_phy_led_fix(phy_dev);
}

/* Downshift Enable */
STATIC int marvell_88e1510_phy_downshift_fix(struct phy_device *phy_dev)
{
    u32 v;
    int ret;
    int page;
    int reg;

    page = 0;

    /* select page */
    phy_write(phy_dev, 22, page);

    reg = 16;
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v |= 0x1 << 11;

    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 16.11 : %x -> %x\n", ret, v);

    /* do restart autoneg & softreset */
    reg = MII_BMCR;
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v |= BMCR_ANRESTART;
    v |= BMCR_RESET;

    phy_write(phy_dev, reg, v);
    mdelay(100);

    return 0;
}
STATIC int marvell_88e1510_asic_phy_fix(struct phy_device *phy_dev)
{
    int ret;

    ret = marvell_88e1510_phy_drive_capacity_fix(phy_dev);
    if (ret < 0)
        return ret;

    ret = marvell_88e1510_phy_led_fix(phy_dev);
    if (ret < 0)
        return ret;

    ret = marvell_88e1510_phy_LPI_fix(phy_dev);
    if (ret < 0)
        return ret;

    ret = marvell_88e1510_phy_downshift_fix(phy_dev);

    return ret;
}

STATIC int realtek_8211f_phy_fix(struct phy_device *phy_dev)
{
    u32 v;
    int ret;
    int page;
    int reg;

    /* select page */
    page = RTL8211F_CTRL_PAGE_BASE;
    phy_write(phy_dev, RTL8211F_REG_PAGE, page);

    /* config led */
    reg = RTL8211F_REG_LED_A;
    v = RTL8211F_LED_A_DEFAULT;
    phy_write(phy_dev, reg, v);

    reg = RTL8211F_REG_LED_B;
    v = RTL8211F_LED_B_DEFAULT;
    phy_write(phy_dev, reg, v);

    /* select page */
    page = RTL8211F_DATA_PAGE_BASE;
    phy_write(phy_dev, RTL8211F_REG_PAGE, page);

    /* TX delay */
    reg = RTL8211F_REG_TX_DELAY;
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v |= RTL8211F_TX_DELAY;
    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 0x11 : 0x%x -> 0x%x\n", ret, v);

    /* RX delay */
    reg = RTL8211F_REG_RX_DELAY;
    ret = phy_read(phy_dev, reg);
    if (ret < 0)
        return ret;

    v = ret;
    v |= RTL8211F_RX_DELAY;
    phy_write(phy_dev, reg, v);

    higmac_info("phy fix register 0x15 : 0x%x -> 0x%x\n", ret, v);

    phy_write(phy_dev, RTL8211F_REG_PAGE, RTL8211F_WORK_PAGE_BASE);

    return 0;
}

STATIC void higmac_phy_fixups(struct higmac_netdev_local *priv)
{
    /* only fpga do delay fix up */
    if (((priv->version & BIT_PLATFORM_MASK) == PLATFORM_FPGA) && ((priv->version & BIT_VERSION_MASK) != 0)) {
        phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510, DEFAULT_PHY_MASK, marvell_88e1510_phy_fix);
    } else {
        phy_register_fixup_for_uid(REALTEK_PHY_ID_8211F, REALTEK_PHY_MASK, realtek_8211f_phy_fix);
        phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510, DEFAULT_PHY_MASK, marvell_88e1510_asic_phy_fix);
    }
}

static void higmac_phy_unfixups(void)
{
    phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510, DEFAULT_PHY_MASK);
}

/*
 * Connect the MAC to its PHY. Two paths:
 *  - a DT "phy-handle" exists: connect through of_phy_connect(); failure
 *    triggers -EPROBE_DEFER so the driver is re-probed once the PHY appears;
 *  - no DT handle: scan the shared MDIO bus and connect the first PHY found;
 *    no PHY at all is treated as a fixed 1000/full link (lan switch case).
 * Returns 0 on success, -EPROBE_DEFER / -ENODEV on failure.
 */
STATIC int higmac_phy_config(struct higmac_netdev_local *priv)
{
    struct mii_bus *mdio_bus = NULL;
    int ret;

    if (priv->phy_node != NULL) {
        start_mbist(priv, 1);
        /* register phy fixup */
        higmac_phy_fixups(priv);
        priv->phy = of_phy_connect(priv->netdev, priv->phy_node, &higmac_adjust_link, 0, priv->phy_mode);
        if (priv->phy == NULL) {
            /* deferred probe: the nic driver will be probed again later */
            higmac_err("phy connect failed.trigger defered probe.\n");
            of_node_put(priv->phy_node);
            higmac_phy_unfixups();
            return -EPROBE_DEFER;
        }
    } else {
        mdio_bus = hns_get_mdiobus();
        if (mdio_bus == NULL) {
            higmac_err("hns_get_mdiobus failed.\n");
            return -ENODEV;
        }
        priv->phy = phy_find_first(mdio_bus);
        if (priv->phy == NULL) {
            /* no PHY on the bus: assume a direct link (e.g. lan switch) */
            start_mbist(priv, 0);
            priv->cfg_speed = SPEED_1000;
            priv->cfg_duplex = 1;
            higmac_info("No phy found or using lan switch\n");
            return 0;
        }
        higmac_info("find phy success without dts\n");
        start_mbist(priv, 1);
        /* register phy fixup */
        higmac_phy_fixups(priv);
        ret = phy_connect_direct(priv->netdev, priv->phy, higmac_adjust_link, priv->phy_mode);
        if (ret) {
            higmac_phy_unfixups();
            return ret;
        }
    }
    higmac_info("phy(id=0x%x) connetc ok.\n", priv->phy->phy_id);

    /* If the phy_id is mostly Fs, there is no device there */
    if ((priv->phy->phy_id & 0x1fffffff) == 0x1fffffff) {
        higmac_err("phy id (%d) invalid\n", priv->phy->phy_id);
        phy_disconnect(priv->phy);
        return -ENODEV;
    }

    return 0;
}
STATIC int higmac_phy_init(struct higmac_netdev_local *priv, struct device_node *node)
{
    int ret;
    phy_interface_t phy_mode;

    ret = of_get_phy_mode(node, &phy_mode);
    if (ret < 0) {
        higmac_err("not find phy-mode. whether has phy or not, tell me the mode.");
        return -EINVAL;
    }

    priv->phy_mode = phy_mode;

    higmac_info("phy mode = %d.\n", priv->phy_mode);

    priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
    ret = higmac_phy_config(priv);
    if (ret) {
        return ret;
    }

    if (priv->phy != NULL) {
        /* Stop Advertising 1000BASE Capability if interface is not RGMII */
        if ((priv->phy_mode == PHY_INTERFACE_MODE_MII) || (priv->phy_mode == PHY_INTERFACE_MODE_RMII)) {
            //priv->phy->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
	    linkmode_clear_bit(SUPPORTED_1000baseT_Half, priv->phy->advertising);
	    linkmode_clear_bit(SUPPORTED_1000baseT_Full, priv->phy->advertising);

            /* Internal FE phy's reg BMSR bit8 is wrong, make the kernel
             * believe it has the 1000base Capability, so fix it here
             */
            if (priv->phy->phy_id == HISILICON_PHY_ID_FESTAV200) {
                //priv->phy->supported &= ~(ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half);
	        linkmode_clear_bit(SUPPORTED_1000baseT_Half, priv->phy->supported);
	        linkmode_clear_bit(SUPPORTED_1000baseT_Full, priv->phy->supported);
	    }
        }

	/*
        priv->phy->supported |= SUPPORTED_Pause;
        if (priv->flow_ctrl)
            priv->phy->advertising |= SUPPORTED_Pause;
	*/
	phy_support_asym_pause(priv->phy);
        set_bit(SERVICE_STATE_LINK_CLOSE, &priv->service_state);

        higmac_info("ETH: %s, phy: %s, phy id: %x\n", phy_modes(priv->phy_mode), priv->phy->drv->name,
            priv->phy->phy_id);
    }

    return 0;
}

/* Disconnect the PHY (if any) and drop the DT node reference taken during
 * higmac_phy_init().
 */
static void higmac_phy_uninit(struct higmac_netdev_local *priv)
{
    if (priv->phy)
        phy_disconnect(priv->phy);

    if (priv->phy_node)
        of_node_put(priv->phy_node);
}

/* State carried between two sampling passes so that per-second rates can be
 * derived from the counter deltas.
 */
struct higmac_sample {
    struct net_device *ndev;           /* device whose stats are sampled */
    struct net_device_stats stats_bak; /* counter snapshot from the last pass */
    struct timespec64 last_ktime;      /* timestamp of the last pass */
};

/* Single global sampling context, allocated by higmac_sample_init(). */
struct higmac_sample *sample_priv = NULL;

/* Column labels for the sample output; the order must match the values
 * emitted by higmac_sample_fun(): bandwidth, then the RX fields, then the
 * TX fields.
 */
static struct {
    const char str[SAMPLE_NAME_MAX];
} higmac_sample_names[] = {
    { "bandwidth" },

    { "rxPacket/s" },
    { "rxByte/s" },
    { "rxPackets" },
    { "rxBytes" },
    { "rxErrors" },
    { "rxDropped" },

    { "txPacket/s" },
    { "txByte/s" },
    { "txPackets" },
    { "txBytes" },
    { "txErrors" },
    { "txDropped" }
};

/* Remaining room in a buffer of capacity 'max' after 'used' bytes have been
 * written; never negative. (Fixes the misspelled parameter name "currunt".)
 */
static int sample_max_len(int max, int used)
{
    return (max > used) ? (max - used) : 0;
}

/* Write the "seconds:nanoseconds" timestamp into buf; returns the number of
 * bytes written, or 0 when the bounded formatting fails.
 */
static int sample_timestamp(char *buf, int len, struct timespec64 *uptime)
{
    int written = snprintf_s(buf, len, len - 1, "%ld:%09ld", (long int)uptime->tv_sec, (long int)uptime->tv_nsec);

    return (written > 0) ? written : 0;
}

/* Emit the space-separated column-label header line (newline terminated)
 * into buf; returns the number of bytes written.
 */
static int higmac_sample_header(void *buf, int len)
{
    int wlen = 0;
    int tlen;
    int room;
    int i;

    for (i = 0; i < (int)ARRAY_SIZE(higmac_sample_names); i++) {
        room = sample_max_len(len, wlen);
        tlen = snprintf_s(buf + wlen, room, room - 1, " %s", higmac_sample_names[i].str);
        if (tlen > 0)
            wlen += tlen;
    }

    room = sample_max_len(len, wlen);
    tlen = snprintf_s(buf + wlen, room, room - 1, "\n");
    if (tlen > 0)
        wlen += tlen;

    return wlen;
}

/* Append the RX sample columns (packet rate, byte rate, then the running
 * totals) to buf, each formatted as " %ld"; returns bytes written.
 */
static int higmac_sample_rx(struct net_device_stats *stats, char *buf, int len, long pps, long Bps)
{
    const long vals[] = {
        pps,
        Bps,
        (long)stats->rx_packets,
        (long)stats->rx_bytes,
        (long)stats->rx_errors,
        (long)stats->rx_dropped,
    };
    int wlen = 0;
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(vals); i++) {
        int room = sample_max_len(len, wlen);
        int tlen = snprintf_s(buf + wlen, room, room - 1, " %ld", vals[i]);

        if (tlen > 0)
            wlen += tlen;
    }

    return wlen;
}

/* Append the TX sample columns (packet rate, byte rate, then the running
 * totals) to buf, each formatted as " %ld"; returns bytes written.
 */
static int higmac_sample_tx(struct net_device_stats *stats, char *buf, int len, long pps, long Bps)
{
    const long vals[] = {
        pps,
        Bps,
        (long)stats->tx_packets,
        (long)stats->tx_bytes,
        (long)stats->tx_errors,
        (long)stats->tx_dropped,
    };
    int wlen = 0;
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(vals); i++) {
        int room = sample_max_len(len, wlen);
        int tlen = snprintf_s(buf + wlen, room, room - 1, " %ld", vals[i]);

        if (tlen > 0)
            wlen += tlen;
    }

    return wlen;
}

/*
 * Fill @buf with one text line of traffic statistics for the sampled device:
 * timestamp, link bandwidth, then the RX and TX rate/total columns. When
 * @flag requests it, a header line (column labels) is emitted first.
 * Rates are derived from the counter deltas since the previous call divided
 * by the elapsed raw time. Returns the number of bytes written (0 on bad
 * arguments or before higmac_sample_init() has run).
 */
int higmac_sample_fun(unsigned int device_id, void *buf, int len, int flag)
{
    struct net_device_stats *stats = NULL, *last_stats = NULL;
    struct higmac_netdev_local *priv = NULL;
    struct higmac_sample *sample = NULL;
    struct timespec64 uptime;
    long rxpps, txpps;
    long rxBps, txBps;
    int bandwidth;
    u64 last_ns;
    int wlen = 0;
    int tlen;
    int remain_len = 0;

    if ((buf == NULL) || (len <= 0)) {
        higmac_err("nic sample param error, buf %pK, len %d\n", buf, len);
        return 0;
    }

    sample = (struct higmac_sample *)sample_priv;
    if (sample == NULL) {
        higmac_warn("nic profile has not initialized.\n");
        return 0;
    }

    ktime_get_raw_ts64(&uptime);

    /* caller asked for the column-label header before the data line */
    if (unlikely(((unsigned int)flag & SAMPLE_MASK) == SAMPLE_WITH_HEADER)) {
        wlen += sample_timestamp(buf + wlen, len - wlen, &uptime);
        wlen += higmac_sample_header(buf + wlen, len - wlen);
    }

    /* elapsed nanoseconds since the previous sample; bail out on a zero
     * delta to avoid dividing by zero below
     */
    last_ns = (u64)timespec64_to_ns(&uptime) - (u64)timespec64_to_ns(&sample->last_ktime);
    if (!last_ns)
        return wlen;

    stats = &sample->ndev->stats;
    last_stats = &sample->stats_bak;

    /* per-second rates from the counter deltas over the elapsed time */
    rxpps = (stats->rx_packets - last_stats->rx_packets) * NSEC_PER_SEC / last_ns;
    txpps = (stats->tx_packets - last_stats->tx_packets) * NSEC_PER_SEC / last_ns;

    rxBps = (stats->rx_bytes - last_stats->rx_bytes) * NSEC_PER_SEC / last_ns;
    txBps = (stats->tx_bytes - last_stats->tx_bytes) * NSEC_PER_SEC / last_ns;

    /* snapshot the counters and timestamp for the next pass */
    last_stats->rx_packets = stats->rx_packets;
    last_stats->tx_packets = stats->tx_packets;
    last_stats->rx_bytes = stats->rx_bytes;
    last_stats->tx_bytes = stats->tx_bytes;
    sample->last_ktime = uptime;

    priv = netdev_priv(sample->ndev);
    if (priv->old_speed == SPEED_10)
        bandwidth = 10;
    else if (priv->old_speed == SPEED_100)
        bandwidth = 100;
    else
        bandwidth = 1000;

    /* timestamp */
    wlen += sample_timestamp(buf + wlen, len - wlen, &uptime);

    /* bandwidth */
    remain_len = sample_max_len(len, wlen);
    tlen = snprintf_s(buf + wlen, remain_len, remain_len - 1, " %d", bandwidth);
    if (tlen > 0) {
        wlen += tlen;
    }

    /* rx */
    wlen += higmac_sample_rx(stats, buf + wlen, len - wlen, rxpps, rxBps);

    /* tx */
    wlen += higmac_sample_tx(stats, buf + wlen, len - wlen, txpps, txBps);

    /* finish */
    remain_len = sample_max_len(len, wlen);
    tlen = snprintf_s(buf + wlen, remain_len, remain_len - 1, "\n");
    if (tlen > 0) {
        wlen += tlen;
    }

    /* guarantee NUL termination even when the line was truncated */
    if (wlen < len) {
        ((char *)buf)[wlen] = '\0';
    } else {
        ((char *)buf)[len - 1] = '\0';
    }

    return wlen;
}

EXPORT_SYMBOL(higmac_sample_fun);

/*
 * Allocate and seed the global traffic-sampling context for @ndev.
 * Idiom fixes: no cast on the kzalloc() result, and the size is taken from
 * the pointee (sizeof(*sample_priv)) so it tracks the pointer's type.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
STATIC int higmac_sample_init(struct net_device *ndev)
{
    sample_priv = kzalloc(sizeof(*sample_priv), GFP_KERNEL);
    if (sample_priv == NULL) {
        higmac_err("profile register kzalloc failed.\n");
        return -ENOMEM;
    }

    sample_priv->ndev = ndev;
    /* baseline timestamp so the first sample computes a sane delta */
    ktime_get_raw_ts64(&sample_priv->last_ktime);

    return 0;
}

/* Free the sampling context. kfree(NULL) is a no-op, so the previous NULL
 * guard was redundant; the pointer is cleared to prevent use-after-free.
 */
STATIC void higmac_sample_uninit(void)
{
    kfree(sample_priv);
    sample_priv = NULL;
}

/* DFM dump callback: hand the driver's black-box buffer back to the caller.
 * The first three parameters are unused but required by the ops_dump
 * callback signature.
 */
void higmac_dump(u32 excep_id, u32 etype, u32 module_id, char **black_box_info)
{
    if (black_box_info == NULL)
        return;

    *black_box_info = rdr_mng.buffer;
}

/* Record @err_info in the DFM black box, then raise system error @e_id. */
void higmac_dfm_error(const char *err_info, u32 e_id)
{
    dfm_write_black_box(DFM_MODULE_ID_DRIVER, DFM_SUBMODULE_ID_NET, err_info);
    dfm_system_error_report(e_id, 0);
}

STATIC int higmac_dfm_init(void)
{
    struct dfm_module_register dfm_higmac_info = {0};
    char *mng = NULL;

    dfm_higmac_info.module_id = DFM_MODULE_ID_DRIVER;
    dfm_higmac_info.sub_module_id = DFM_SUBMODULE_ID_NET;
    dfm_higmac_info.ops_dump = &higmac_dump;

    if (dfm_register_module(&dfm_higmac_info)) {
        higmac_err("dfm_register_module failed.\n");
        return -1;
    }

    mng = (char *)kzalloc(HIGMAC_DFM_BUF_LEN, GFP_KERNEL);
    if (mng == NULL) {
        higmac_err("register kzalloc failed.\n");
        dfm_unregister_module(DFM_MODULE_ID_DRIVER, DFM_SUBMODULE_ID_NET);
        return -ENOMEM;
    }

    rdr_mng.buffer = mng;
    rdr_mng.buf_len = HIGMAC_DFM_BUF_LEN;

    return 0;
}

/* Unregister from DFM and release the black-box buffer. */
STATIC void higmac_dfm_uninit(void)
{
    dfm_unregister_module(DFM_MODULE_ID_DRIVER, DFM_SUBMODULE_ID_NET);

    kfree(rdr_mng.buffer);
    rdr_mng.buffer = NULL;
    rdr_mng.buf_len = 0;
}

/*
 * Bring up the auxiliary subsystems: the profiling sample hook first, then
 * the DFM black box. On a DFM failure the sample context is torn down again.
 * Returns 0 on success, -1 on any failure.
 */
STATIC int higmac_others_init(struct net_device *ndev)
{
    if (higmac_sample_init(ndev)) {
        higmac_err("higmac_sample_init failed!\n");
        return -1;
    }
    higmac_info("nic profile sample init ok.\n");

    if (higmac_dfm_init()) {
        higmac_err("higmac_dfm_init failed!\n");
        higmac_sample_uninit();
        return -1;
    }
    higmac_info("nic dfm init ok.\n");

    return 0;
}

/*
 * Forward @cmd/@arg to the EEPROM char device's unlocked_ioctl handler.
 * Returns the handler's result, or -ENOTTY when the device has no handler.
 * Bug fix: removes the unused 'mm_segment_t old_fs' left over from the
 * commented-out set_fs(KERNEL_DS) dance (retired with the kernel's set_fs
 * removal); @arg must therefore already be a kernel-safe address.
 */
STATIC int higmac_eeprom_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    if (!file || !file->f_op || !file->f_op->unlocked_ioctl) {
        higmac_err("eeprom ioctl is NULL.\n");
        return -ENOTTY;
    }

    return file->f_op->unlocked_ioctl(file, cmd, arg);
}

/*
 * Read the MAC address out of the EEPROM device into @mac_addr (ETH_ALEN
 * bytes). Idiom fixes: the six hand-written byte assignments are replaced by
 * a single memcpy(), and the Yoda comparison is straightened out.
 * Returns 0 on success, -1 when the device is absent or the read fails.
 */
STATIC int higmac_get_mac_in_eeprom(char *mac_addr)
{
    unsigned char mac[ETH_ALEN];
    struct eeprom_info einfo;
    struct file *file = NULL;
    int readlen;

    file = filp_open(EEPROM_NAME, O_RDWR | O_NDELAY, 0);
    if (IS_ERR(file)) {
        higmac_warn("eeprom file (%s) not existed.\n", EEPROM_NAME);
        return -1;
    }

    /* describe the read: destination buffer, length, EEPROM offset */
    einfo.buf = (unsigned long)(uintptr_t)mac;
    einfo.count = ETH_ALEN;
    einfo.page_address = EEPROM_MAC_OFFSET + EEPROM_MAC_MAC_OFFSET;

    readlen = higmac_eeprom_ioctl(file, EEPROM_READ_CMD, (unsigned long)(uintptr_t)&einfo);
    if (readlen != ETH_ALEN) {
        higmac_err("read eeprom by ioctl failed. readlen = %d\n", readlen);
        filp_close(file, NULL);
        return -1;
    }

    memcpy(mac_addr, mac, ETH_ALEN);

    filp_close(file, NULL);
    return 0;
}

/*
 * char *mac_addr : write mac addr(6 bytes) to this buffer
 */
STATIC int higmac_get_mac_from_user(struct device_node *node, char *mac_addr)
{
    int mac_where;

    if (of_property_read_u32_index(node, "mac-where", 0, &mac_where)) {
        higmac_warn("mac-where node is not setted.\n");
        return -1;
    }

    if (MAC_IN_EEPROM == mac_where)
        return higmac_get_mac_in_eeprom(mac_addr);

    /* others cases by user */
    return -1;
}

/* Choose the device MAC address with a three-level fallback — user storage
 * (EEPROM), then devicetree, then a random address — and program it into
 * the hardware.
 */
void higmac_mac_init(struct net_device *ndev, struct device_node *node)
{
    const char *mac_addr = NULL;

    /* 1. read user's mac addr */
    if (higmac_get_mac_from_user(node, ndev->dev_addr)) {
        /* 2. get mac by dts
         * NOTE(review): on newer kernels of_get_mac_address() can return an
         * ERR_PTR rather than NULL on failure — confirm the NULL-only check
         * is sufficient for the target kernel version.
         */
        mac_addr = of_get_mac_address(node);
        if (mac_addr != NULL)
            ether_addr_copy(ndev->dev_addr, mac_addr);
    }

    if (!is_valid_ether_addr(ndev->dev_addr)) {
        /* 3. set mac with random */
        eth_hw_addr_random(ndev);
        higmac_warn("using random MAC\n");
    }

    higmac_hw_set_mac_addr(ndev);

    higmac_info("nic mac set ok.\n");
}

/* Write IOMUX_FIX_VALUE into the four EVB pad-control registers
 * (PADMG012..PADMG015) via the previously mapped iomux window.
 */
STATIC void higmac_driver_capacity_evb_fix(struct higmac_netdev_local *priv)
{
    static const unsigned int pad_regs[] = {
        REG_IOMUX_PADMG012,
        REG_IOMUX_PADMG013,
        REG_IOMUX_PADMG014,
        REG_IOMUX_PADMG015,
    };
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(pad_regs); i++)
        writel(IOMUX_FIX_VALUE, priv->iomux_base + pad_regs[i]);

    higmac_info("driver capacity evb fix ok.\n");
}

/*
 * On EVB boards, map the iomux register window and apply the pad-control
 * fix; a missing "platform-type" property or a non-EVB board is a silent
 * no-op. The mapping (if made) stays in priv->iomux_base and is released by
 * higmac_driver_res_release().
 */
STATIC void higmac_driver_capacity_fix(struct higmac_netdev_local *priv, struct device_node *np)
{
    u32 platform_type = 0;

    priv->iomux_base = NULL;

    if (of_property_read_u32_index(np, "platform-type", 0, &platform_type)) {
        higmac_info("platform_type node not config.\n");
        return;
    }

    /* currently the fix is only applied on EVB boards */
    if (platform_type != PLATFORM_TYPE_EVB)
        return;

    priv->iomux_base = of_iomap(np, MEM_IOMUX_IOBASE);
    if (priv->iomux_base == NULL) {
        higmac_info("iomux_base of_iomap failed\n");
        return;
    }

    higmac_driver_capacity_evb_fix(priv);

    higmac_info("driver capacity fix ok.\n");
}

/* Unmap the iomux window if higmac_driver_capacity_fix() mapped one. */
STATIC void higmac_driver_res_release(struct higmac_netdev_local *priv)
{
    if (priv->iomux_base == NULL)
        return;

    iounmap(priv->iomux_base);
    priv->iomux_base = NULL;
}

/* Tear down the auxiliary subsystems in reverse order of
 * higmac_others_init() (which initializes sample first, then DFM): DFM is
 * unregistered first so no dump callback can race a freed sample context.
 */
STATIC void higmac_others_uninit(void)
{
    higmac_dfm_uninit();
    higmac_sample_uninit();
}

/*
 * Allocate the net_device (with its higmac private area), set the DMA
 * mask and map the GMAC and SYSCTRL MMIO regions.
 *
 * @pdev: owning platform device
 * @ld:   out parameter; receives the netdev_priv() pointer on success
 *
 * Returns the allocated net_device on success.  On failure returns NULL
 * with the ndev freed and the platform drvdata cleared — the original
 * code left drvdata pointing at the freed ndev when an ioremap after
 * platform_set_drvdata() failed.
 */
struct net_device *higmac_alloc_ndev_and_resource(struct platform_device *pdev, struct higmac_netdev_local **ld)
{
    struct device *dev = &pdev->dev;
    struct device_node *node = dev->of_node;
    struct higmac_netdev_local *priv = NULL;
    struct net_device *ndev = NULL;
    struct resource *res = NULL;
    u32 dma_mask_bit;
    int num_rxqs;

    /* Only v5 hardware uses multiple RSS RX queues. */
    if (of_device_is_compatible(node, "hisilicon,higmac-v5")) {
        num_rxqs = RSS_NUM_RXQS;
    } else {
        num_rxqs = 1;
    }

    higmac_info("hardware queue number : %d.\n", num_rxqs);

    ndev = alloc_etherdev_mqs(sizeof(struct higmac_netdev_local), 1, num_rxqs);
    if (ndev == NULL) {
        higmac_err("alloc net device failed! num_rxqs=%d\n", num_rxqs);
        return NULL;
    }

    higmac_info("alloced a net device.\n");

    /* Try the wide DMA mask first, then fall back to the narrow one. */
    dma_mask_bit = HIGMAC_DMA_ADDR_MAX;
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask_bit))) {
        higmac_warn("dma mask %d bit set failed, try %d bit again!\n", dma_mask_bit, HIGMAC_DMA_ADDR_MIN);

        dma_mask_bit = HIGMAC_DMA_ADDR_MIN;
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask_bit))) {
            higmac_err("dma mask set %d bit failed!\n", dma_mask_bit);
            goto out_free_netdev;
        }
    }

    higmac_info("nic dma mask %d bit set ok.\n", dma_mask_bit);

    platform_set_drvdata(pdev, ndev);
    SET_NETDEV_DEV(ndev, dev);

    priv = netdev_priv(ndev);
    priv->dev = dev;
    priv->netdev = ndev;
    priv->num_rxqs = num_rxqs;

    /* v3 hardware supports cache-coherent (CCI) DMA. */
    if (of_device_is_compatible(node, "hisilicon,higmac-v3"))
        priv->hw_cap |= HW_CAP_CCI;

    res = platform_get_resource(pdev, IORESOURCE_MEM, MEM_GMAC_IOBASE);
    priv->gmac_iobase = devm_ioremap_resource(dev, res);
    if (IS_ERR(priv->gmac_iobase)) {
        higmac_err("nic rgmii iobase resource failed! ret=%ld\n", PTR_ERR(priv->gmac_iobase));
        goto out_free_netdev;
    }

    higmac_info("nic rgmii iobase resource ok.\n");

    res = platform_get_resource(pdev, IORESOURCE_MEM, MEM_SYSCTRL_IOBASE);
    priv->sysctrl_base = devm_ioremap_resource(dev, res);
    if (IS_ERR(priv->sysctrl_base)) {
        higmac_err("nic sysctrl iobase resource failed! ret=%ld\n", PTR_ERR(priv->sysctrl_base));
        goto out_free_netdev;
    }

    *ld = priv;

    return ndev;

out_free_netdev:
    /* Fix: drvdata may already point at ndev; clear it before freeing
     * so no stale pointer to the freed ndev is left behind.
     */
    platform_set_drvdata(pdev, NULL);
    free_netdev(ndev);

    return NULL;
}

/*
 * Platform probe: bring one GMAC instance fully up.
 *
 * Sequence: allocate ndev and map resources, enable clocks, reset the
 * PHY then the MAC core, program the MAC address, read the capability
 * word, configure RSS/hash, run hardware init, set flow control,
 * request IRQs, init NAPI/locks, set up the service timer and work,
 * allocate descriptor queues, register the netdev, then run the
 * auxiliary init.  Errors unwind through the goto ladder; each label
 * releases what was acquired after the previous label.
 *
 * NOTE(review): higmac_clk_disable() runs unconditionally on the success
 * path before higmac_others_init(); if that init fails, the unwind path
 * reaches out_clk_disable and disables the clock a second time —
 * confirm the clock helper tolerates unbalanced disables.
 */
STATIC int higmac_dev_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct device_node *node = dev->of_node;
    struct net_device *ndev = NULL;
    struct higmac_netdev_local *priv = NULL;
    unsigned int hw_cap;
    int ret;

    higmac_verify_flow_ctrl_args();

    if (NULL == (ndev = higmac_alloc_ndev_and_resource(pdev, &priv)))
        return -ENOMEM;

    higmac_info("nic sysctrl iobase resource ok.\n");

    /* Hardware version/platform id from the system controller block. */
    priv->version = readl(priv->sysctrl_base + SC_CFG_VER_VER);
    higmac_info("higmac hw version : %s B%x\n", higmac_platform_to_str(priv->version & BIT_PLATFORM_MASK),
        priv->version & BIT_VERSION_MASK);

    higmac_clk_test_and_enable(priv);

    /* phy reset, should be earlier than "of_mdiobus_register",
     * because "of_mdiobus_register" will read PHY register by MDIO.
     */
    higmac_hw_phy_reset(priv);

    higmac_of_get_param(priv, node);

    higmac_set_flow_ctrl_args(priv);

    ret = higmac_phy_init(priv, node);
    if (ret) {
        higmac_err("phy init failed! ret=%d\n", ret);
        goto out_clk_disable;
    }

    higmac_info("nic phy init ok.\n");

    /* mac core reset before any hardware init.
     * rgmii mbist is done in higmac_phy_init(), so mac core reset here.
     */
    higmac_mac_core_reset(priv);
    higmac_info("nic reset ok.\n");

    higmac_mac_init(ndev, node);

    /* Capability bits (TSO/RXHASH/RSS) are packed into this register. */
    hw_cap = readl(priv->gmac_iobase + CRF_MIN_PACKET);
    higmac_info("hw_cap = %x\n", hw_cap);

    priv->tso_supported = HAS_TSO_CAP(hw_cap);
    priv->has_rxhash_cap = HAS_RXHASH_CAP(hw_cap);
    priv->has_rss_cap = HAS_RSS_CAP(hw_cap);

    higmac_set_rss_cap(priv);
    higmac_get_rss_key(priv);
    if (priv->has_rss_cap) {
        priv->rss_info.ind_tbl_size = RSS_INDIRECTION_TABLE_SIZE;
        higmac_get_rss(priv);
    }

    if (priv->has_rxhash_cap) {
        priv->rss_info.hash_cfg = DEF_HASH_CFG;
        higmac_config_hash_policy(priv);
    }

    higmac_info("rss & hash init ok.\n");

    /* init hw controller */
    higmac_hw_init(priv);

    higmac_info("hardware init ok.\n");

    higmac_driver_capacity_fix(priv, node);

    higmac_set_flow_ctrl_params(priv);
    higmac_set_flow_ctrl_state(priv, priv->old_pause);

    higmac_info("flow contrl init ok.\n");

    ret = higmac_request_irqs(pdev, priv);
    if (ret) {
        higmac_err("nic request irq failed!\n");
        goto out_phy_unint;
    }

    higmac_info("nic request irq ok.\n");

    higmac_init_napi(priv);
    spin_lock_init(&priv->rxlock);
    spin_lock_init(&priv->txlock);
    spin_lock_init(&priv->pmtlock);

    /* init netdevice */
    ndev->irq = priv->irq[0];
    ndev->watchdog_timeo = 3 * HZ;
    ndev->netdev_ops = &hieth_netdev_ops;
    ndev->ethtool_ops = &hieth_ethtools_ops;

    higmac_nic_features(priv);

    higmac_info("ndev->features = %lx\n", (long)ndev->features);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
    timer_setup(&priv->service_timer, higmac_service_timer, 0);
#else
    setup_timer(&priv->service_timer, higmac_service_timer, (uintptr_t)priv);
#endif
    INIT_WORK(&priv->service_task, higmac_service_task);

    device_set_wakeup_capable(priv->dev, 1);
    /* In some mode, we don't want phy powerdown,
     * so I set wakeup enable all the time
     */
    device_set_wakeup_enable(priv->dev, 1);

    priv->wol_enable = false;

    priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

    /* init hw desc queue */
    ret = higmac_init_hw_desc_queue(priv);
    if (ret) {
        higmac_err("nic desc queue init failed! ret=%d\n", ret);
        goto _error_hw_desc_queue;
    }

    higmac_info("nic desc queue init ok.\n");

    /* Scatter-gather descriptors are only needed when TSO is available. */
    if (priv->tso_supported) {
        ret = higmac_init_sg_desc_queue(priv);
        if (ret)
            goto _error_sg_desc_queue;
    }

    /* register netdevice */
    ret = register_netdev(priv->netdev);
    if (ret) {
        higmac_err("register_ndev failed!\n");
        goto _error_sg_desc_queue;
    }

    higmac_info("netdev register ok.\n");

    /* reset queue here to make BQL only reset once.
     * if we put netdev_reset_queue() in higmac_net_open(),
     * the BQL will be reset when ifconfig eth0 down and up,
     * but the tx ring is not cleared before.
     * As a result, the NAPI poll will call netdev_completed_queue()
     * and BQL throw a bug.
     */
    netdev_reset_queue(ndev);

    higmac_clk_disable(priv);

    ret = higmac_others_init(ndev);
    if (ret) {
        higmac_err("higmac_others_init failed!\n");
        goto _error_netdev_reg;
    }

    higmac_info("higmac driver initialization successful.\n");

    return ret;

_error_netdev_reg:
    unregister_netdev(ndev);
_error_sg_desc_queue:
    if (priv->tso_supported)
        higmac_destroy_sg_desc_queue(priv);
_error_hw_desc_queue:
    higmac_destroy_hw_desc_queue(priv);
    higmac_destroy_napi(priv);
out_phy_unint:
    higmac_driver_res_release(priv);
    higmac_phy_uninit(priv);
out_clk_disable:
    higmac_clk_disable(priv);

    free_netdev(ndev);

    return ret;
}

/*
 * Platform remove: tear down everything set up in higmac_dev_probe().
 *
 * NOTE(review): priv->service_timer is not explicitly deleted here;
 * presumably unregister_netdev() -> ndo_stop removes it when the
 * interface was up — confirm, otherwise the timer could still fire
 * after free_netdev().
 * NOTE(review): NAPI is destroyed before unregister_netdev(); confirm
 * the ndo_stop path does not use the NAPI instances afterwards.
 */
STATIC int higmac_dev_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct higmac_netdev_local *priv = netdev_priv(ndev);

    higmac_others_uninit();

    higmac_driver_res_release(priv);

    higmac_info("nic others uninit ok.\n");

    /* stop the gmac and free all resource */
    cancel_work_sync(&priv->service_task);

    higmac_destroy_napi(priv);

    unregister_netdev(ndev);

    higmac_info("netdev unregister ok.\n");

    higmac_reclaim_rx_tx_resource(priv);

    higmac_info("reclaim resource ok.\n");

    /* SG descriptors exist only when TSO was available (see probe). */
    if (priv->tso_supported)
        higmac_destroy_sg_desc_queue(priv);

    higmac_destroy_hw_desc_queue(priv);

    higmac_info("hardware desc queue destroy ok.\n");

    if (priv->phy != NULL) {
        phy_disconnect(priv->phy);
        higmac_phy_unfixups();
    }

    /* Drop the DT node reference taken during PHY setup. */
    if (priv->phy_node != NULL) {
        of_node_put(priv->phy_node);
    }

    free_netdev(ndev);

    higmac_info("higmac driver remove successful.\n");

    return 0;
}

#ifdef CONFIG_PM

/* Disable the per-RX-queue interrupt lines (one IRQ per queue). */
static void higmac_disable_irq(struct higmac_netdev_local *priv)
{
    int q;

    for (q = 0; q < priv->num_rxqs; ++q)
        disable_irq(priv->irq[q]);
}

/* Re-enable the per-RX-queue interrupt lines (one IRQ per queue). */
static void higmac_enable_irq(struct higmac_netdev_local *priv)
{
    int q;

    for (q = 0; q < priv->num_rxqs; ++q)
        enable_irq(priv->irq[q]);
}

/*
 * Legacy platform suspend callback (@state is unused).
 *
 * Masks the per-queue IRQs, disconnects and powers down the PHY when
 * Wake-on-LAN is disabled, stops the service timer/work, quiesces NAPI,
 * detaches the netdev and reclaims the DMA rings.  When the interface
 * is down, the MAC clock is briefly enabled around the ring reclaim
 * because register access requires it.
 */
int higmac_dev_suspend(struct platform_device *pdev, pm_message_t state)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct higmac_netdev_local *priv = netdev_priv(ndev);

    higmac_disable_irq(priv);
    /* If support Wake on LAN, we should not disconnect phy
     * because it will call phy_suspend to power down phy.
     */
    if (!priv->wol_enable && (priv->phy != NULL))
        phy_disconnect(priv->phy);

    del_timer_sync(&priv->service_timer);
    cancel_work_sync(&priv->service_task);

    /* If suspend when netif is not up, the napi_disable will run into
     * dead loop and dpm_drv_timeout will give warning.
     */
    if (netif_running(ndev))
        higmac_disable_napi(priv);
    netif_device_detach(ndev);

    netif_carrier_off(ndev);

    /* If netdev is down, MAC clock is disabled.
     * So if we want to reclaim MAC rx and tx resource,
     * we must first enable MAC clock and then disable it.
     */
    if (!(ndev->flags & IFF_UP))
        higmac_clk_enable(priv);

    higmac_reclaim_rx_tx_resource(priv);

    if (!(ndev->flags & IFF_UP))
        higmac_clk_disable(priv);

    if (!priv->wol_enable && (priv->phy != NULL)) { /* if no WOL, then poweroff */
        /* no need to call genphy_resume() in resume,
         * because we reset everything
         */
        genphy_suspend(priv->phy); /* power down phy */
        msleep(20); /* NOTE(review): settle delay after PHY power-down; the 20 ms value is unexplained — confirm */
        higmac_hw_all_clk_disable(priv);
    }

    higmac_info("rgmii nic is suspended.\n");

    return 0;
}
EXPORT_SYMBOL(higmac_dev_suspend);

/*
 * Legacy platform resume callback: mirror of higmac_dev_suspend().
 *
 * Re-enables clocks, resets the PHY, restarts the MAC, reconnects the
 * PHY (so the phy fixups run again after the reset), restores NAPI and
 * netif state, and re-enables the per-queue IRQs.
 *
 * NOTE(review): if phy_connect_direct() fails, this returns with the
 * IRQs still disabled and the netdev detached — confirm whether
 * partial-resume cleanup is needed on that path.
 */
int higmac_dev_resume(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct higmac_netdev_local *priv = netdev_priv(ndev);
    int ret;

    /* If we support Wake on LAN, we don't call clk_disable in suspend.
     * But when we resume, the uboot may off mac clock and reset phy
     * by re-writing the mac CRG register.
     * So we first call clk_disable, and then clk_enable.
     */
    if (priv->wol_enable)
        higmac_hw_all_clk_disable(priv);

    higmac_hw_all_clk_enable(priv);
    /* internal FE_PHY: enable clk and reset  */
    higmac_hw_phy_reset(priv);

    /* If netdev is down, MAC clock is disabled.
     * So if we want to restart MAC and re-initialize it,
     * we must first enable MAC clock and then disable it.
     */
    if (!(ndev->flags & IFF_UP))
        higmac_clk_enable(priv);

    /* power on gmac */
    higmac_restart(priv);

    /* If support WoL, we didn't disconnect phy in suspend.
     * But when we resume, we reset PHY, so we want to
     * call phy_connect to make phy_fixup executed.
     * This is important for internal PHY fix.
     */
    if (priv->wol_enable && (priv->phy != NULL))
        phy_disconnect(priv->phy);

    if (priv->phy != NULL) {
        ret = phy_connect_direct(ndev, priv->phy, higmac_adjust_link, priv->phy_mode);
        if (ret)
            return ret;
    }

    if (netif_running(ndev))
        higmac_enable_napi(priv);

    netif_device_attach(ndev);

    if ((ndev->flags & IFF_UP) && (priv->phy != NULL))
        phy_start(priv->phy);

    higmac_enable_irq(priv);

    if (!(ndev->flags & IFF_UP))
        higmac_clk_disable(priv);

    /* If we suspend and resume when net device is down,
     * some operations are unnecessary.
     */
    if (ndev->flags & IFF_UP) {
        /* Force link re-detection: the service timer will re-read the
         * PHY state since old link/speed/duplex are invalidated here.
         */
        mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
        priv->old_link = 0;
        priv->old_speed = SPEED_UNKNOWN;
        priv->old_duplex = DUPLEX_UNKNOWN;
    }

    higmac_info("rgmii nic is resumed.\n");

    return 0;
}
EXPORT_SYMBOL(higmac_dev_resume);
#else
#define higmac_dev_suspend NULL
#define higmac_dev_resume NULL
#endif

/* Device-tree match table: all supported GMAC hardware revisions.
 * Probe special-cases "higmac-v3" (HW_CAP_CCI) and "higmac-v5"
 * (multiple RSS RX queues); the other entries share the default path.
 */
static const struct of_device_id higmac_of_match[] = {
    {.compatible = "hisilicon,higmac", },
    {.compatible = "hisilicon,higmac-v1", },
    {.compatible = "hisilicon,higmac-v2", },
    {.compatible = "hisilicon,higmac-v3", },
    {.compatible = "hisilicon,higmac-v4", },
    {.compatible = "hisilicon,higmac-v5", },
    { },
};

/* Expose the match table for module autoloading (modalias). */
MODULE_DEVICE_TABLE(of, higmac_of_match);

/* Platform driver glue.  Uses the legacy (non-dev_pm_ops) suspend and
 * resume callbacks, which compile to NULL when CONFIG_PM is off.  The
 * explicit .owner assignment is redundant on modern kernels (the
 * platform core sets it), but harmless.
 */
static struct platform_driver higmac_dev_driver = {
    .probe = higmac_dev_probe,
    .remove = higmac_dev_remove,
    .suspend = higmac_dev_suspend,
    .resume = higmac_dev_resume,
    .driver = {
        .owner = THIS_MODULE,
        .name = HIGMAC_DRIVER_NAME,
        .of_match_table = higmac_of_match,
        },
};

/* Module init: register the platform driver; propagate its result. */
static int __init higmac_init(void)
{
    return platform_driver_register(&higmac_dev_driver);
}

/* Module exit: unregister the platform driver registered in init. */
static void __exit higmac_exit(void)
{
    platform_driver_unregister(&higmac_dev_driver);
}

/* Module entry/exit hooks and metadata. */
module_init(higmac_init);
module_exit(higmac_exit);

MODULE_DESCRIPTION("Hisilicon double GMAC driver");
MODULE_LICENSE("GPL v2");
