#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/if_ether.h>

#include "util.h"
#include "higmac.h"
#include "forward.h"
#include "eth_drv.h"
#include "gmac.h"
#include "ctrl.h"

/* AXI burst and outstanding config */
#define BURST_OUTSTANDING_REG    0x3014
#define BURST4_OUTSTANDING1      0x81ff
#define BURST_OUTSTANDING_OFFSET 16

struct higmac_board_info higmac_board_info[MAX_GMAC_NUMS];

externC void dma_cache_clean(AARCHPTR start, AARCHPTR end);
externC void dma_cache_inv(AARCHPTR start, AARCHPTR end);

extern struct higmac_platform_data *g_stHigmacPlatformData;
/* Read the station MAC address out of the MAC registers.
 * Byte layout: mac[0..1] live in STATION_ADDR_HIGH, mac[2..5] in
 * STATION_ADDR_LOW, most significant byte first.
 */
void higmac_hw_get_mac_addr(struct higmac_netdev_local *ld, unsigned char *mac)
{
    unsigned int high;
    unsigned int low;

    high = higmac_readl(ld, STATION_ADDR_HIGH);
    low = higmac_readl(ld, STATION_ADDR_LOW);

    mac[0] = (high >> 8) & 0xff;
    mac[1] = high & 0xff;
    mac[2] = (low >> 24) & 0xff;
    mac[3] = (low >> 16) & 0xff;
    mac[4] = (low >> 8) & 0xff;
    mac[5] = low & 0xff;
}

/* Program the station MAC address into the MAC registers.
 * Mirrors the byte layout used by higmac_hw_get_mac_addr.
 * Always returns 0.
 */
int higmac_hw_set_mac_addr(struct higmac_netdev_local *ld, unsigned char *mac)
{
    unsigned int high;
    unsigned int low;

    high = ((unsigned int)mac[0] << 8) | mac[1];
    higmac_writel(ld, high, STATION_ADDR_HIGH);

    low = ((unsigned int)mac[2] << 24) | ((unsigned int)mac[3] << 16) |
          ((unsigned int)mac[4] << 8) | mac[5];
    higmac_writel(ld, low, STATION_ADDR_LOW);

    /* Adding the unicast address to the forwarding table
     * (entry0--eth0, entry1--eth1) is intentionally disabled:
     * fwd_uc_mc_tbl_add(ld, mac, ld->index, ADD_UC);
     */

    return 0;
}

/* Configure the depth of the four hardware descriptor queues
 * (RX free queue, RX back queue, TX back queue, TX reclaim queue).
 *
 * Each depth register is guarded by an enable bit: the sequence is
 * open (write 1 to *_REG_EN), write the depth, close (write 0).
 * Depths are written in units shifted by DESC_WORD_SHIFT — presumably
 * the register counts descriptor words, not descriptors; confirm
 * against the datasheet.
 */
void higmac_hw_set_desc_queue_depth(struct higmac_netdev_local *ld)
{
    /* compile-time constants; any depth above the HW limit is fatal */
    if (HIGMAC_HWQ_RX_FQ_DEPTH > HIGMAC_MAX_QUEUE_DEPTH
        || HIGMAC_HWQ_RX_BQ_DEPTH > HIGMAC_MAX_QUEUE_DEPTH
        || HIGMAC_HWQ_TX_BQ_DEPTH > HIGMAC_MAX_QUEUE_DEPTH
        || HIGMAC_HWQ_TX_RQ_DEPTH > HIGMAC_MAX_QUEUE_DEPTH) {
        BUG();
    }

    /* RX free queue depth */
    higmac_writel_bits(ld, 1, RX_FQ_REG_EN, BITS_RX_FQ_DEPTH_EN);
    higmac_writel_bits(ld, HIGMAC_HWQ_RX_FQ_DEPTH << DESC_WORD_SHIFT, RX_FQ_DEPTH,
                       BITS_RX_FQ_DEPTH);
    higmac_writel_bits(ld, 0, RX_FQ_REG_EN, BITS_RX_FQ_DEPTH_EN);

    /* RX back queue depth */
    higmac_writel_bits(ld, 1, RX_BQ_REG_EN, BITS_RX_BQ_DEPTH_EN);
    higmac_writel_bits(ld, HIGMAC_HWQ_RX_BQ_DEPTH << DESC_WORD_SHIFT, RX_BQ_DEPTH,
                       BITS_RX_BQ_DEPTH);
    higmac_writel_bits(ld, 0, RX_BQ_REG_EN, BITS_RX_BQ_DEPTH_EN);

    /* TX back queue depth */
    higmac_writel_bits(ld, 1, TX_BQ_REG_EN, BITS_TX_BQ_DEPTH_EN);
    higmac_writel_bits(ld, HIGMAC_HWQ_TX_BQ_DEPTH << DESC_WORD_SHIFT, TX_BQ_DEPTH,
                       BITS_TX_BQ_DEPTH);
    higmac_writel_bits(ld, 0, TX_BQ_REG_EN, BITS_TX_BQ_DEPTH_EN);

    /* TX reclaim queue depth */
    higmac_writel_bits(ld, 1, TX_RQ_REG_EN, BITS_TX_RQ_DEPTH_EN);
    higmac_writel_bits(ld, HIGMAC_HWQ_TX_RQ_DEPTH << DESC_WORD_SHIFT, TX_RQ_DEPTH,
                       BITS_TX_RQ_DEPTH);
    higmac_writel_bits(ld, 0, TX_RQ_REG_EN, BITS_TX_RQ_DEPTH_EN);
}

/* One-time MAC core initialisation: AXI tuning, interrupt masking,
 * receive filtering, interrupt thresholds and queue depths.
 * The register write order follows the hardware bring-up sequence;
 * do not reorder without consulting the datasheet.
 */
void higmac_hw_mac_core_init(struct higmac_netdev_local *ld)
{
#ifdef CONFIG_HIGMAC_AXI_BURST
    unsigned int val;

    /* config AXI burst/outstanding parameters for better performance,
     * but only if the outstanding field (upper 16 bits) is still zero,
     * i.e. not already configured by firmware/bootloader. */
    val = readl(ld->gmac_iobase + BURST_OUTSTANDING_REG);
    val >>= BURST_OUTSTANDING_OFFSET;
    if (!val) {
        writel(BURST4_OUTSTANDING1, ld->gmac_iobase + BURST_OUTSTANDING_REG);
    }
#endif
    /* disable all interrupts, then acknowledge any stale raw status */
    writel(0, ld->gmac_iobase + ENA_PMU_INT);
    writel(~0, ld->gmac_iobase + RAW_PMU_INT);

    /* receive filter config.
     * NOTE(review): writing 1 to BIT_CRC_ERR_PASS looks like it lets
     * CRC-error frames PASS rather than filtering them — confirm the
     * bit polarity against the register manual. */
    higmac_writel_bits(ld, 1, REC_FILT_CONTROL, BIT_CRC_ERR_PASS);
    higmac_writel_bits(ld, 0, REC_FILT_CONTROL, BIT_MC_MATCH_EN);
    higmac_writel_bits(ld, 1, REC_FILT_CONTROL, BIT_UC_MATCH_EN);

    /* fix bug for udp and ip error check */
    writel(CONTROL_WORD_CONFIG, ld->gmac_iobase + CONTROL_WORD);

    writel(0, ld->gmac_iobase + COL_SLOT_TIME);
    writel(1, ld->gmac_iobase + MAC_DUPLEX_HALF_CTRL);

    /* interrupt when rcv packets >= RX_BQ_INT_THRESHOLD (batching) */
    higmac_writel_bits(ld, RX_BQ_INT_THRESHOLD, IN_QUEUE_TH,
                       BITS_RX_BQ_IN_TH);
    higmac_writel_bits(ld, TX_RQ_INT_THRESHOLD, IN_QUEUE_TH,
                       BITS_TX_RQ_IN_TH);

    /* rx_bq/tx_rq timeout thresholds — magic values taken from the
     * reference bring-up code; units are presumably bus cycles (TODO confirm) */
    higmac_writel_bits(ld, 0x10000, RX_BQ_IN_TIMEOUT_TH,
                       BITS_RX_BQ_IN_TIMEOUT_TH);

    higmac_writel_bits(ld, 0x50000, TX_RQ_IN_TIMEOUT_TH,
                       BITS_TX_RQ_IN_TIMEOUT_TH);

    higmac_hw_set_desc_queue_depth(ld);
}

/* Program the RX free-queue DMA base address.
 * The address register is guarded by an enable bit: open, write, close.
 */
void higmac_set_rx_fq_hwq_addr(struct higmac_netdev_local *ld,
                               dma_addr_t phy_addr)
{
    UINT32 addr_lo = (UINT32)phy_addr;

    higmac_writel_bits(ld, 1, RX_FQ_REG_EN, BITS_RX_FQ_START_ADDR_EN);

    higmac_writel(ld, addr_lo, RX_FQ_START_ADDR);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    /* high bits of the 64-bit DMA address live in the depth register */
    higmac_writel_bits(ld, (AARCHPTR)phy_addr >> REG_BIT_WIDTH, RX_FQ_DEPTH, BITS_RX_FQ_ADDR_HI8);
#endif

    higmac_writel_bits(ld, 0, RX_FQ_REG_EN, BITS_RX_FQ_START_ADDR_EN);
}

/* Program the RX back-queue DMA base address.
 * The address register is guarded by an enable bit: open, write, close.
 */
void higmac_set_rx_bq_hwq_addr(struct higmac_netdev_local *ld,
                               dma_addr_t phy_addr)
{
    UINT32 addr_lo = (UINT32)phy_addr;

    higmac_writel_bits(ld, 1, RX_BQ_REG_EN, BITS_RX_BQ_START_ADDR_EN);

    higmac_writel(ld, addr_lo, RX_BQ_START_ADDR);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    /* high bits of the 64-bit DMA address live in the depth register */
    higmac_writel_bits(ld, (AARCHPTR)phy_addr >> REG_BIT_WIDTH, RX_BQ_DEPTH, BITS_RX_BQ_ADDR_HI8);
#endif

    higmac_writel_bits(ld, 0, RX_BQ_REG_EN, BITS_RX_BQ_START_ADDR_EN);
}

/* Program the TX back-queue DMA base address.
 * The address register is guarded by an enable bit: open, write, close.
 */
void higmac_set_tx_bq_hwq_addr(struct higmac_netdev_local *ld,
                               dma_addr_t phy_addr)
{
    UINT32 addr_lo = (UINT32)phy_addr;

    higmac_writel_bits(ld, 1, TX_BQ_REG_EN, BITS_TX_BQ_START_ADDR_EN);

    higmac_writel(ld, addr_lo, TX_BQ_START_ADDR);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    /* high bits of the 64-bit DMA address live in the depth register */
    higmac_writel_bits(ld, (AARCHPTR)phy_addr >> REG_BIT_WIDTH, TX_BQ_DEPTH, BITS_TX_BQ_ADDR_HI8);
#endif

    higmac_writel_bits(ld, 0, TX_BQ_REG_EN, BITS_TX_BQ_START_ADDR_EN);
}

/* Program the TX reclaim-queue DMA base address.
 * The address register is guarded by an enable bit: open, write, close.
 */
void higmac_set_tx_rq_hwq_addr(struct higmac_netdev_local *ld,
                               dma_addr_t phy_addr)
{
    UINT32 addr_lo = (UINT32)phy_addr;

    higmac_writel_bits(ld, 1, TX_RQ_REG_EN, BITS_TX_RQ_START_ADDR_EN);

    higmac_writel(ld, addr_lo, TX_RQ_START_ADDR);
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    /* high bits of the 64-bit DMA address live in the depth register */
    higmac_writel_bits(ld, (AARCHPTR)phy_addr >> REG_BIT_WIDTH, TX_RQ_DEPTH, BITS_TX_RQ_ADDR_HI8);
#endif

    higmac_writel_bits(ld, 0, TX_RQ_REG_EN, BITS_TX_RQ_START_ADDR_EN);
}

/* Program the DMA base address of all four hardware descriptor queues,
 * using the physical addresses recorded when the pools were allocated
 * (see higmac_init_hw_desc_queue). */
void higmac_hw_set_desc_queue_addr(struct higmac_netdev_local *ld)
{
    higmac_set_rx_fq_hwq_addr(ld, ld->rx_fq.phys_addr);
    higmac_set_rx_bq_hwq_addr(ld, ld->rx_bq.phys_addr);
    higmac_set_tx_rq_hwq_addr(ld, ld->tx_rq.phys_addr);
    higmac_set_tx_bq_hwq_addr(ld, ld->tx_bq.phys_addr);
}

/* Return the raw (pre-mask) interrupt status register. */
int higmac_read_irqstatus(struct higmac_netdev_local *ld)
{
    return higmac_readl(ld, RAW_PMU_INT);
}

/* Acknowledge the given interrupt bits (presumably write-1-to-clear,
 * matching the ~0 write used at init — TODO confirm) and return the
 * raw status that remains afterwards. */
int higmac_clear_irqstatus(struct higmac_netdev_local *ld, int irqs)
{
    higmac_writel(ld, irqs, RAW_PMU_INT);

    return higmac_read_irqstatus(ld);
}

/* Enable the given interrupt bits; returns the previous enable mask
 * so callers can restore it later. */
int higmac_irq_enable(struct higmac_netdev_local *ld, int irqs)
{
    int prev = higmac_readl(ld, ENA_PMU_INT);

    higmac_writel(ld, prev | irqs, ENA_PMU_INT);

    return prev;
}

/* Disable the given interrupt bits; returns the previous enable mask
 * so callers can restore it later. */
int higmac_irq_disable(struct higmac_netdev_local *ld, int irqs)
{
    int prev = higmac_readl(ld, ENA_PMU_INT);

    higmac_writel(ld, prev & ~irqs, ENA_PMU_INT);

    return prev;
}

/* Enable descriptor read/write DMA. 0xF presumably sets one enable bit
 * per hardware queue (RX FQ/BQ, TX BQ/RQ) — confirm against datasheet. */
void higmac_hw_desc_enable(struct higmac_netdev_local *ld)
{
    writel(0xF, ld->gmac_iobase + DESC_WR_RD_ENA);
}

/* Disable descriptor read/write DMA for all hardware queues. */
void higmac_hw_desc_disable(struct higmac_netdev_local *ld)
{
    writel(0, ld->gmac_iobase + DESC_WR_RD_ENA);
}

/* Enable both directions of the MAC port (TX first, then RX —
 * kept in this order in case the hardware cares). */
void higmac_port_enable(struct higmac_netdev_local *ld)
{
    higmac_writel_bits(ld, 1, PORT_EN, BITS_TX_EN);
    higmac_writel_bits(ld, 1, PORT_EN, BITS_RX_EN);
}

/* Disable both directions of the MAC port. */
void higmac_port_disable(struct higmac_netdev_local *ld)
{
    higmac_writel_bits(ld, 0, PORT_EN, BITS_TX_EN);
    higmac_writel_bits(ld, 0, PORT_EN, BITS_RX_EN);
}

/* Disable only the receive direction of the MAC port. */
void higmac_rx_port_disable(struct higmac_netdev_local *ld)
{
    higmac_writel_bits(ld, 0, PORT_EN, BITS_RX_EN);
}

/* Enable only the receive direction of the MAC port. */
void higmac_rx_port_enable(struct higmac_netdev_local *ld)
{
    higmac_writel_bits(ld, 1, PORT_EN, BITS_RX_EN);
}

/* Allocate and register the four hardware descriptor rings plus the
 * skb arrays that back the RX free queue and TX back queue.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure; on failure
 * everything allocated so far is released via
 * higmac_destroy_hw_desc_queue (which must therefore tolerate a
 * partially initialised state).
 */
int higmac_init_hw_desc_queue(struct higmac_netdev_local *ld)
{
    struct higmac_desc *virt_addr;
    int size, i;
    struct los_sk_buff *skb;

    /* queue depth */
    ld->rx_fq.count = HIGMAC_HWQ_RX_FQ_DEPTH;
    ld->rx_bq.count = HIGMAC_HWQ_RX_BQ_DEPTH;
    ld->tx_bq.count = HIGMAC_HWQ_TX_BQ_DEPTH;
    ld->tx_rq.count = HIGMAC_HWQ_TX_RQ_DEPTH;

    /* one cache-aligned descriptor pool per hardware queue.
     * The extra 2 * CACHE_ALIGNED_SIZE presumably pads the pool so
     * cache maintenance on neighbours can't clobber it — TODO confirm. */
    for (i = 0; i < QUEUE_NUMS; i++) {
        size = ld->pool[i].count * sizeof(struct higmac_desc);
        size += 2 * CACHE_ALIGNED_SIZE;
        virt_addr = memalign(CACHE_ALIGNED_SIZE, size);
        if (virt_addr == NULL) {
            PRINTK("alloc desc pool[%d] error!\n", i);
            goto error_free_pool;
        }

        memset(virt_addr, 0, size);

        /* push zeroed descriptors to memory, then drop stale lines so
         * later hardware write-back is observed by the CPU */
        net_dma_cache_clean(virt_addr, size);
        net_dma_cache_inv(virt_addr, size);

        ld->pool[i].size = size;
        ld->pool[i].desc = virt_addr;
        /* identity-mapped: physical address == virtual address here */
        ld->pool[i].phys_addr = (AARCHPTR)virt_addr;
        /* pr_info("pool[i]=%p, phys=0x%x\n", virt_addr, phys_addr); */
    }

    /* skb pointer array + one preallocated receive buffer per RX FQ slot */
    ld->rx_fq.skb = zalloc(ld->rx_fq.count
                           * sizeof(struct los_sk_buff *));
    if (ld->rx_fq.skb == NULL) {
        PRINTK("alloc gmac rx_fq skb array failed!\n");
        goto error_free_pool;
    }

    for (i = 0; i < ld->rx_fq.count; i++) {
        skb = los_alloc_skb_ip_align(HIETH_MAX_FRAME_SIZE);
        if (!skb) {
            PRINTK("%s: skb queue feed alloc failed\n", __func__);
            goto error_free_pool;
        }
        ld->rx_fq.skb[i] = skb;
    }

    /* skb pointer array + one preallocated transmit buffer per TX BQ slot */
    ld->tx_bq.skb = zalloc(ld->tx_bq.count
                           * sizeof(struct los_sk_buff *));
    if (ld->tx_bq.skb == NULL) {
        PRINTK("alloc gmac tx_bq skb array failed!\n");
        goto error_free_pool;
    }

    for (i = 0; i < ld->tx_bq.count; i++) {
        skb = los_alloc_skb(HIETH_MAX_FRAME_SIZE);
        if (!skb) {
            PRINTK("%s: skb queue feed alloc failed\n", __func__);
            goto error_free_pool;
        }
        ld->tx_bq.skb[i] = skb;
    }

    /* tell the hardware where the rings live */
    higmac_hw_set_desc_queue_addr(ld);

    return 0;

error_free_pool:
    higmac_destroy_hw_desc_queue(ld);

    return -ENOMEM;
}

/* Release all software resources of the hardware descriptor queues.
 *
 * Must be safe on a partially initialised state, because
 * higmac_init_hw_desc_queue calls it from its error path: the skb
 * pointer arrays may be NULL (array allocation failed) or only
 * partially populated (skb allocation failed mid-loop). The original
 * code indexed ld->rx_fq.skb / ld->tx_bq.skb unconditionally, which
 * dereferences NULL in exactly that path — guard both levels here.
 */
void higmac_destroy_hw_desc_queue(struct higmac_netdev_local *ld)
{
    int i;

    if (ld->rx_fq.skb != NULL) {
        for (i = 0; i < ld->rx_fq.count; i++) {
            if (ld->rx_fq.skb[i] != NULL) {
                los_free_skb(ld->rx_fq.skb[i]);
                ld->rx_fq.skb[i] = NULL;
            }
        }
        free(ld->rx_fq.skb);
        ld->rx_fq.skb = NULL;
    }

    if (ld->tx_bq.skb != NULL) {
        for (i = 0; i < ld->tx_bq.count; i++) {
            if (ld->tx_bq.skb[i] != NULL) {
                los_free_skb(ld->tx_bq.skb[i]);
                ld->tx_bq.skb[i] = NULL;
            }
        }
        free(ld->tx_bq.skb);
        ld->tx_bq.skb = NULL;
    }

    /* descriptor pools; desc is NULLed so a double destroy is harmless */
    for (i = 0; i < QUEUE_NUMS; i++) {
        if (ld->pool[i].desc) {
            free(ld->pool[i].desc);
            ld->pool[i].desc = NULL;
        }
    }
}

/* Allocate the scatter-gather descriptor array used by the TX path.
 * Returns 0 on success, -ENOMEM if the cache-aligned allocation fails.
 */
int higmac_init_sg_desc_queue(struct higmac_netdev_local *ld)
{
    int bytes;

    /* a few spare descriptors beyond the TX back-queue depth */
    ld->sg_count = ld->tx_bq.count + HIGMAC_SG_DESC_ADD;

    bytes = ld->sg_count * sizeof(struct sg_desc) + CACHE_ALIGNED_SIZE;
    ld->dma_sg_desc = memalign(CACHE_ALIGNED_SIZE, bytes);
    if (ld->dma_sg_desc == NULL) {
        PRINTK("alloc sg desc dma error!\n");
        return -ENOMEM;
    }

    /* identity-mapped: physical address == virtual address here */
    ld->dma_sg_phy = (unsigned long)ld->dma_sg_desc;

    /* empty ring */
    ld->sg_head = 0;
    ld->sg_tail = 0;

    return 0;
}

/* Free the scatter-gather descriptor array; safe to call twice. */
void higmac_destroy_sg_desc_queue(struct higmac_netdev_local *ld)
{
    if (ld->dma_sg_desc == NULL) {
        return;
    }

    free(ld->dma_sg_desc);
    ld->dma_sg_desc = NULL;
}

#if 1
/* Reclaim TX descriptors that the hardware has finished transmitting.
 *
 * Walks TX_RQ from the software read pointer up to the hardware write
 * pointer, clears each returned descriptor's buffer address, and
 * publishes the advanced read pointer. If anything was reclaimed while
 * the transmit path was flow-blocked (ld->tx_busy), the waiting sender
 * is woken via the per-port event.
 *
 * Changes vs. original: removed the unused local `regval`
 * (-Wunused-variable) and the dead commented-out diag_printf/dump
 * debug code. Logic and register sequence are unchanged.
 *
 * Returns 0 (kept as int for interface compatibility with the GSO
 * release hook, which can report errors).
 */
int higmac_xmit_release_skb(struct higmac_netdev_local *ld,
                            struct los_eth_driver *sc,
                            struct higmac_priv_s *priv)
{
    int tx_rq_wr_offset, tx_rq_rd_offset, pos;
    struct higmac_desc *tx_rq_desc;
    int tx_reclaim_cnt = 0;
    int ret = 0;

    HISI_NET_LOCK_GET(&(ld->tx_lock));

    tx_rq_wr_offset = higmac_readl_bits(ld, TX_RQ_WR_ADDR,
                                        BITS_TX_RQ_WR_ADDR); /* logic write */
    tx_rq_rd_offset = higmac_readl_bits(ld, TX_RQ_RD_ADDR,
                                        BITS_TX_RQ_RD_ADDR); /* software read */

    while (tx_rq_rd_offset != tx_rq_wr_offset) {
        /* offsets are byte offsets; descriptor size is 2^DESC_BYTE_SHIFT */
        pos = tx_rq_rd_offset >> DESC_BYTE_SHIFT;
        tx_rq_desc = ld->tx_rq.desc + pos;

        /* descriptor was written back by hardware; invalidate before reading */
        net_dma_cache_inv(tx_rq_desc, sizeof(struct higmac_desc));

        /* NOTE: TSO/GSO release hook intentionally disabled:
         * ret = higmac_xmit_release_gso(ld, tx_rq_desc); */

        tx_rq_desc->data_buff_addr = 0;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
        tx_rq_desc->reserve_desc2 = 0;
#endif
        tx_rq_rd_offset += DESC_SIZE;
        if (tx_rq_rd_offset == (HIGMAC_HWQ_TX_RQ_DEPTH << DESC_BYTE_SHIFT)) {
            tx_rq_rd_offset = 0; /* wrap around the ring */
        }
        tx_reclaim_cnt++;

        /* publish the new read pointer for each reclaimed descriptor */
        higmac_writel_bits(ld, tx_rq_rd_offset,
                           TX_RQ_RD_ADDR, BITS_TX_RQ_RD_ADDR);
    }

    /* if the TX path had stalled, tell the waiting sender it may proceed */
    if (tx_reclaim_cnt && ld->tx_busy) {
        ld->tx_busy = 0;
        LOS_EventWrite(&(g_stHigmacPlatformData[priv->index].stEvent), EVENT_NET_CAN_SEND);
    }

    HISI_NET_LOCK_PUT(&(ld->tx_lock));

    return ret;
}

#ifdef HIGMAC_TSO_SUPPORTED

/* Check a TX descriptor for transmit errors.
 *
 * Stub: no error bits are evaluated yet and 0 (no error) is always
 * returned. The original body read tx_bq_desc->tx_err into an unused
 * local, which triggers -Wunused-variable; the parameter is now
 * explicitly voided instead, pending a real implementation.
 */
int higmac_check_tx_err(struct higmac_tso_desc *tx_bq_desc)
{
    (void)tx_bq_desc;
    return 0;
}

/* Release a TSO/GSO transmit descriptor after hardware completion.
 * Stub: not implemented; always reports success (0). Its call site in
 * higmac_xmit_release_skb is commented out accordingly. */
int higmac_xmit_release_gso(struct higmac_netdev_local *ld,
                            struct higmac_tso_desc *tx_bq_desc)
{
    return 0;
}

/* Extract packet metadata from a pbuf into a TSO transmit descriptor.
 * Stub: not implemented; always reports success (0). */
int higmac_get_pkt_info(struct higmac_netdev_local *ld,
                        struct pbuf *p,
                        struct higmac_tso_desc *tx_bq_desc)
{
    return 0;
}

int higmac_xmit_gso(struct higmac_netdev_local *ld,
                    unsigned int pos, struct pbuf *p,
                    struct higmac_tso_desc *tx_bq_desc)
{
    struct los_sk_buff *skb;
    struct pbuf *p_iter;
    int has_copied, copy, copy_left;
    int send_pkt_len;

    send_pkt_len = p->tot_len;
    if (send_pkt_len > HIETH_MAX_FRAME_SIZE) {
        PRINTK("%s: xmit error len=%d\n", __func__, send_pkt_len);
    }

    skb = ld->tx_bq.skb[pos];

    has_copied = 0;
    copy_left = send_pkt_len - has_copied;
    for (p_iter = p; p_iter && (copy_left > 0); p_iter = p_iter->next) {
        copy = p_iter->len;
        /* check copy_left to make sure no memory overflow
         * when copying to skb->data
         */
        if (copy > copy_left) {
            PRINTK("%s: send pbuf err! tot_len=%d, pkt_len=%d, copied=%d, cur_len=%d\n",
                   __func__, p->tot_len, send_pkt_len, has_copied, copy);
            copy = copy_left;
        }
        memcpy(skb->data + has_copied, (const void *)p_iter->payload, copy);
        has_copied += copy;
        copy_left = send_pkt_len - has_copied;
    }

    tx_bq_desc->desc1.val = 0;
    /* default config, default closed checksum offload function */
    tx_bq_desc->desc1.tx.tso_flag = HIGMAC_CSUM_DISABLE;
    tx_bq_desc->desc1.tx.coe_flag = HIGMAC_CSUM_DISABLE;
#if LWIP_TX_CSUM_OFFLOAD
    if (p->flags & PBUF_FLAG_TX_CSUM_PARTIAL) {
        struct iphdr *iphdr = (struct iphdr *)((char *)p->payload + sizeof(struct ethhdr));
        if ((iphdr->protocol == IPPROTO_TCP) || (iphdr->protocol == IPPROTO_UDP)) {
            if (iphdr->protocol == IPPROTO_TCP) {
                struct tcphdr *tcphdr = (struct tcphdr *)((char *)iphdr + (iphdr->ihl << 2));
                tx_bq_desc->desc1.tx.prot_hdr_len = tcphdr->doff;
                tx_bq_desc->desc1.tx.prot_type = HIGMAC_TRANS_TCP_TYPE_HW;
            } else {
                tx_bq_desc->desc1.tx.prot_hdr_len = 2; /* 8 bytes */
                tx_bq_desc->desc1.tx.prot_type = HIGMAC_TRANS_UDP_TYPE_HW;
            }
            if (iphdr->version == 4) {
                tx_bq_desc->desc1.tx.ip_ver = HIGMAC_IPV4_VERSION_HW;
            } else if (iphdr->version == 6) {
                tx_bq_desc->desc1.tx.ip_ver = HIGMAC_IPV6_VERSION_HW;
            }
            tx_bq_desc->desc1.tx.ip_hdr_len = iphdr->ihl;
            tx_bq_desc->desc1.tx.coe_flag = HIGMAC_CSUM_ENABLE;
            tx_bq_desc->desc1.tx.vlan_flag = HIGMAC_CSUM_DISABLE;
        }
    }
#endif
    net_dma_cache_clean(skb->data, send_pkt_len);
    tx_bq_desc->desc1.tx.sg_flag = 0;
    tx_bq_desc->desc1.tx.nfrags_num = 0;
    tx_bq_desc->desc1.tx.hw_own = DESC_VLD_BUSY;
    tx_bq_desc->data_buff_addr = (UINT32)(AARCHPTR)skb->data;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
    tx_bq_desc->reserve_desc2 = ((AARCHPTR)skb->data >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK;
#endif
    tx_bq_desc->desc1.tx.data_len = send_pkt_len;

    net_dma_cache_clean(tx_bq_desc, sizeof(*tx_bq_desc));

    return 0;
}
#endif

/* Refill the RX free queue with receive buffers.
 *
 * Computes how many free slots the hardware ring has (distance between
 * the software write pointer and the hardware read pointer), then for
 * each slot rewrites the descriptor to point at the preallocated skb
 * for that position and advances the write pointer. Always returns 0.
 */
int higmac_feed_hw(struct higmac_netdev_local *ld, higmac_priv_t *priv)
{
    int rx_fq_wr_offset, rx_fq_rd_offset;
    struct higmac_desc *rx_fq_desc;
    int wr_rd_dist;
    int i = 0;
    int start, end, num = 0;  /* NOTE(review): unused here — candidates for removal */
    int pos;
    struct los_sk_buff *skb;

    HISI_NET_LOCK_GET(&(ld->rx_lock));

    rx_fq_wr_offset = higmac_readl_bits(ld, RX_FQ_WR_ADDR,
                                        BITS_RX_FQ_WR_ADDR); /* software write pointer */
    rx_fq_rd_offset = higmac_readl_bits(ld, RX_FQ_RD_ADDR,
                                        BITS_RX_FQ_RD_ADDR); /* logic read pointer */

    /* free space in the ring: total size minus the in-flight span
     * (both offsets are byte offsets into the ring) */
    if (rx_fq_wr_offset >= rx_fq_rd_offset) {
        wr_rd_dist = (HIGMAC_HWQ_RX_FQ_DEPTH << DESC_BYTE_SHIFT)
                     - (rx_fq_wr_offset - rx_fq_rd_offset);
    } else {
        wr_rd_dist = rx_fq_rd_offset - rx_fq_wr_offset;
    }
    wr_rd_dist >>= DESC_BYTE_SHIFT; /* offset was counted on bytes, desc size = 2^DESC_BYTE_SHIFT */

    /*
     * wr_rd_dist - 1 for logic reason.
     * One slot is kept empty so the hardware can distinguish a full
     * ring from an empty one (wr == rd) — standard ring-buffer rule;
     * confirm against the controller documentation.
     */
    for (i = 0; i < wr_rd_dist - 1; i++) {
        pos = rx_fq_wr_offset >> DESC_BYTE_SHIFT;
        rx_fq_desc = ld->rx_fq.desc + pos;

        /* drop stale cache lines so the coming DMA write is visible */
        skb = ld->rx_fq.skb[pos];
        net_dma_cache_inv((void *)skb->data, HIETH_MAX_FRAME_SIZE);

        rx_fq_desc->data_buff_addr = (UINT32)(AARCHPTR)skb->data;
#if defined(CONFIG_HIGMAC_DDR_64BIT)
        /* upper bits of the 64-bit buffer address */
        rx_fq_desc->reserve_desc3 = ((AARCHPTR)skb->data >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET;
#endif

        rx_fq_desc->buffer_len = HIETH_MAX_FRAME_SIZE - 1;
        rx_fq_desc->data_len = 0;
        rx_fq_desc->fl = 0;
        rx_fq_desc->descvid = DESC_VLD_FREE; /* hand the descriptor to hardware */

        net_dma_cache_clean(rx_fq_desc, sizeof(*rx_fq_desc));

        rx_fq_wr_offset += DESC_SIZE;
        if (rx_fq_wr_offset >= (HIGMAC_HWQ_RX_FQ_DEPTH << DESC_BYTE_SHIFT)) {
            rx_fq_wr_offset = 0; /* wrap around the ring */
        }

        /* This barrier is important here.  It is required to ensure
         * the ARM CPU flushes it's DMA write buffers before proceeding
         * to the next instruction, to ensure that GMAC will see
         * our descriptor changes in memory */
        HIGMAC_SYNC_BARRIER();

        higmac_writel_bits(ld, rx_fq_wr_offset, RX_FQ_WR_ADDR,
                           BITS_RX_FQ_WR_ADDR);
    }

    HISI_NET_LOCK_PUT(&(ld->rx_lock));

    return 0;
}
#endif
