#include "eth_drv.h"
#include "hieth.h"
#include "ctrl.h"
#include "mac.h"

#include "mdio.h"
#include <linux/delay.h>
#include <netinet/ip.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/if_ether.h>

externC void dma_cache_clean(AARCHPTR start, AARCHPTR end);
externC void dma_cache_inv(AARCHPTR start, AARCHPTR end);

extern struct hieth_platform_data *g_stHiethPlatformData;

/*
 * Read an "expanded" (indirectly addressed) festa PHY register:
 * latch the expanded address into MII_EXPMA, then fetch the data
 * through MII_EXPMD.
 */
static inline int fephy_expanded_read(struct hieth_netdev_local *ld, int phy_addr, int regnum)
{
    hieth_mdio_write(ld, phy_addr, MII_EXPMA, regnum);
    return hieth_mdio_read(ld, phy_addr, MII_EXPMD);
}

/*
 * Write an "expanded" (indirectly addressed) festa PHY register:
 * latch the expanded address into MII_EXPMA, then store the value
 * through MII_EXPMD. Returns the status of the data write.
 */
static inline int fephy_expanded_write(struct hieth_netdev_local *ld, int phy_addr, int regnum, int val)
{
    hieth_mdio_write(ld, phy_addr, MII_EXPMA, regnum);
    return hieth_mdio_write(ld, phy_addr, MII_EXPMD, val);
}

/*
 * No OTP trim data available: let the PHY auto-trim with its built-in ATE
 * defaults and wait for it to report completion. Polls BIT_AUTOTRIM_DONE
 * up to 3 times, 250 ms apart, then settles for a further 5 ms.
 */
static void hieth_fephy_use_default_trim(struct hieth_netdev_local *ld, eth_phy_access_t *f)
{
    unsigned short done = 0;
    int retries = 3;

    PRINTK("No OTP data, festa PHY use default ATE parameters!\n");

    while (retries > 0) {
        msleep(250);
        done = fephy_expanded_read(ld, f->phy_addr, REG_DEF_ATE) & BIT_AUTOTRIM_DONE;
        if (done) {
            break;
        }
        retries--;
    }

    if (!done) {
        PRINTK("festa PHY wait autotrim done timeout! \n");
    }
    /* short settle delay after trimming */
    mdelay(5);
}
/*
 * hieth_fephy_trim - apply analog trim values to the internal festa PHY.
 *
 * The LD amplitude, LDO amplitude and resistor-tuning values are read from
 * the system controller register at SYS_CTRL_REG_BASE + 0x8024. When all
 * three are zero there is no OTP data and the PHY's built-in ATE defaults
 * are used instead. Otherwise each value is merged (read-modify-write) into
 * the matching expanded PHY register, CFG_DONE is raised in REG_WR_DONE,
 * and the function polls (up to 50 tries, 5 ms apart) for the PHY to
 * acknowledge via CFG_ACK.
 */
void hieth_fephy_trim(struct hieth_netdev_local *ld, eth_phy_access_t *f)
{
    unsigned int val;
    int timeout = 50;
    unsigned char ld_set;
    unsigned char ldo_set;
    unsigned char r_tuning;

    /* Extract the three trim fields from the system-controller register. */
    val = readl(SYS_CTRL_REG_BASE + 0x8024);
    ld_set = (val >> BIT_OFFSET_LD_SET) & BIT_MASK_LD_SET;
    ldo_set = (val >> BIT_OFFSET_LDO_SET) & BIT_MASK_LDO_SET;
    r_tuning = (val >> BIT_OFFSET_R_TUNING) & BIT_MASK_R_TUNING;
    if ((!ld_set) && (!ldo_set) && (!r_tuning)) {
        /* No OTP data at all: fall back to the PHY's default auto-trim. */
        hieth_fephy_use_default_trim(ld, f);
        return;
    }
    /* Merge each trim value into its expanded PHY register, preserving
     * the other bits of the register. */
    val = fephy_expanded_read(ld, f->phy_addr, REG_LD_AM);
    val = (val & ~BIT_MASK_LD_SET) | (ld_set & BIT_MASK_LD_SET);
    fephy_expanded_write(ld, f->phy_addr, REG_LD_AM, val);

    val = fephy_expanded_read(ld, f->phy_addr, REG_LDO_AM);
    val = (val & ~BIT_MASK_LDO_SET) | (ldo_set & BIT_MASK_LDO_SET);
    fephy_expanded_write(ld, f->phy_addr, REG_LDO_AM, val);

    val = fephy_expanded_read(ld, f->phy_addr, REG_R_TUNING);
    val = (val & ~BIT_MASK_R_TUNING) | (r_tuning & BIT_MASK_R_TUNING);
    fephy_expanded_write(ld, f->phy_addr, REG_R_TUNING, val);

    val = fephy_expanded_read(ld, f->phy_addr, REG_WR_DONE);
    if (val & BIT_CFG_ACK) {
        /* NOTE(review): ACK already high before DONE is raised; logged only. */
        PRINTK("festa PHY 0x3053 bit CFG_ACK value: 1\n");
    }
    /* Signal the PHY that all trim registers have been written. */
    val = val | BIT_CFG_DONE;

    fephy_expanded_write(ld, f->phy_addr, REG_WR_DONE, val);

    /* Wait for the PHY to acknowledge the trim configuration. */
    do {
        msleep(5);
        val = fephy_expanded_read(ld, f->phy_addr, REG_WR_DONE);
        val &= BIT_CFG_ACK;
    } while (!val && --timeout);

    if (!timeout) {
        PRINTK("festa PHY 0x3053 wait bit CFG_ACK timeout!\n");
    }

    mdelay(5);

    PRINTK("FEPHY: addr=%d ld_am=0x%x, ldo_am=0x%x, r_tuning=0x%x  DONE. \n", f->phy_addr, fephy_expanded_read(ld,
            f->phy_addr, REG_LD_AM), fephy_expanded_read(ld, f->phy_addr, REG_LDO_AM), fephy_expanded_read(ld, f->phy_addr,
                    REG_R_TUNING));
}

/*
 * Configure which checksum-error packets the RX COE engine discards:
 * IP-header errors and IPv6 UDP zero-checksum packets follow 'drop';
 * payload-checksum errors are never dropped here.
 */
static inline void hieth_enable_rxcsum_drop(struct hieth_netdev_local *ld,
                                            bool drop)
{
    hieth_writel_bits(ld, drop, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_IPHDR_DROP);
    hieth_writel_bits(ld, false, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_PAYLOAD_DROP);
    hieth_writel_bits(ld, drop, UD_REG_NAME(GLB_RX_COE_CTRL), BITS_COE_IPV6_UDP_ZERO_DROP);
}

/*
 * Soft-reset one MAC port. The SF IP requires the reset pulse to be applied
 * twice (assert, 1 ms, deassert — repeated, with 1 ms between the pulses).
 * NOTE(review): the decision is taken from ld->port; the 'port' argument is
 * unused here. Always returns 0.
 */
static int hieth_port_reset(struct hieth_netdev_local *ld, int port)
{
    int pass;

    if (ld->port == UP_PORT) {
        /* Note: sf ip need reset twice */
        for (pass = 0; pass < 2; pass++) {
            hieth_writel_bits(ld, 1, GLB_SOFT_RESET,
                              BITS_ETH_SOFT_RESET_ALL);
            msleep(1);
            hieth_writel_bits(ld, 0, GLB_SOFT_RESET,
                              BITS_ETH_SOFT_RESET_ALL);
            if (pass == 0) {
                msleep(1);
            }
        }
    } else if (ld->port == DOWN_PORT) {
        /* Note: sf ip need reset twice */
        for (pass = 0; pass < 2; pass++) {
            hieth_writel_bits(ld, 1, GLB_SOFT_RESET,
                              BITS_ETH_SOFT_RESET_DOWN);
            msleep(1);
            hieth_writel_bits(ld, 0, GLB_SOFT_RESET,
                              BITS_ETH_SOFT_RESET_DOWN);
            if (pass == 0) {
                msleep(1);
            }
        }
    }

    return 0;
}

/*
 * hieth_port_init - basic register setup for one MAC port.
 *
 * Sets little-endian mode and CPU-controlled (forced) negotiation, clears
 * the link status, masks and clears all interrupts, disables VLAN handling,
 * and enables forwarding between this port and the CPU (broadcast and
 * multicast to CPU, MAC address table enabled). Always returns 0.
 */
static int hieth_port_init(struct hieth_netdev_local *ld, int port)
{
    /* HW DEFAULT RX-PKT-LEN-RANGE [42,1518]    */
    /* HW MAC FILTER TABLE DISABLE            */

    hieth_assert(port == ld->port);

    hieth_set_endian_mode(ld, HIETH_LITTLE_ENDIAN);

    /* start with link status cleared */
    hieth_set_linkstat(ld, 0);

    hieth_set_negmode(ld, HIETH_NEGMODE_CPUSET);

    /* clear all interrupt status */
    hieth_clear_irqstatus(ld, UD_BIT_NAME(BITS_IRQS_MASK));
    /* disable interrupts */
    hieth_writel_bits(ld, 0, GLB_RW_IRQ_ENA, UD_BIT_NAME(BITS_IRQS_ENA));
    hieth_irq_disable(ld, UD_BIT_NAME(BITS_IRQS_MASK));

#ifdef HIETH_TSO_SUPPORTED
    /* enable TSO debug for error handle */
    hieth_writel_bits(ld, 1, UD_REG_NAME(GLB_TSO_DBG_EN), BITS_TSO_DBG_EN);
#endif

    /* disable vlan func */
    hieth_writel_bits(ld, 0, GLB_FWCTRL, BITS_VLAN_ENABLE);

    /* enable UpEther<->CPU */
    hieth_writel_bits(ld, 1, GLB_FWCTRL, UD_BIT(ld->port, BITS_FW2CPU_ENA));
    hieth_writel_bits(ld, 0, GLB_FWCTRL, UD_BIT(ld->port, BITS_FWALL2CPU));
    hieth_writel_bits(ld, 1, GLB_MACTCTRL,
                      UD_BIT(ld->port, BITS_BROAD2CPU));
    hieth_writel_bits(ld, 1, GLB_MACTCTRL, UD_BIT(ld->port, BITS_MACT_ENA));
    hieth_writel_bits(ld, 1, GLB_MACTCTRL, UD_BIT(ld->port, BITS_MULTI2CPU));

    /* no extra preamble (lead code) required; cap RX frame length */
    hieth_set_mac_leadcode_cnt_limit(ld, 0);
    hieth_set_rcv_len_max(ld, HIETH_MAX_RCV_LEN);

    return 0;
}

/*
 * hieth_hw_mac_core_init - one-time bring-up of a MAC port.
 *
 * Initializes the TX/RX locks, resets and configures the port, optionally
 * enables RX checksum-error dropping and resets the TSO software ring
 * indices, then programs the hardware queue depths.
 */
void hieth_hw_mac_core_init(struct hieth_netdev_local *ld)
{
    HISI_NET_LOCK_INIT(&(ld->tx_lock));
    HISI_NET_LOCK_INIT(&(ld->rx_lock));

    /* reset and init port */
    hieth_port_reset(ld, ld->port);
    hieth_port_init(ld, ld->port);

#ifdef HIETH_RXCSUM_SUPPORTED
    /* drop packets with bad IP-header / IPv6-UDP-zero checksums */
    hieth_enable_rxcsum_drop(ld, true);
#endif

#ifdef HIETH_TSO_SUPPORTED
    /* software TX/SG rings start empty */
    ld->sg_head = ld->sg_tail = 0;
    ld->txq_head = ld->txq_tail = 0;
#endif
    ld->tx_hw_cnt = 0;  /* no packets queued in hardware yet */

    /* setup hardware */
    hieth_set_hwq_depth(ld);
}

/*
 * Set the given bits in the interrupt-enable register.
 * NOTE(review): unlike _hieth_irq_disable(), this returns the mask read
 * back AFTER the update — presumably an intentional read-back; confirm.
 * Caller must hold hieth_glb_reg_lock.
 */
static inline int _hieth_irq_enable(struct hieth_netdev_local *ld, int irqs)
{
    int ena;

    ena = hieth_readl(ld, GLB_RW_IRQ_ENA);
    hieth_writel(ld, ena | irqs, GLB_RW_IRQ_ENA);
    ena = hieth_readl(ld, GLB_RW_IRQ_ENA);

    return ena;
}

/*
 * Clear the given bits in the interrupt-enable register and return the
 * mask as it was before the update. Caller must hold hieth_glb_reg_lock.
 */
static inline int _hieth_irq_disable(struct hieth_netdev_local *ld, int irqs)
{
    int prev = hieth_readl(ld, GLB_RW_IRQ_ENA);

    hieth_writel(ld, prev & ~irqs, GLB_RW_IRQ_ENA);
    return prev;
}

/* Return the current interrupt status register (GLB_RO_IRQ_STAT). */
static inline int _hieth_read_irqstatus(struct hieth_netdev_local *ld)
{
    return hieth_readl(ld, GLB_RO_IRQ_STAT);
}

/*
 * Program the host MAC address filter registers for this port. The address
 * is split into a 16-bit high half (mac[0..1]) and a 32-bit low half
 * (mac[2..5]); the down port additionally needs its host-MAC compare
 * enabled. NOTE(review): the 'ena' argument is not used by this
 * implementation. Always returns 0.
 */
int hieth_hw_set_macaddress(struct hieth_netdev_local *ld,
                            int ena,
                            unsigned char *mac)
{
    unsigned long hi;
    unsigned long lo;

    hi = mac[1] | (mac[0] << 8);
    lo = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);

    if (ld->port == DOWN_PORT) {
        hieth_writel_bits(ld, 1, GLB_DN_HOSTMAC_ENA, BITS_DN_HOST_ENA);
    }

    if (ld->port == UP_PORT) {
        hieth_writel(ld, hi, GLB_HOSTMAC_H16);
        hieth_writel(ld, lo, GLB_HOSTMAC_L32);
    } else {
        hieth_writel(ld, hi, GLB_DN_HOSTMAC_H16);
        hieth_writel(ld, lo, GLB_DN_HOSTMAC_L32);
    }

    return 0;
}

/*
 * Read back the host MAC address filter registers for this port and unpack
 * them into mac[0..5] (mac[0] is the most significant byte). Always
 * returns 0.
 */
int hieth_hw_get_macaddress(struct hieth_netdev_local *ld, unsigned char *mac)
{
    unsigned long hi;
    unsigned long lo;
    int up = (ld->port == UP_PORT);

    hi = hieth_readl(ld, up ? GLB_HOSTMAC_H16 : GLB_DN_HOSTMAC_H16);
    mac[0] = (hi >> 8) & 0xff;
    mac[1] = hi & 0xff;

    lo = hieth_readl(ld, up ? GLB_HOSTMAC_L32 : GLB_DN_HOSTMAC_L32);
    mac[2] = (lo >> 24) & 0xff;
    mac[3] = (lo >> 16) & 0xff;
    mac[4] = (lo >> 8) & 0xff;
    mac[5] = lo & 0xff;

    return 0;
}

/* Non-zero when the hardware transmit queue can accept another packet. */
int _test_xmit_queue_ready(struct hieth_netdev_local *ld)
{
    int rdy = hieth_readl_bits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT), BITS_XMITQ_RDY);

    return rdy;
}

/* Non-zero when the hardware receive queue can accept another buffer. */
static inline int _test_recv_queue_ready(struct hieth_netdev_local *ld)
{
    int rdy = hieth_readl_bits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT), BITS_RECVQ_RDY);

    return rdy;
}

/*
 * Enable the given interrupt bits, serialized against other users of the
 * shared global-register block; returns _hieth_irq_enable()'s result.
 */
int hieth_irq_enable(struct hieth_netdev_local *ld, int irqs)
{
    int ret;

    HISI_NET_LOCK_GET(&hieth_glb_reg_lock);
    ret = _hieth_irq_enable(ld, irqs);
    HISI_NET_LOCK_PUT(&hieth_glb_reg_lock);

    return ret;
}

/*
 * Disable the given interrupt bits, serialized against other users of the
 * shared global-register block; returns the previous enable mask.
 */
int hieth_irq_disable(struct hieth_netdev_local *ld, int irqs)
{
    int ret;

    HISI_NET_LOCK_GET(&hieth_glb_reg_lock);
    ret = _hieth_irq_disable(ld, irqs);
    HISI_NET_LOCK_PUT(&hieth_glb_reg_lock);

    return ret;
}

/* Return the current interrupt status (read-only register, no locking). */
int hieth_read_irqstatus(struct hieth_netdev_local *ld)
{
    return _hieth_read_irqstatus(ld);
}

/*
 * NOTE(review): despite the name this reads GLB_RO_IRQ_STAT — the same
 * register as hieth_read_irqstatus(); possibly GLB_RW_IRQ_RAW was intended.
 * Behavior kept as-is; confirm before relying on "raw" semantics.
 */
int hieth_read_raw_irqstatus(struct hieth_netdev_local *ld)
{
    return hieth_readl(ld, GLB_RO_IRQ_STAT);
}

/*
 * Acknowledge the given interrupts by writing them to the raw register,
 * then return the status that is still pending afterwards. Serialized via
 * the shared global-register lock.
 */
int hieth_clear_irqstatus(struct hieth_netdev_local *ld, int irqs)
{
    int remaining;

    HISI_NET_LOCK_GET(&hieth_glb_reg_lock);
    hieth_writel(ld, irqs, GLB_RW_IRQ_RAW);
    remaining = _hieth_read_irqstatus(ld);
    HISI_NET_LOCK_PUT(&hieth_glb_reg_lock);

    return remaining;
}

/* Program the new endian mode and hand back the previous setting. */
int hieth_set_endian_mode(struct hieth_netdev_local *ld, int mode)
{
    int prev = hieth_readl_bits(ld, GLB_ENDIAN_MOD, BITS_ENDIAN);

    hieth_writel_bits(ld, mode, GLB_ENDIAN_MOD, BITS_ENDIAN);
    return prev;
}

/*
 * hieth_set_hwq_depth - partition the on-chip queue space between TX and RX.
 *
 * TX gets ld->depth.hw_xmitq entries; RX receives the remainder
 * (HIETH_MAX_QUEUE_DEPTH - hw_xmitq). Returns 0 on success, -1 if the
 * configured depth is out of range.
 */
int hieth_set_hwq_depth(struct hieth_netdev_local *ld)
{
    hieth_assert(ld->depth.hw_xmitq > 0 &&
                 ld->depth.hw_xmitq <= HIETH_MAX_QUEUE_DEPTH);

    /* BUGFIX: the runtime guard must mirror the assert. The original only
     * rejected depths above the maximum, so with asserts compiled out a
     * zero depth would silently program TXQ=0 / RXQ=MAX. */
    if ((ld->depth.hw_xmitq == 0) ||
        (ld->depth.hw_xmitq > HIETH_MAX_QUEUE_DEPTH)) {
        BUG();
        return -1;
    }

    hieth_writel_bits(ld, ld->depth.hw_xmitq, UD_REG_NAME(GLB_QLEN_SET),
                      BITS_TXQ_DEP);
    hieth_writel_bits(ld, HIETH_MAX_QUEUE_DEPTH - ld->depth.hw_xmitq,
                      UD_REG_NAME(GLB_QLEN_SET), BITS_RXQ_DEP);

    return 0;
}

/* Public wrapper: non-zero when the hardware TX queue has room. */
int hieth_hw_xmitq_ready(struct hieth_netdev_local *ld)
{
    return _test_xmit_queue_ready(ld);
}

// extern EVENT_CB_S gEthEvent;

/*
 * hieth_xmit_release_pkt - reclaim transmit buffers the hardware has finished.
 *
 * The difference between ld->tx_hw_cnt (packets we queued) and the
 * hardware's current in-use count is the number of completed packets; for
 * each one the per-segment DMA references taken at send time are dropped
 * and the ring tail advances. If anything was reclaimed while the sender
 * had marked itself busy, EVENT_NET_CAN_SEND wakes it. Always returns 0.
 * The 'sc' argument is unused here.
 */
int hieth_xmit_release_pkt(struct hieth_netdev_local *ld,
                           struct los_eth_driver *sc,
                           struct hieth_priv_s *priv)
{
    int ret = 0;
    struct tx_pkt_info *txq_cur;
    int tx_reclaim_cnt = 0;
    struct pbuf_info *pbuf;
    u32 i;

    HISI_NET_LOCK_GET(&(ld->tx_lock));

    /* Hardware holds fewer packets than we accounted for: the difference
     * has been transmitted and can be released. */
    while (hw_xmitq_cnt_inuse(ld) < ld->tx_hw_cnt) {
        hieth_assert(ld->tx_hw_cnt);

        txq_cur = ld->txq + ld->txq_tail;

        /* Drop the per-segment DMA references taken in hieth_xmit_gso(). */
        pbuf = priv->ram->pbuf_info + ld->txq_tail;
        for (i = 0; i < pbuf->sg_len; i++) {
            pbuf_dma_free(pbuf->dma_info[i]);
        }

        txq_cur->tx_addr = 0;

        //        free ((void *)(txq_cur->buf_ptr));
        /* advance ring tail with wrap-around */
        ld->txq_tail++;
        if (ld->txq_tail == ld->q_size) {
            ld->txq_tail = 0;
        }

        tx_reclaim_cnt++;
        ld->tx_hw_cnt--;
    }

    if (tx_reclaim_cnt && ld->tx_busy) {
        /* Sender was blocked waiting for ring space — wake it. */
        ld->tx_busy = 0;
        LOS_EventWrite(&(g_stHiethPlatformData[priv->index].stEvent), EVENT_NET_CAN_SEND);
    }

    HISI_NET_LOCK_PUT(&(ld->tx_lock));
    return ret;
}

/*
 * hieth_xmit_gso - post one pbuf chain to the hardware transmit queue.
 *
 * Takes a DMA reference on every segment of @p, fills in the TX descriptor
 * (enabling TCP/UDP checksum offload when the stack marked the pbuf with
 * PBUF_FLAG_TX_CSUM_PARTIAL), cleans the payload from the data cache and
 * hands the packet to the MAC. A single-segment packet is DMA'd straight
 * from its payload; a multi-segment chain goes through a scatter-gather
 * descriptor.
 *
 * Returns 0 on success, -1 when the chain exceeds MAX_ETH_DRV_SG segments
 * (in which case every DMA reference taken so far is released again).
 */
int hieth_xmit_gso(struct hieth_netdev_local *ld,
                   hieth_priv_t *priv, struct pbuf *p)
{
    struct tx_pkt_info *txq_cur;
    struct pbuf *p_iter;
    int send_pkt_len;
    int sg_len;
    u32 buf_addr, buf_len;
    int i;
    struct pbuf_info *pb_info;
    struct dma_tx_desc *dma_sg;

    send_pkt_len = p->tot_len;
    if (send_pkt_len > HIETH_MAX_FRAME_SIZE) {
        /* oversized frame: logged only, transmission still attempted */
        PRINTK("%s: xmit error len=%d\n", __func__, send_pkt_len);
    }

    /* Take and record a DMA reference for every segment of the chain. */
    pb_info = &(priv->ram->pbuf_info[ld->txq_head]);
    sg_len = 0;
    for (p_iter = p; p_iter != NULL; p_iter = p_iter->next) {
        if (sg_len >= MAX_ETH_DRV_SG) {
            PRINTK("eth_drv_send: sg list num err!");
            /* BUGFIX: release the references already taken on earlier
             * segments; the original code leaked them on this error path
             * (the release path only frees references of queued packets). */
            while (sg_len > 0) {
                sg_len--;
                pbuf_dma_free(pb_info->dma_info[sg_len]);
            }
            return -1;
        }
        pbuf_dma_ref(p_iter->dma_info);

        pb_info->dma_info[sg_len] = p_iter->dma_info;
        sg_len++;
    }
    pb_info->sg_len = sg_len;

    txq_cur = ld->txq + ld->txq_head;
    txq_cur->tx.val = 0;

    /* default config, default closed checksum offload function */
    txq_cur->tx.info.tso_flag = HIETH_CSUM_DISABLE;
    txq_cur->tx.info.coe_flag = HIETH_CSUM_DISABLE;
#if LWIP_TX_CSUM_OFFLOAD
    if (p->flags & PBUF_FLAG_TX_CSUM_PARTIAL) {
        struct iphdr *iphdr = (struct iphdr *)((char *)p->payload + sizeof(struct ethhdr));
        if ((iphdr->protocol == IPPROTO_TCP) || (iphdr->protocol == IPPROTO_UDP)) {
            if (iphdr->protocol == IPPROTO_TCP) {
                struct tcphdr *tcphdr = (struct tcphdr *)((char *)iphdr + (iphdr->ihl << 2));
                /* doff is the TCP header length in 32-bit words */
                txq_cur->tx.info.prot_hdr_len = tcphdr->doff;
                txq_cur->tx.info.prot_type = HIETH_TRANS_TCP_TYPE_HW;
            } else {
                txq_cur->tx.info.prot_hdr_len = 2; /* 8 bytes */
                txq_cur->tx.info.prot_type = HIETH_TRANS_UDP_TYPE_HW;
            }
            if (iphdr->version == 4) {
                txq_cur->tx.info.ip_ver = HIETH_IPV4_VERSION_HW;
            } else if (iphdr->version == 6) {
                txq_cur->tx.info.ip_ver = HIETH_IPV6_VERSION_HW;
            }
            txq_cur->tx.info.ip_hdr_len = iphdr->ihl;
            txq_cur->tx.info.coe_flag = HIETH_CSUM_ENABLE;
            txq_cur->tx.info.vlan_flag = HIETH_CSUM_DISABLE;
        }
    }
#endif
    if (sg_len == 1) {
        /* Linear packet: DMA straight from the payload. */
        txq_cur->tx.info.sg_flag = 0;
        net_dma_cache_clean((void *)p->payload, send_pkt_len);
        txq_cur->tx_addr = (unsigned int)p->payload;
    } else {
        /* Multi-segment chain: describe it in a scatter-gather descriptor. */
        txq_cur->tx.info.sg_flag = 1;
        txq_cur->tx.info.nfrags_num = sg_len - 1;

        dma_sg = priv->ram->dma_tx + ld->txq_head;
        dma_sg->total_len = send_pkt_len;

        /* first segment is the "linear" part... */
        buf_addr = (u32)p->payload;
        buf_len = p->len;
        net_dma_cache_clean((void *)buf_addr, buf_len);
        dma_sg->linear_addr = buf_addr;
        dma_sg->linear_len = buf_len;
        /* ...remaining segments become fragments */
        p_iter = p->next;
        for (i = 1; i < sg_len; i++) {
            buf_addr = (u32)p_iter->payload;
            buf_len = p_iter->len;
            net_dma_cache_clean((void *)buf_addr, buf_len);
            dma_sg->frags[i - 1].addr = buf_addr;
            dma_sg->frags[i - 1].size = buf_len;
            p_iter = p_iter->next;
        }

        /* flush the descriptor itself before handing it to hardware */
        net_dma_cache_clean((void *)dma_sg, sizeof(struct dma_tx_desc));
        txq_cur->tx_addr = (unsigned int)dma_sg;
    }

    /* reported length accounts for the FCS (FCS_BYTES) */
    txq_cur->tx.info.data_len = send_pkt_len + FCS_BYTES;

    hw_xmitq_pkg(ld, txq_cur->tx_addr, txq_cur->tx.val);

    /* advance ring head with wrap-around */
    ld->txq_head++;
    if (ld->txq_head == ld->q_size) {
        ld->txq_head = 0;
    }

    return 0;
}

/*
 * hieth_feed_hw - refill the hardware receive queue with empty buffers.
 *
 * While the MAC reports the RX queue ready to accept an address and the
 * software ring (rx_feed chasing rx_release, depth HIETH_HWQ_RXQ_DEPTH)
 * has a free slot, allocate a max-frame pbuf, invalidate its cache lines
 * and post its payload address to GLB_IQ_ADDR. Stops early if pbuf
 * allocation (or the ETH_PAD_SIZE header adjustment) fails. Returns the
 * number of buffers posted.
 */
int hieth_feed_hw(struct hieth_netdev_local *ld, hieth_priv_t *priv)
{
    int cnt = 0;
    struct pbuf *pbuf;
    unsigned int rx_feed_next = 0;

    HISI_NET_LOCK_GET(&(ld->rx_lock));

    while (hieth_readl_bits(ld, UD_REG_NAME(GLB_RO_QUEUE_STAT),
                            BITS_RECVQ_RDY)) {
        /* next feed index, with wrap-around */
        rx_feed_next = priv->rx_feed + 1;
        if (rx_feed_next == HIETH_HWQ_RXQ_DEPTH) {
            rx_feed_next = 0;
        }
        /* ring full: feeding would catch up with the release index */
        if (rx_feed_next == priv->rx_release) {
            break;
        }

        pbuf = pbuf_alloc(PBUF_RAW, (HIETH_MAX_FRAME_SIZE + ETH_PAD_SIZE), PBUF_RAM);
        if (!pbuf) {
            break;
        }

#if ETH_PAD_SIZE
        /* drop the padding word */
        if (pbuf_header(pbuf, -ETH_PAD_SIZE)) {
            PRINTK("[HIETH_ERROR]eth_drv_recv : pbuf_header drop failed\n");
            pbuf_free(pbuf);
            break;
        }
#endif

        priv->ram->rx_pbuf[priv->rx_feed] = pbuf;
        /* invalidate the buffer so the CPU re-reads what DMA writes */
        net_dma_cache_inv(pbuf->payload, HIETH_MAX_FRAME_SIZE);

        hieth_writel(ld, (unsigned int)pbuf->payload,
                     UD_REG_NAME(GLB_IQ_ADDR));

        priv->rx_feed = rx_feed_next;

        cnt++;
    }

    HISI_NET_LOCK_PUT(&(ld->rx_lock));

    return cnt;
}

/* vim: set ts=8 sw=8 tw=78: */
