/*****************************************************************************
 *  Include Section
 *  add all #include here
 *****************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/highmem.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
#include <linux/phy.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/irqreturn.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <linux/crc32.h>
#include <mach/pmu.h>
#include "fh_qos_gmac.h"
#include "fh_qos_gmac_phyt.h"
#include <linux/gpio.h>
#include <mach/fh_gmac_plat.h>


#ifdef CONFIG_USE_OF
#include "fh_qos_dts_parse.h"
#endif
/*****************************************************************************
 * Define section
 * add all #define here
 *****************************************************************************/
#define TX_TIMEO	5000
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		32
#endif

#define FLOW_CTL_MIN_RX_FIFO	0x1000
#define FLOW_CTL_DEACTIVE_RX_FIFO	0x400
#define FLOW_CTL_MAX_PAUSE_TIME	0xffff
#define FLOW_CTL_PAUSE_TIME		0x500
#define DUMMY_SK_BUFF_FLAG	0xDEADBEEF

#define CHANGE_BIT_LINK	BIT(0)
#define CHANGE_BIT_SPD	BIT(1)
#define CHANGE_BIT_DUP	BIT(2)
#define CHANGE_BIT_PAUSE	BIT(3)
#define DMA_CH_STOPPED   0
#define DMA_TX_CH_SUSPENDED 6

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");

/*****************************************************************************
 *  Static function forward declarations
 *****************************************************************************/
#ifndef CONFIG_USE_OF
static void __iomem *fh_qos_get_ctl_regs_resource(struct platform_device *pdev);
#endif
static int fh_qos_parse_plat_info(struct net_device *ndev,
struct platform_device *pdev, void __iomem *regs);
static void fh_qos_suspend(struct dw_qos *pGmac);
static void fh_qos_resume(struct dw_qos *pGmac);
static void fh_qos_set_speed(struct dw_qos *pGmac);
static void fh_qos_link_up(struct dw_qos *pGmac);
static void fh_qos_link_down(struct dw_qos *pGmac);
static int fh_qos_txq_malloc_desc(struct net_device *ndev, u32 q_no, u32 malloc_size);
static u32 fh_qos_tx_desc_avail(struct dw_qos *pGmac, u32 q_no);
static struct dwcqos_dma_desc *fh_qos_get_tx_desc_cur(struct dw_qos *pGmac, u32 q_no, u32 *index);
static u32 get_tx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index);
static u32 get_tx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index);
static void set_tx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 vadd);
static void set_tx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 padd);
static struct dwcqos_dma_desc *__fh_qos_get_tx_desc(struct dw_qos *pGmac, u32 q_no, u32 index);
static struct dwcqos_dma_desc *fh_qos_get_tx_desc_dirty(struct dw_qos *pGmac, u32 q_no, u32 *index);
static void fh_qos_txq_free_skb(struct net_device *ndev, u32 q_no);
static void fh_qos_txq_free_desc(struct net_device *ndev, u32 q_no);
static void fh_qos_tx_desc_index_init(struct dw_qos *pGmac, u32 q_no);
static int  fh_qos_rxq_malloc_desc(struct net_device *ndev, u32 q_no, u32 malloc_size);
static u32 fh_qos_rx_desc_avail(struct dw_qos *pGmac, u32 q_no);
static struct dwcqos_dma_desc *fh_qos_get_rx_desc_cur(struct dw_qos *pGmac, u32 q_no, u32 *index);
static struct dwcqos_dma_desc *__fh_qos_get_rx_desc(struct dw_qos *pGmac, u32 q_no, u32 index);
static u32 get_rx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index);
static u32 get_rx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index);
static void set_rx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 vadd);
static void set_rx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 padd);
#if 0 /* not used yet */
static void debug_rx_desc_dump(struct dw_qos *pGmac, u32 q_no);
#endif
static struct dwcqos_dma_desc *fh_qos_get_rx_desc_dirty(struct dw_qos *pGmac, u32 q_no, u32 *index);
static void fh_qos_rxq_free_skb(struct net_device *ndev, u32 q_no);
static void fh_qos_rxq_free_desc(struct net_device *ndev, u32 q_no);
static void fh_qos_rx_desc_index_init(struct dw_qos *pGmac, u32 q_no);
static int dwcqos_reset_dma_hw(struct dw_qos *pGmac);
static void dwcqos_set_dma_mode(struct dw_qos *pGmac, u32 val);
static void dwcqos_configure_bus(struct dw_qos *pGmac);
static void dwcqos_dma_tx_enable_set(struct dw_qos *pGmac, u32 dma_no, u32 enable);
static void dwcqos_dma_rx_enable_set(struct dw_qos *pGmac, u32 dma_no, u32 enable);
static void dwcqos_dma_isr_enable_set(struct dw_qos *pGmac, u32 dma_no, u32 val);
static u32 dwcqos_dma_isr_get(struct dw_qos *pGmac, u32 dma_no);
static void dwcqos_dma_isr_tx_set(struct dw_qos *pGmac, u32 dma_no, u32 enable);
static void dwcqos_dma_isr_rx_set(struct dw_qos *pGmac, u32 dma_no, u32 enable);
static u32 dwcqos_dma_isr_status_get(struct dw_qos *pGmac, u32 dma_no);
static void dwcqos_dma_isr_status_set(struct dw_qos *pGmac, u32 dma_no, u32 val);
static void dwcqos_set_hw_mac_addr(struct dw_qos *pGmac, u8 *mac);
static void dwcqos_disable_umac_addr(struct dw_qos *pGmac, unsigned int reg_n);
static void dwcqos_set_hw_mac_filter(struct dw_qos *pGmac, u32 filter);
static void dwcqos_set_hw_mac_interrupt(struct dw_qos *pGmac, u32 isr);
static void dwcqos_set_hw_mac_tx_flowctrl(struct dw_qos *pGmac, u32 q_no, u32 data);
static int dwcqos_get_hw_mac_tx_flowctrl(struct dw_qos *pGmac, u32 q_no);
static int dwcqos_get_mtl_rx_operation(struct dw_qos *pGmac, u32 q_no);
static void dwcqos_set_mtl_rx_operation(struct dw_qos *pGmac, u32 q_no, u32 data);
static void dwcqos_set_hw_parse_pause_frame(struct dw_qos *pGmac, u32 enable);
static u32 dwcqos_hw_rx_is_active(struct dw_qos *pGmac);
static u32 dwcqos_hw_halt_xmit(struct dw_qos *pGmac);
static void dwcqos_set_hw_tx_auto_flowctrl(struct dw_qos *pGmac, u32 rxq_no, u32 txq_no,
u32 pause_time, u32 enable);
static void dwcqos_set_hw_mac_config(struct dw_qos *pGmac, u32 config);
static int dwcqos_get_hw_mac_config(struct dw_qos *pGmac);
static void dwcqos_mac_spd_port_set(struct dw_qos *pGmac, int spd);
static void dwcqos_mac_duplex_set(struct dw_qos *pGmac, int duplex);
static void dwcqos_mac_route_rxqueue_set(struct dw_qos *pGmac);
static void dwcqos_queue_enable_set(struct dw_qos *pGmac, u32 queue_no, u32 enable);
static void dwcqos_mac_tx_enable(struct dw_qos *pGmac);
static void dwcqos_mac_tx_disable(struct dw_qos *pGmac);
static void dwcqos_mac_rx_enable(struct dw_qos *pGmac);
void dwcqos_mac_rx_disable(struct dw_qos *pGmac);
static int dwcqos_get_hw_mtl_txqx_debug(struct dw_qos *pGmac, u32 q_no);
static int dwcqos_get_hw_mtl_rxqx_debug(struct dw_qos *pGmac, u32 q_no);
static int dwcqos_get_hw_mtl_isr_status(struct dw_qos *pGmac, u32 q_no);
static void dwcqos_set_hw_mtl_isr_status(struct dw_qos *pGmac, u32 q_no, u32 val);
#if 0 /* not used yet */
static void dwcqos_dma_chan_set_mss(struct dw_qos *pGmac, u32 q_no, u32 val);
#endif
static int dwcqos_dma_get_interrupt_status(struct dw_qos *pGmac);
static int dwcqos_mac_get_interrupt_status(struct dw_qos *pGmac);
static void dwcqos_init_mtl_hw_rxdma_queue_map(struct dw_qos *pGmac);
static void dwcqos_dma_desc_init(struct dw_qos *pGmac);
static void dwcqos_dma_chan_init(struct dw_qos *pGmac);
static void refix_feature(struct dw_qos *pGmac);
static void parse_hw_feature(struct dw_qos *pGmac);
static void fh_qos_mac_add_init(struct dw_qos *pGmac);
static void qos_dev_mcast_set(struct net_device *ndev);
static int qos_dev_set_mac_addr(struct net_device *dev, void *p);
static int qos_dev_ioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd);
static void qos_dev_tx_timeout(struct net_device *ndev);
static int qos_dev_change_mtu(struct net_device *ndev, int new_mtu);
static int qos_dev_set_config(struct net_device *ndev, struct ifmap *map);
static void dwcqos_desc_init(struct net_device *ndev);
static int  dwcqos_init_sw(struct net_device *ndev, struct platform_device *pdev);
static int dwcqos_init_dma_hw(struct dw_qos *pGmac);
static void dwcqos_kick_tx_queue(struct dw_qos *pGmac, u32 q_no);
static void dwcqos_kick_rx_queue(struct dw_qos *pGmac, u32 q_no);
static void dwcqos_init_mtl_hw(struct dw_qos *pGmac);
static void dwcqos_init_mac_hw(struct dw_qos *pGmac);
static int dwcqos_init_hw(struct dw_qos *pGmac);
static void set_first_desc(struct dwcqos_dma_desc *pdesc);
static void set_last_desc(struct dwcqos_dma_desc *pdesc);
static void fh_qos_prepare_normal_send(struct dwcqos_dma_desc *pdesc,
u32 p_buf_add, u32 len, u32 total_len, u32 crc_flag);
static void set_desc_dma_valid(struct dwcqos_dma_desc *pdesc);
static void set_desc_isr_valid(struct dwcqos_dma_desc *pdesc);
static u32 get_desc_data_len(struct dwcqos_dma_desc *pdesc);
static int cal_tx_desc_require(struct sk_buff *skb, struct net_tx_queue *tx_queue);
static int qos_tso_xmit(struct sk_buff *skb, struct net_device *ndev);
static int qos_dev_xmit(struct sk_buff *skb, struct net_device *ndev);
static void tx_dirty_process(struct dw_qos *pGmac, struct net_tx_queue *queue,
u32 *tx_packet, u32 *tx_byte);
static int check_rx_packet(struct dw_qos *pGmac,
struct dwcqos_dma_desc *p_desc);
static void rx_refill_desc(struct dw_qos *pGmac, struct net_rx_queue *queue);
static int rx_valid_process(struct dw_qos *pGmac, struct net_rx_queue *queue, int limit);
static int fh_gmac_rx_napi_process(struct napi_struct *napi, int budget);
static void dwcqos_mtl_isr_process(struct dw_qos *pGmac);
static void dwcqos_read_mmc_counters(struct dw_qos *pGmac, u32 rx_mask,
u32 tx_mask, u32 lpc_mask);
static void dwcqos_mac_isr_process(struct dw_qos *pGmac);
static irqreturn_t fh_gmac_common_interrupt(int irq, void *dev_id);
static int qos_dev_open(struct net_device *ndev);
static int qos_dev_stop(struct net_device *ndev);
static int fh_tx_cleanup(struct dw_qos *pGmac, int qno);
static int fh_qos_gmac_probe(struct platform_device *pdev);
static int fh_qos_gmac_remove(struct platform_device *pdev);


/*****************************************************************************
 * func below
 *****************************************************************************/
/*
 * Return true when the TX DMA channel state machine (DEBUG_ST0 bits 15:12)
 * reports "stopped" or "suspended", i.e. it is safe to stop the channel.
 */
static bool dwcqos_is_tx_dma_suspended(struct dw_qos *pGmac)
{
	u32 state;

	state = (dw_readl(pGmac, dma.debug_status0) >> 12) & 0xF;

	return (state == DMA_CH_STOPPED) || (state == DMA_TX_CH_SUSPENDED);
}


/*
 * Busy-wait for the TX DMA channel to go idle, bounded by a budget
 * proportional to the TX ring size; log the debug register if it never does.
 */
static void dwccqos_drain_tx_dma(struct dw_qos *pGmac)
{
	size_t budget = (TX_DESC_NUM * 1250) / 100;

	while (!dwcqos_is_tx_dma_suspended(pGmac) && budget--)
		udelay(200);

	if (!dwcqos_is_tx_dma_suspended(pGmac))
		netdev_info(pGmac->ndev, "Drain TX DMA Fail REG_DWCEQOS_DMA_DEBUG_ST0 0x%x\n",
		dw_readl(pGmac, dma.debug_status0));
}

/*
 * Return true when MTL TX queue 0 looks drained.
 * NOTE(review): masks 0x06 (read-controller state) and 0x10 (queue-not-empty)
 * are taken from the original code -- confirm against the EQOS databook.
 */
static bool dwcqos_is_txq_suspended(struct dw_qos *pGmac)
{
	u32 dbg = dwcqos_get_hw_mtl_txqx_debug(pGmac, 0);

	return ((dbg & 0x06) != 0x02) && !(dbg & 0x10);
}

/* Busy-wait for MTL TX queue 0 to drain; log its debug register on timeout. */
static void dwcqos_drain_txq(struct dw_qos *pGmac)
{
	size_t budget = (TX_DESC_NUM * 1250) / 100;

	while (!dwcqos_is_txq_suspended(pGmac) && budget--)
		udelay(200);

	if (!dwcqos_is_txq_suspended(pGmac))
		netdev_info(pGmac->ndev, "Drain TXQ Fail TXQ0_DEBUG_ST 0x%08x\n",
		dwcqos_get_hw_mtl_txqx_debug(pGmac, 0));
}

/*
 * Return true when MTL RX queue 0 looks drained.
 * NOTE(review): 0x30 (controller state) and 0x3fff0000 (packets-in-queue
 * count) follow the original masks -- confirm against the EQOS databook.
 */
static bool dwcqos_is_rxq_suspended(struct dw_qos *pGmac)
{
	u32 dbg = dwcqos_get_hw_mtl_rxqx_debug(pGmac, 0);

	return ((dbg & 0x30) == 0) && !(dbg & 0x3fff0000);
}

/* Busy-wait for MTL RX queue 0 to drain; dump debug + MAC config on timeout. */
static void dwcqos_drain_rxq(struct dw_qos *pGmac)
{
	size_t budget = (RX_DESC_NUM * 1250) / 100;

	while (!dwcqos_is_rxq_suspended(pGmac) && budget--)
		udelay(200);

	if (!dwcqos_is_rxq_suspended(pGmac))
		netdev_info(pGmac->ndev, " MTL_RXQ0_DEBUG_ST 0x%08x REG_DWCEQOS_MAC_CFG 0x%08x\n",
		dwcqos_get_hw_mtl_rxqx_debug(pGmac, 0),
		dw_readl(pGmac, mac.config));
}

#ifndef CONFIG_USE_OF
/*
 * Map the controller register window described by the platform device's
 * first MEM resource. Returns the ioremapped base, or NULL on failure.
 * Fixes: returned integer 0 for a pointer (now NULL) and hand-computed the
 * window length (now resource_size(), which handles the inclusive end).
 */
static void __iomem *fh_qos_get_ctl_regs_resource(struct platform_device *pdev)
{
	void __iomem *base;
	struct resource *regs;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		pr_err("%s: ERROR: no resource\n", __func__);
		return NULL;
	}

	base = ioremap_nocache(regs->start, resource_size(regs));
	if (!base)
		pr_err("%s: ERROR: memory mapping failed\n", __func__);

	return base;
}
#endif

/*
 * Pull board/platform configuration into the driver state: register base,
 * PHY reset GPIO, clocks, hardware feature registers, per-queue bookkeeping
 * arrays, and the interrupt line.
 *
 * Returns 0 on success or -ENOMEM when the queue arrays cannot be allocated.
 * Note: clocks are enabled *before* MAC_HW_Feature* is read -- register
 * access needs the ethernet clock running.
 */
static int fh_qos_parse_plat_info(struct net_device *ndev,
struct platform_device *pdev, void __iomem *regs)
{
	struct fh_gmac_platform_data *p_plat_data;
	struct dw_qos *pGmac;

	pGmac = netdev_priv(ndev);
	p_plat_data = (struct fh_gmac_platform_data *)pdev->dev.platform_data;
	pGmac->phyreset_gpio = p_plat_data->phy_reset_pin;
	pGmac->regs = regs;
	pGmac->priv_data = p_plat_data;
	ndev->base_addr = (unsigned long)pGmac->regs;

	pGmac->clk = clk_get(&pdev->dev, "eth_clk");
	if (IS_ERR(pGmac->clk)) {
		/* just a warning: some boards have no dedicated eth clock */
		pr_err("find no 'eth_clk'\n");
	} else
		clk_prepare_enable(pGmac->clk);

	pGmac->rmii_clk = clk_get(&pdev->dev, "eth_rmii_clk");
	if (IS_ERR(pGmac->rmii_clk)) {
		/* just a warning: RMII clock is board-dependent */
		pr_err("find no 'eth_rmii_clk'\n");
	} else
		clk_prepare_enable(pGmac->rmii_clk);

	/* snapshot hardware capability registers, then decode them */
	pGmac->hw_fea.feature0 = dw_readl(pGmac, mac.hw_feature_0);
	pGmac->hw_fea.feature1 = dw_readl(pGmac, mac.hw_feature_1);
	pGmac->hw_fea.feature2 = dw_readl(pGmac, mac.hw_feature_2);
	parse_hw_feature(pGmac);

	/* kcalloc replaces kmalloc+memset: zeroed and n*size overflow-checked */
	pGmac->tx_queue = kcalloc(pGmac->hw_fea.txq_num,
		sizeof(struct net_tx_queue), GFP_KERNEL);
	if (!pGmac->tx_queue) {
		pr_err("malloc tx queue failed.....\n");
		return -ENOMEM;
	}

	pGmac->rx_queue = kcalloc(pGmac->hw_fea.rxq_num,
		sizeof(struct net_rx_queue), GFP_KERNEL);
	if (!pGmac->rx_queue) {
		kfree(pGmac->tx_queue);
		pr_err("malloc rx queue failed.....\n");
		return -ENOMEM;
	}

#ifdef CONFIG_USE_OF
	ndev->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
#else
	ndev->irq = platform_get_irq(pdev, 0);
#endif

	pGmac->common_irq_no = ndev->irq;
	/* irq name e.g. "qos0" */
	sprintf(pGmac->common_irq_name, "%s%d", "qos", pdev->id);

	return 0;
}

/*
 * Quiesce the whole datapath before a reconfiguration (called from
 * fh_qos_set_speed). Sequence: detach netdev -> mask per-channel TX/RX DMA
 * interrupts -> stop RX (MAC receive off, drain MTL RX queue, stop RX DMA),
 * harvest frames already received and refill descriptors -> stop TX (drain
 * TX DMA, stop TX DMA, drain MTL TX queue, MAC transmit off), reclaim
 * finished TX descriptors. Any TX work still in flight is dropped because
 * the subsequent hardware reset rewinds the descriptor index to 0.
 * The stop order mirrors the hardware datapath -- do not reorder.
 */
static void fh_qos_suspend(struct dw_qos *pGmac)
{
	struct net_device *ndev;
	struct net_rx_queue *rx_queue;
	struct net_tx_queue *tx_queue;
	struct netdev_queue *txq;
	int ret;

	u32 qno;

	ndev = pGmac->ndev;
	if (!ndev || !netif_running(ndev))
		return;
	//close tx core
	netif_device_detach(ndev);
	//close isr. isr will schedule napi..so just close isr
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, qno) {
		dwcqos_dma_isr_rx_set(pGmac, qno, 0);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, qno) {
		dwcqos_dma_isr_tx_set(pGmac, qno, 0);
	}

	rx_queue = &pGmac->rx_queue[0];
	spin_lock(&rx_queue->rx_lock);
	//close rx mac shift.
	dwcqos_mac_rx_disable(pGmac);
	dwcqos_drain_rxq(pGmac);
	dwcqos_dma_rx_enable_set(pGmac, 0, 0);
	/* pull in anything the hardware completed before the stop */
	ret = rx_valid_process(pGmac, rx_queue, RX_DESC_NUM);
	/*pr_err("suspend got rx packet [%x]\n",ret);*/
	rx_refill_desc(pGmac, rx_queue);
#if (0)
	if (pGmac->rx_queue[0].dirty_idx != pGmac->rx_queue[0].cur_idx) {
		pr_err("got data left in Rx queue. dirty %x : cur %x\n",
		pGmac->rx_queue[0].dirty_idx,
		pGmac->rx_queue[0].cur_idx);
	}
#endif
	spin_unlock(&rx_queue->rx_lock);

	//tx close
	tx_queue = &pGmac->tx_queue[0];
	spin_lock(&tx_queue->tx_lock);
	dwccqos_drain_tx_dma(pGmac);
	dwcqos_dma_tx_enable_set(pGmac, 0, 0);
	//close mtl.
	dwcqos_drain_txq(pGmac);
	//close tx shift
	dwcqos_mac_tx_disable(pGmac);
	fh_tx_cleanup(pGmac, 0);

	/*maybe mac got pause frame..close dwcqos_set_hw_parse_pause_frame func and tryit*/

	if (pGmac->tx_queue[0].dirty_idx != pGmac->tx_queue[0].cur_idx) {
		//cause will reset control..desc hw will back 0, here drop all app buf..
		txq = netdev_get_tx_queue(pGmac->ndev, 0);
#if (0)
		pr_err("got data left in Tx queue.may cause tx timeout... %x : %x\n",
		pGmac->tx_queue[0].dirty_idx,
		pGmac->tx_queue[0].cur_idx);
		netdev_err(pGmac->ndev, "drop all data left.\n");
#endif
		/* drop all pending TX state so BQL/ring indices stay coherent */
		netif_tx_lock_bh(pGmac->ndev);
		netdev_tx_reset_queue(txq);
		netif_tx_unlock_bh(pGmac->ndev);
		memset(pGmac->tx_queue[0].p_skbuf, 0, sizeof(u32) * pGmac->tx_queue[0].desc_size);
		memset(pGmac->tx_queue[0].tx_skbuff_dma, 0, sizeof(u32) * pGmac->tx_queue[0].desc_size);
		memset(pGmac->tx_queue[0].p_descs, 0, pGmac->tx_queue[0].desc_size * sizeof(struct dwcqos_dma_desc));
	}
	spin_unlock(&tx_queue->tx_lock);
}


/*
 * Counterpart to fh_qos_suspend: rewind all software ring indices,
 * re-initialize the hardware (DMA/MTL/MAC), restore multicast filtering
 * and the negotiated speed/duplex, then unmask the per-channel DMA
 * interrupts. NOTE(review): the skbs left in tx_queue[0] after suspend's
 * memset appear to be dropped without free here -- verify against
 * fh_tx_cleanup, which ran during suspend.
 */
static void fh_qos_resume(struct dw_qos *pGmac)
{
	struct net_device *ndev;
	u32 qno;
	u32 i;
	ndev = pGmac->ndev;

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, i) {
		fh_qos_tx_desc_index_init(pGmac, i);
	}
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, i) {
		fh_qos_rx_desc_index_init(pGmac, i);
	}

	dwcqos_init_hw(pGmac);
	qos_dev_mcast_set(ndev);
	dwcqos_mac_spd_port_set(pGmac, pGmac->speed);
	dwcqos_mac_duplex_set(pGmac, pGmac->duplex);
	/*msleep(100);*/
	/* common abnormal/normal interrupt enables for every channel in use */
	DWCQOS_FOR_EACH_QUEUE(max_t(size_t, pGmac->hw_fea.rxq_num, pGmac->hw_fea.txq_num), qno) {
		dwcqos_dma_isr_enable_set(pGmac, qno,
		DWCQOS_DMA_CH_IE_NIE |
		DWCQOS_DMA_CH_IE_AIE |
		DWCQOS_DMA_CH_IE_FBEE);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, qno) {
		dwcqos_dma_isr_tx_set(pGmac, qno, 1);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, qno) {
		//dwcqos_dma_rx_enable_set(pGmac, qno, 1);
		dwcqos_dma_isr_rx_set(pGmac, qno, 1);
	}
}

/*
 * Reprogram MAC and board interface for a new link speed. The controller is
 * fully quiesced (fh_qos_suspend) around the change and re-initialized
 * afterwards (fh_qos_resume). 1000 Mb/s selects RGMII; 10/100 keep RMII on
 * RMII-only boards and fall back to MII everywhere else.
 */
static void fh_qos_set_speed(struct dw_qos *pGmac)
{
	int spd = pGmac->speed;
	u32 inf_sup = pGmac->ac_reg_cfg->inf_sup;

	fh_qos_suspend(pGmac);

	/* optional board hook to sync external logic with the MAC speed */
	if (pGmac->ac_phy_info->ex_sync_mac_spd)
		pGmac->ac_phy_info->ex_sync_mac_spd(spd, pGmac->ac_reg_cfg);

	if (pGmac->inf_set) {
		switch (spd) {
		case 1000:
			pGmac->inf_set(PHY_INTERFACE_MODE_RGMII);
			break;
		case 100:
		case 10:
			/* identical handling for both 10 and 100 Mb/s */
			if (inf_sup == PHY_INTERFACE_MODE_RMII)
				pGmac->inf_set(PHY_INTERFACE_MODE_RMII);
			else
				pGmac->inf_set(PHY_INTERFACE_MODE_MII);
			break;
		default:
			pr_err("unknow spd %x\n", spd);
			break;
		}
	}

	fh_qos_resume(pGmac);
}

/*
 * PHY reported link-up: run the optional board hook, re-arm RX DMA queue 0,
 * route incoming frames to it, enable the MTL queue and the MAC RX/TX
 * datapaths, then let the stack transmit again. Enable order follows the
 * datapath (DMA -> MTL -> MAC) -- do not reorder.
 */
static void fh_qos_link_up(struct dw_qos *pGmac)
{
	/* board-specific link-up callback, receives the MII bus handle */
	if (pGmac->ac_phy_info->brd_link_up_cb)
		pGmac->ac_phy_info->brd_link_up_cb((void *)pGmac->mii);
	dwcqos_kick_rx_queue(pGmac, 0);
	dwcqos_mac_route_rxqueue_set(pGmac);
	dwcqos_queue_enable_set(pGmac, 0, 1);
	dwcqos_mac_rx_enable(pGmac);
	dwcqos_mac_tx_enable(pGmac);

	netif_device_attach(pGmac->ndev);
}

/*
 * PHY reported link-down: run the optional board hook, stop the MAC RX/TX
 * datapaths and tell the stack the carrier is gone.
 */
static void fh_qos_link_down(struct dw_qos *pGmac)
{

	if (pGmac->ac_phy_info->brd_link_down_cb)
		pGmac->ac_phy_info->brd_link_down_cb((void *)pGmac->mii);
	dwcqos_mac_rx_disable(pGmac);
	dwcqos_mac_tx_disable(pGmac);
	netif_carrier_off(pGmac->ndev);
}

/*
 * Re-apply TX pause-frame policy after autonegotiation: automatic TX flow
 * control is enabled only when the link partner advertises pause AND the
 * driver's flow_ctrl mask allows FLOW_TX. Transmission is halted around the
 * update and the saved MAC config bits are restored afterwards.
 */
void fh_qos_tx_flow_process(struct dw_qos *pGmac)
{
	struct phy_device *phydev = pGmac->phydev;
	u32 halted;
	u32 enable;

	halted = dwcqos_hw_halt_xmit(pGmac);

	enable = (phydev->pause && (pGmac->flow_ctrl & FLOW_TX)) ? 1 : 0;
	dwcqos_set_hw_tx_auto_flowctrl(pGmac, 0, 0, FLOW_CTL_PAUSE_TIME, enable);

	dwcqos_set_hw_mac_config(pGmac, dwcqos_get_hw_mac_config(pGmac) | halted);
}

/*
 * phylib adjust_link callback. Under pGmac->lock, diff the PHY-reported
 * link/speed/duplex/pause state against the cached copy, collect the
 * differences into a change bitmap, and react:
 *  - speed or duplex change  -> full hardware reset via fh_qos_set_speed()
 *  - pause change            -> reprogram TX flow control
 *  - any change + link up    -> re-enable datapath (fh_qos_link_up)
 *  - any change + link down  -> stop datapath (fh_qos_link_down)
 *  - no change               -> make sure MAC RX did not silently stop
 * The order (speed reset before link_up) matters: link_up re-attaches the
 * netdev that set_speed's suspend detached.
 */
void fh_qos_adjust_link(struct net_device *ndev)
{
	struct dw_qos *pGmac;
	struct phy_device *phydev;
	int status_change = 0;

	pGmac = netdev_priv(ndev);
	phydev = pGmac->phydev;
	if (!phydev)
		return;

	spin_lock(&pGmac->lock);
	if (phydev->link) {
		if (pGmac->duplex != phydev->duplex) {
			pGmac->duplex = phydev->duplex;
			status_change |= CHANGE_BIT_DUP;
		}
		if (pGmac->speed != phydev->speed) {
			pGmac->speed = phydev->speed;
			status_change |= CHANGE_BIT_SPD;
		}
		if (pGmac->pause != phydev->pause) {
			pGmac->pause = phydev->pause;
			status_change |= CHANGE_BIT_PAUSE;
		}
	}

	if (phydev->link != pGmac->link) {
		pGmac->link = phydev->link;
		status_change |= CHANGE_BIT_LINK;
	}
	if ((status_change & CHANGE_BIT_SPD) || (status_change & CHANGE_BIT_DUP)) {
		//here will do hw reset!!!!
		fh_qos_set_speed(pGmac);
	}
	if (status_change & CHANGE_BIT_PAUSE)
		fh_qos_tx_flow_process(pGmac);

	if (status_change) {
		if (phydev->link) {
			netif_trans_update(pGmac->ndev);
			fh_qos_link_up(pGmac);
		} else {
			fh_qos_link_down(pGmac);
		}
		phy_print_status(phydev);
	}
	/* defensive: RX engine can stop on its own; restart it if idle */
	if (status_change == 0 && !dwcqos_hw_rx_is_active(pGmac))
		dwcqos_mac_rx_enable(pGmac);

	spin_unlock(&pGmac->lock);
}


/*
 * Allocate the three rings for TX queue @q_no: the skb virtual-address
 * bookkeeping array, the skb DMA-address array, and the coherent hardware
 * descriptor ring (hardware reads/writes the latter, hence dma_alloc_coherent).
 * Returns 0 on success, -1 if a bookkeeping array fails, -2 if the
 * descriptor ring fails (all prior allocations are released).
 * Fixes: kmalloc+memset replaced by kcalloc (zeroed, n*size overflow checked)
 * and the descriptor-ring failure message no longer claims "p_skbuf".
 */
static int fh_qos_txq_malloc_desc(struct net_device *ndev, u32 q_no, u32 malloc_size)
{
	struct dw_qos *pGmac;

	pGmac = netdev_priv(ndev);
	pGmac->tx_queue[q_no].id = q_no;
	pGmac->tx_queue[q_no].desc_size = malloc_size;

	/* skb virtual-address ring */
	pGmac->tx_queue[q_no].p_skbuf = kcalloc(malloc_size, sizeof(u32), GFP_KERNEL);
	if (!pGmac->tx_queue[q_no].p_skbuf) {
		pr_err("[tx alloc] :: no mem for p_skbuf\n");
		return -1;
	}

	/* skb DMA-address ring */
	pGmac->tx_queue[q_no].tx_skbuff_dma = kcalloc(malloc_size, sizeof(u32), GFP_KERNEL);
	if (!pGmac->tx_queue[q_no].tx_skbuff_dma) {
		pr_err("[tx alloc] :: no mem for tx_skbuff_dma\n");
		kfree(pGmac->tx_queue[q_no].p_skbuf);
		return -1;
	}

	/* coherent hardware descriptor ring */
	pGmac->tx_queue[q_no].p_raw_desc = dma_alloc_coherent(pGmac->dev,
		sizeof(struct dwcqos_dma_desc) * pGmac->tx_queue[q_no].desc_size,
		&pGmac->tx_queue[q_no].descs_phy_base_addr,
		GFP_KERNEL);
	if (!pGmac->tx_queue[q_no].p_raw_desc) {
		pr_err("[tx alloc] :: no mem for desc ring\n");
		kfree(pGmac->tx_queue[q_no].p_skbuf);
		kfree(pGmac->tx_queue[q_no].tx_skbuff_dma);
		return -2;
	}
	pGmac->tx_queue[q_no].p_descs = (struct dwcqos_dma_desc *)pGmac->tx_queue[q_no].p_raw_desc;
	pGmac->tx_queue[q_no].descs_phy_tail_addr = pGmac->tx_queue[q_no].descs_phy_base_addr +
	(pGmac->tx_queue[q_no].desc_size) * sizeof(struct dwcqos_dma_desc);
	/* each MTL TX queue gets an equal share of the TX FIFO */
	pGmac->tx_queue[q_no].hw_queue_size = pGmac->hw_fea.tx_fifo_size / pGmac->hw_fea.txq_num;
	pGmac->tx_queue[q_no].desc_xfer_max_size = MAX_EACH_DESC_XFER_SIZE;
	memset(pGmac->tx_queue[q_no].p_descs, 0, pGmac->tx_queue[q_no].desc_size * sizeof(struct dwcqos_dma_desc));
	return 0;
}

/* Free TX descriptors: ring size minus outstanding (cur - dirty); unsigned wrap-safe. */
static u32 fh_qos_tx_desc_avail(struct dw_qos *pGmac, u32 q_no)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	return q->desc_size - (q->cur_idx - q->dirty_idx);
}

/*
 * Claim the next TX descriptor at the software producer index.
 * Returns the zeroed descriptor and stores its ring index in *index,
 * or NULL when the software ring is full or DMA still owns the slot.
 */
static struct dwcqos_dma_desc *fh_qos_get_tx_desc_cur(struct dw_qos *pGmac, u32 q_no, u32 *index)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];
	struct dwcqos_dma_desc *desc;

	/* software ring full? */
	if (!fh_qos_tx_desc_avail(pGmac, q_no))
		return NULL;

	desc = &q->p_descs[q->cur_idx % q->desc_size];
	/* hardware still owns this descriptor */
	if (desc->desc3 & DWCQOS_DMA_TDES3_OWN)
		return NULL;

	memset(desc, 0, sizeof(*desc));
	*index = q->cur_idx++;

	return desc;
}


/* Read the stored skb virtual address for TX slot @index (index wraps). */
static u32 get_tx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	return (u32) q->p_skbuf[index % q->desc_size];
}

/* Read the stored skb DMA address for TX slot @index (index wraps). */
static u32 get_tx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	return (u32) q->tx_skbuff_dma[index % q->desc_size];
}

/* Record the skb virtual address for TX slot @index (index wraps). */
static void set_tx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 vadd)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	q->p_skbuf[index % q->desc_size] = vadd;
}

/* Record the skb DMA address for TX slot @index (index wraps). */
static void set_tx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 padd)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	q->tx_skbuff_dma[index % q->desc_size] = padd;
}

/* Raw TX slot access: @index is NOT wrapped here; callers pass 0..desc_size-1. */
static struct dwcqos_dma_desc *__fh_qos_get_tx_desc(struct dw_qos *pGmac, u32 q_no, u32 index)
{
	return &pGmac->tx_queue[q_no].p_descs[index];
}

/*
 * Reclaim the oldest outstanding TX descriptor (consumer side).
 * Returns it and stores its ring index in *index, or NULL when nothing is
 * outstanding or the hardware has not released the slot yet.
 */
static struct dwcqos_dma_desc *fh_qos_get_tx_desc_dirty(struct dw_qos *pGmac, u32 q_no, u32 *index)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];
	struct dwcqos_dma_desc *desc;

	/* nothing queued between consumer and producer? */
	if (q->dirty_idx == q->cur_idx)
		return NULL;

	desc = &q->p_descs[q->dirty_idx % q->desc_size];
	/* hardware has not finished with this one yet */
	if (desc->desc3 & DWCQOS_DMA_TDES3_OWN)
		return NULL;

	*index = q->dirty_idx++;
	return desc;
}

/*
 * Tear down all buffers of TX queue @q_no: unmap every mapped DMA buffer
 * and release every real skb. Slots holding DUMMY_SK_BUFF_FLAG belong to
 * multi-descriptor frames and carry no skb of their own.
 */
static void fh_qos_txq_free_skb(struct net_device *ndev, u32 q_no)
{
	struct dw_qos *pGmac = netdev_priv(ndev);
	u32 slot;

	for (slot = 0; slot < pGmac->tx_queue[q_no].desc_size; slot++) {
		struct sk_buff *skb = (struct sk_buff *)get_tx_skbuf_vadd(pGmac, q_no, slot);
		struct dwcqos_dma_desc *desc = __fh_qos_get_tx_desc(pGmac, q_no, slot);
		u32 dma_addr = get_tx_skbuf_padd(pGmac, q_no, slot);
		u32 len = get_desc_data_len(desc);

		if (dma_addr) {
			dma_unmap_single(pGmac->ndev->dev.parent, dma_addr, len,
			DMA_TO_DEVICE);
		}
		if (((u32)skb != 0) && ((u32)skb != DUMMY_SK_BUFF_FLAG))
			dev_consume_skb_any(skb);
	}
}

/* Release TX queue @q_no's bookkeeping arrays and coherent descriptor ring. */
static void fh_qos_txq_free_desc(struct net_device *ndev, u32 q_no)
{
	struct dw_qos *pGmac = netdev_priv(ndev);
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	kfree(q->p_skbuf);
	kfree(q->tx_skbuff_dma);
	dma_free_coherent(pGmac->dev, sizeof(struct dwcqos_dma_desc) * q->desc_size,
	q->p_raw_desc, q->descs_phy_base_addr);
}


/* Rewind TX queue @q_no's software producer/consumer indices to slot 0. */
static void fh_qos_tx_desc_index_init(struct dw_qos *pGmac, u32 q_no)
{
	struct net_tx_queue *q = &pGmac->tx_queue[q_no];

	q->cur_idx = 0;
	q->dirty_idx = 0;
}

/*******/
/*
 * Allocate and arm RX queue @q_no: the skb virtual/DMA bookkeeping arrays,
 * the coherent descriptor ring, plus one pre-mapped receive skb per
 * descriptor handed to the hardware (OWN | BUF1V | INTE).
 * Returns 0 on success, -1 on any allocation failure (prior allocations
 * released). A failed skb allocation during the fill loop still BUG()s, as
 * before.
 * Fixes: kmalloc+memset -> kcalloc (and p_skbuf is now zeroed too); RX error
 * messages no longer claim "p_skbuf"/"tx_skbuff_dma" for the wrong ring.
 */
static int  fh_qos_rxq_malloc_desc(struct net_device *ndev, u32 q_no, u32 malloc_size)
{
	struct sk_buff *skb;
	struct dw_qos *pGmac;
	struct dwcqos_dma_desc *p_descs;
	u32 i;

	pGmac = netdev_priv(ndev);
	pGmac->rx_queue[q_no].id = q_no;
	pGmac->rx_queue[q_no].desc_size = malloc_size;

	/* skb virtual-address ring */
	pGmac->rx_queue[q_no].p_skbuf = kcalloc(malloc_size, sizeof(u32), GFP_KERNEL);
	if (!pGmac->rx_queue[q_no].p_skbuf) {
		pr_err("[rx alloc] :: no mem for p_skbuf\n");
		return -1;
	}

	/* skb DMA-address ring */
	pGmac->rx_queue[q_no].rx_skbuff_dma = kcalloc(malloc_size, sizeof(u32), GFP_KERNEL);
	if (!pGmac->rx_queue[q_no].rx_skbuff_dma) {
		kfree(pGmac->rx_queue[q_no].p_skbuf);
		pr_err("[rx alloc] :: no mem for rx_skbuff_dma\n");
		return -1;
	}

	/* coherent hardware descriptor ring */
	pGmac->rx_queue[q_no].p_raw_desc = dma_alloc_coherent(pGmac->dev,
		sizeof(struct dwcqos_dma_desc) * pGmac->rx_queue[q_no].desc_size,
		&pGmac->rx_queue[q_no].descs_phy_base_addr,
		GFP_KERNEL);
	if (!pGmac->rx_queue[q_no].p_raw_desc) {
		pr_err("[rx alloc] :: no mem for desc ring\n");
		kfree(pGmac->rx_queue[q_no].rx_skbuff_dma);
		kfree(pGmac->rx_queue[q_no].p_skbuf);
		return -1;
	}

	pGmac->rx_queue[q_no].p_descs = (struct dwcqos_dma_desc *)pGmac->rx_queue[q_no].p_raw_desc;
	pGmac->rx_queue[q_no].descs_phy_tail_addr = pGmac->rx_queue[q_no].descs_phy_base_addr +
	(pGmac->rx_queue[q_no].desc_size) * sizeof(struct dwcqos_dma_desc);
	/* each MTL RX queue gets an equal share of the RX FIFO */
	pGmac->rx_queue[q_no].hw_queue_size = pGmac->hw_fea.rx_fifo_size / pGmac->hw_fea.rxq_num;
	pGmac->rx_queue[q_no].desc_xfer_max_size = MAX_EACH_DESC_REV_SIZE;
	memset(pGmac->rx_queue[q_no].p_descs, 0, pGmac->rx_queue[q_no].desc_size * sizeof(struct dwcqos_dma_desc));

	/* pre-map one receive buffer per descriptor and hand it to the DMA */
	for (i = 0, p_descs = pGmac->rx_queue[q_no].p_descs; i < malloc_size; i++) {
		skb = netdev_alloc_skb(ndev, pGmac->rx_queue[q_no].desc_xfer_max_size);
		if (unlikely(skb == NULL)) {
			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
			BUG_ON(1);
		}

		pGmac->rx_queue[q_no].p_skbuf[i] = (u32)skb;
		pGmac->rx_queue[q_no].rx_skbuff_dma[i] =
		(u32)dma_map_single(pGmac->dev, skb->data, pGmac->rx_queue[q_no].desc_xfer_max_size, DMA_FROM_DEVICE);
		/* buffer address must be visible before OWN is handed over */
		wmb();
		p_descs[i].desc0 = (u32) (pGmac->rx_queue[q_no].rx_skbuff_dma[i]);
		p_descs[i].desc1 = 0;
		p_descs[i].desc2 = 0;
		p_descs[i].desc3 = DWCQOS_DMA_RDES3_BUF1V | DWCQOS_DMA_RDES3_OWN | DWCQOS_DMA_RDES3_INTE;
		wmb();
	}
	return 0;
}

/* Free RX descriptors: ring size minus outstanding (cur - dirty); unsigned wrap-safe. */
static u32 fh_qos_rx_desc_avail(struct dw_qos *pGmac, u32 q_no)
{
	struct net_rx_queue *q = &pGmac->rx_queue[q_no];

	return q->desc_size - (q->cur_idx - q->dirty_idx);
}


/*
 * Fetch the next completed RX descriptor at the software consumer index.
 * Returns it and stores its ring index in *index, or NULL when the software
 * ring is exhausted or DMA still owns the slot.
 * Fix: test the OWN bit with the RX define DWCQOS_DMA_RDES3_OWN (used when
 * arming descriptors) instead of the TX define DWCQOS_DMA_TDES3_OWN --
 * OWN is bit 31 of desc3 for both, but mixing the defines is misleading.
 */
static struct dwcqos_dma_desc *fh_qos_get_rx_desc_cur(struct dw_qos *pGmac, u32 q_no, u32 *index)
{
	struct dwcqos_dma_desc *p_desc;
	//check sw fifo
	if (fh_qos_rx_desc_avail(pGmac, q_no) == 0)
		return NULL;
	p_desc = &pGmac->rx_queue[q_no].p_descs[pGmac->rx_queue[q_no].cur_idx % pGmac->rx_queue[q_no].desc_size];
	//check hw desc dma own
	if (p_desc->desc3 & DWCQOS_DMA_RDES3_OWN)
		return NULL;
	*index = pGmac->rx_queue[q_no].cur_idx;
	pGmac->rx_queue[q_no].cur_idx++;

	return p_desc;
}


/* Raw RX slot access: @index is NOT wrapped here; callers pass 0..desc_size-1. */
static struct dwcqos_dma_desc *__fh_qos_get_rx_desc(
struct dw_qos *pGmac, u32 q_no, u32 index)
{
	return &pGmac->rx_queue[q_no].p_descs[index];
}

/* Read the stored skb virtual address for RX slot @index (index wraps). */
static u32 get_rx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index)
{
	struct net_rx_queue *q = &pGmac->rx_queue[q_no];

	return (u32) q->p_skbuf[index % q->desc_size];
}

/* Read the stored skb DMA address for RX slot @index (index wraps). */
static u32 get_rx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index)
{
	struct net_rx_queue *q = &pGmac->rx_queue[q_no];

	return (u32) q->rx_skbuff_dma[index % q->desc_size];
}

/* Record the skb virtual address for RX slot @index (index wraps). */
static void set_rx_skbuf_vadd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 vadd)
{
	struct net_rx_queue *q = &pGmac->rx_queue[q_no];

	q->p_skbuf[index % q->desc_size] = vadd;
}

/* Record the skb DMA address for RX slot @index (index wraps). */
static void set_rx_skbuf_padd(struct dw_qos *pGmac, u32 q_no, u32 index, u32 padd)
{
	struct net_rx_queue *q = &pGmac->rx_queue[q_no];

	q->rx_skbuff_dma[index % q->desc_size] = padd;
}
#if 0 /* not used yet */
/*
 * Debug helper (compiled out): dump the current/dirty indices of RX queue
 * @q_no and every descriptor's four words plus its physical address.
 */
static void debug_rx_desc_dump(struct dw_qos *pGmac, u32 q_no)
{
	struct dwcqos_dma_desc  *p_desc;
	int i;
	struct dwcqos_dma_desc *p_desc_add;

	/* physical ring base, only used to print each slot's physical address */
	p_desc_add = (struct dwcqos_dma_desc *)pGmac->rx_queue[q_no].descs_phy_base_addr;
	pr_err("q_no = %04d : cur_idx = %d : dty_idx = %d\n",
	q_no, pGmac->rx_queue[q_no].cur_idx, pGmac->rx_queue[q_no].dirty_idx);
	for (i = 0; i < pGmac->rx_queue[q_no].desc_size; i++) {
		p_desc = &pGmac->rx_queue[q_no].p_descs[i];
		pr_err("[index : %d] :: desc0 : desc1 : desc2 : desc3 : p_add = %08x : %08x : %08x : %08x %08x\n", i,
		p_desc->desc0, p_desc->desc1,
		p_desc->desc2, p_desc->desc3, (u32)&p_desc_add[i]);
	}

}
#endif

/*
 * Fetch the oldest RX descriptor awaiting refill (dirty side).
 * Returns it and stores its ring index in *index, or NULL when nothing is
 * outstanding or DMA still owns the slot.
 * Fix: use the RX define DWCQOS_DMA_RDES3_OWN for the OWN test instead of
 * the TX define DWCQOS_DMA_TDES3_OWN (same bit 31, consistent naming).
 */
static struct dwcqos_dma_desc *fh_qos_get_rx_desc_dirty(struct dw_qos *pGmac, u32 q_no, u32 *index)
{
	struct dwcqos_dma_desc  *p_desc;
	//check sw fifo
	if (pGmac->rx_queue[q_no].dirty_idx == pGmac->rx_queue[q_no].cur_idx)
		return NULL;

	p_desc = &pGmac->rx_queue[q_no].p_descs[pGmac->rx_queue[q_no].dirty_idx % pGmac->rx_queue[q_no].desc_size];
	//check hw desc dma own
	if (p_desc->desc3 & DWCQOS_DMA_RDES3_OWN)
		return NULL;
	*index = pGmac->rx_queue[q_no].dirty_idx;
	pGmac->rx_queue[q_no].dirty_idx++;
	return p_desc;
}

/*
 * Release every RX buffer still attached to queue @q_no: unmap its DMA
 * mapping (when one was recorded) and return the skb to the stack.
 * Descriptor memory itself is freed separately by fh_qos_rxq_free_desc().
 */
static void fh_qos_rxq_free_skb(struct net_device *ndev, u32 q_no)
{
	struct dw_qos *pGmac;
	struct sk_buff *skb;
	u32 sk_buf_dma_add;
	struct dwcqos_dma_desc  *p_desc;
	u32 index;
	u32 len;

	pGmac = netdev_priv(ndev);
	for (index = 0; index < pGmac->rx_queue[q_no].desc_size; index++) {
		skb = (struct sk_buff *)get_rx_skbuf_vadd(pGmac, q_no, index);
		p_desc = __fh_qos_get_rx_desc(pGmac, q_no, index);
		sk_buf_dma_add = get_rx_skbuf_padd(pGmac, q_no, index);
		/* NOTE(review): unmap length is read back from desc3[14:0];
		 * presumably this equals the size passed to dma_map_single()
		 * at refill time — confirm, since unmapping with a different
		 * length is invalid.
		 */
		len = p_desc->desc3  & 0x7fff;
		if (sk_buf_dma_add) {
			dma_unmap_single(pGmac->ndev->dev.parent,
			sk_buf_dma_add, len,
			DMA_FROM_DEVICE);
		}
		if ((u32)skb != 0)
			dev_consume_skb_any(skb);
	}
}

/* Free the bookkeeping arrays and the coherent descriptor ring of RX
 * queue @q_no. Buffers must already have been released via
 * fh_qos_rxq_free_skb().
 */
static void fh_qos_rxq_free_desc(struct net_device *ndev, u32 q_no)
{
	struct dw_qos *pGmac = netdev_priv(ndev);
	size_t ring_bytes;

	kfree(pGmac->rx_queue[q_no].p_skbuf);
	kfree(pGmac->rx_queue[q_no].rx_skbuff_dma);
	ring_bytes = sizeof(struct dwcqos_dma_desc) *
			pGmac->rx_queue[q_no].desc_size;
	dma_free_coherent(pGmac->dev, ring_bytes,
			pGmac->rx_queue[q_no].p_raw_desc,
			pGmac->rx_queue[q_no].descs_phy_base_addr);
}

/* Reset both ring indices of RX queue @q_no so producer and consumer
 * start in sync.
 */
static void fh_qos_rx_desc_index_init(struct dw_qos *pGmac, u32 q_no)
{
	pGmac->rx_queue[q_no].dirty_idx = 0;
	pGmac->rx_queue[q_no].cur_idx = 0;
}

/*
 * Issue a DMA software reset and poll (up to ~5 ms) for the SWR bit to
 * self-clear. Uses the DWCQOS_DMA_MODE_SWR mask for the write as well as
 * the poll — the original wrote a bare `1 << 0`, which is the same bit
 * the poll tests, so the named constant is used for consistency.
 *
 * Returns 0 on success, -1 on timeout (e.g. when the chip clock mux is
 * broken and the reset never completes).
 */
static int dwcqos_reset_dma_hw(struct dw_qos *pGmac)
{
	int i = 5000;
	u32 reg;

	dw_writel(pGmac, dma.mode, DWCQOS_DMA_MODE_SWR);
	do {
		i--;
		udelay(1);
		reg = dw_readl(pGmac, dma.mode);
	} while ((reg & DWCQOS_DMA_MODE_SWR) && i);

	/* We might experience a timeout if the chip clock mux is broken */
	if (!i) {
		pr_err("DMA reset timed out!\n");
		return -1;
	}
	return 0;
}

/* Program the DMA operating-mode register with @val verbatim. */
static void dwcqos_set_dma_mode(struct dw_qos *pGmac, u32 val)
{
	dw_writel(pGmac, dma.mode, val);
}

/* Program the AXI/system-bus mode register with a board-tuned value.
 * NOTE(review): 0x0303100e is a magic constant (burst-length / outstanding
 * request configuration, per the alternatives left in comments) — the
 * exact field meanings should be checked against the DWC EQoS databook.
 */
static void dwcqos_configure_bus(struct dw_qos *pGmac)
{
	//dw_writel(pGmac, dma.sysbus_mode, 2 << 16 | 7 << 1);
	//dw_writel(pGmac, dma.sysbus_mode, 0x303100c);
	//dw_writel(pGmac, dma.sysbus_mode, 0x0707100e);
	dw_writel(pGmac, dma.sysbus_mode, 0x0303100e);
}

/* Set or clear bit 0 (start/stop) of the TX control register of DMA
 * channel @dma_no, preserving all other bits.
 */
static void dwcqos_dma_tx_enable_set(struct dw_qos *pGmac, u32 dma_no, u32 enable)
{
	u32 ctrl = dw_readl(pGmac, dma.chan[dma_no].tx_control);

	ctrl = (ctrl & ~1u) | enable;
	dw_writel(pGmac, dma.chan[dma_no].tx_control, ctrl);
}

/* Set or clear bit 0 (start/stop) of the RX control register of DMA
 * channel @dma_no, preserving all other bits.
 */
static void dwcqos_dma_rx_enable_set(struct dw_qos *pGmac, u32 dma_no, u32 enable)
{
	u32 ctrl = dw_readl(pGmac, dma.chan[dma_no].rx_control);

	ctrl = (ctrl & ~1u) | enable;
	dw_writel(pGmac, dma.chan[dma_no].rx_control, ctrl);
}

/* Replace the low 16 bits of channel @dma_no's interrupt-enable register
 * with @val, keeping the upper bits untouched.
 */
static void dwcqos_dma_isr_enable_set(struct dw_qos *pGmac, u32 dma_no, u32 val)
{
	u32 reg = dw_readl(pGmac, dma.chan[dma_no].interrupt_enable);

	reg = (reg & ~0xffffu) | val;
	dw_writel(pGmac, dma.chan[dma_no].interrupt_enable, reg);
}

/* Read back the interrupt-enable register of DMA channel @dma_no. */
static u32 dwcqos_dma_isr_get(struct dw_qos *pGmac, u32 dma_no)
{
	return dw_readl(pGmac, dma.chan[dma_no].interrupt_enable);
}

/* Enable or disable the transmit-complete interrupt (TIE) of DMA channel
 * @dma_no without disturbing the other enable bits.
 */
static void dwcqos_dma_isr_tx_set(struct dw_qos *pGmac, u32 dma_no, u32 enable)
{
	u32 mask = dwcqos_dma_isr_get(pGmac, dma_no) & ~DWCQOS_DMA_CH_IE_TIE;

	if (enable)
		mask |= DWCQOS_DMA_CH_IE_TIE;
	dwcqos_dma_isr_enable_set(pGmac, dma_no, mask);
}


/* Enable or disable the receive interrupts (RIE + receive-buffer-
 * unavailable) of DMA channel @dma_no without disturbing other bits.
 */
static void dwcqos_dma_isr_rx_set(
struct dw_qos *pGmac, u32 dma_no, u32 enable)
{
	u32 mask = dwcqos_dma_isr_get(pGmac, dma_no) &
			~(DWCQOS_DMA_CH_IE_RIE | DWCQOS_DMA_CH_IE_RBUE);

	if (enable)
		mask |= DWCQOS_DMA_CH_IE_RIE | DWCQOS_DMA_CH_IE_RBUE;
	dwcqos_dma_isr_enable_set(pGmac, dma_no, mask);
}

/* Read the raw interrupt status of DMA channel @dma_no. */
static u32 dwcqos_dma_isr_status_get(struct dw_qos *pGmac, u32 dma_no)
{
	return dw_readl(pGmac, dma.chan[dma_no].status);
}


/* Write @val to channel @dma_no's status register (write-1-to-clear
 * semantics on this IP, per the 0xffffffff use in dwcqos_dma_chan_init).
 */
static void dwcqos_dma_isr_status_set(struct dw_qos *pGmac, u32 dma_no, u32 val)
{
	dw_writel(pGmac, dma.chan[dma_no].status, val);
}

/* Program MAC address slot 0 from the 6-byte array @mac. The bytes are
 * packed little-endian: mac[0] ends up in the low byte of the low
 * register, mac[5] in bits 15:8 of the high register.
 */
static void dwcqos_set_hw_mac_addr(struct dw_qos *pGmac, u8 *mac)
{
	u32 hi, lo;

	hi = ((u32)mac[5] << 8) | mac[4];
	lo = ((u32)mac[3] << 24) | ((u32)mac[2] << 16) |
		((u32)mac[1] << 8) | mac[0];
	dw_writel(pGmac, mac.mac_addr[0].addr_hi, hi);
	dw_writel(pGmac, mac.mac_addr[0].addr_lo, lo);
}

/* Disable unicast MAC address slot @reg_n by zeroing its high register.
 * Slot 0 holds the primary station address and is never disabled.
 */
static void dwcqos_disable_umac_addr(struct dw_qos *pGmac, unsigned int reg_n)
{
	if (reg_n == 0)
		return;
	dw_writel(pGmac, mac.mac_addr[reg_n].addr_hi, 0);
}

/* Program the MAC packet-filter register with @filter verbatim. */
static void dwcqos_set_hw_mac_filter(struct dw_qos *pGmac, u32 filter)
{
	dw_writel(pGmac, mac.packet_filter, filter);
}

/* Program the MAC interrupt-enable register with @isr verbatim. */
static void dwcqos_set_hw_mac_interrupt(struct dw_qos *pGmac, u32 isr)
{
	dw_writel(pGmac, mac.interrupt_enable, isr);
}

/* Write @data to the per-queue MAC TX flow-control register @q_no. */
static void dwcqos_set_hw_mac_tx_flowctrl(struct dw_qos *pGmac, u32 q_no, u32 data)
{
	dw_writel(pGmac, mac.tx_flow_ctrl[q_no], data);
}

/* Read the per-queue MAC TX flow-control register @q_no. */
static int dwcqos_get_hw_mac_tx_flowctrl(struct dw_qos *pGmac, u32 q_no)
{
	return dw_readl(pGmac, mac.tx_flow_ctrl[q_no]);
}

/* Read the MTL RX-queue operation-mode register for queue @q_no.
 * Queue 0 lives in a separate register block from queues 1..n.
 */
static int dwcqos_get_mtl_rx_operation(struct dw_qos *pGmac, u32 q_no)
{
	return q_no ? dw_readl(pGmac, mtl.q_x[q_no - 1].rxq_operation_mode)
		: dw_readl(pGmac, mtl.q_0.rxq_operation_mode);
}

/* Write @data to the MTL RX-queue operation-mode register for queue
 * @q_no. Queue 0 lives in a separate register block from queues 1..n.
 */
static void dwcqos_set_mtl_rx_operation(struct dw_qos *pGmac, u32 q_no, u32 data)
{
	if (q_no)
		dw_writel(pGmac, mtl.q_x[q_no - 1].rxq_operation_mode, data);
	else
		dw_writel(pGmac, mtl.q_0.rxq_operation_mode, data);
}

//mac will parse pause frame,then halt mac transmitter for pause time
static void dwcqos_set_hw_parse_pause_frame(struct dw_qos *pGmac, u32 enable)
{
	int ret;

	ret = dw_readl(pGmac, mac.rx_flow_ctrl);
	ret &= ~(1 << 0);
	if (enable)
		ret |= (1 << 0);
	dw_writel(pGmac, mac.rx_flow_ctrl, ret);
}

/* Return 1 when the MAC receiver enable bit is set in mac.config,
 * 0 otherwise.
 */
static u32 dwcqos_hw_rx_is_active(struct dw_qos *pGmac)
{
	return (dw_readl(pGmac, mac.config) & DWCQOS_MAC_RX_POS) ? 1 : 0;
}

/*
 * Clear mac.config bits 1:0 (presumably the TE/RE enables — confirm
 * against the register map) and keep re-writing until the MAC reports
 * them clear. Returns the previous state of those two bits so the caller
 * can restore it.
 * NOTE(review): this read-modify-write loop has no timeout; a wedged MAC
 * would spin here forever.
 */
static u32 dwcqos_hw_halt_xmit(struct dw_qos *pGmac)
{
	int ret;
	u32 temp;

	temp = dw_readl(pGmac, mac.config);
	do {
		ret = dw_readl(pGmac, mac.config);
		ret &= ~(3);
		dw_writel(pGmac, mac.config, ret);
		ret = dw_readl(pGmac, mac.config);
	} while (ret & 3);
	return temp & 3;
}

//tx pause, sw could set reg to generate a pause frame.
//or when rx fifo exceed lev, auto send pause frame. but need hw cfg support
/*
 * Configure automatic transmit flow control: when RX FIFO @rxq_no fills
 * past the activate threshold the MAC sends pause frames on TX queue
 * @txq_no with @pause_time; it stops once the FIFO drains below the
 * deactivate threshold.
 *
 * Preconditions enforced here: the RX FIFO must be at least
 * FLOW_CTL_MIN_RX_FIFO (4 KB) and @pause_time must fit the 16-bit
 * pause-time field.
 */
static void dwcqos_set_hw_tx_auto_flowctrl(struct dw_qos *pGmac, u32 rxq_no, u32 txq_no,
u32 pause_time, u32 enable)
{
	u32 ret;
	u32 rfa;
	u32 rfd;
	/* Decode the RX FIFO size from RQS (bits 29:20, in 256-byte units,
	 * encoded as size-1) and reject FIFOs below 4 KB.
	 */
	ret = (u32)dwcqos_get_mtl_rx_operation(pGmac, rxq_no);
	ret = ((ret & 0x3ff00000) >> 20) + 1;
	ret *= 256;
	if (ret < FLOW_CTL_MIN_RX_FIFO) {
		pr_err("[rx%d] :: faild to set tx flow, cause min rx 0x%x\n",
		rxq_no, FLOW_CTL_MIN_RX_FIFO);
		return;
	}
	if (pause_time > FLOW_CTL_MAX_PAUSE_TIME) {
		pr_err("[tx%d] :: faild to set tx flow, cause max pause time 0x%x\n",
		txq_no, FLOW_CTL_MAX_PAUSE_TIME);
		return;
	}
	/* Activate threshold (RFA): 0 encodes "fifo full - 1K". */
	rfa = 0;
	/* Deactivate threshold (RFD): encoded in 0.5K steps where 0 means
	 * 1K; target level is "empty + 1K" above the deactive watermark.
	 */
	rfd = (ret - (FLOW_CTL_DEACTIVE_RX_FIFO + 0x400)) / 0x200;
	/*pr_err("deactive lev [%d]\n",ret - (0x400 + rfd * 0x200));*/
	/*pr_err("active lev [%d]\n",ret - (0x400 + rfa * 0x200));*/

	/* MTL side: program EHFC (bit 7) plus RFA/RFD fields. */
	ret = (u32)dwcqos_get_mtl_rx_operation(pGmac, rxq_no);
	//clear enable | rfa | rfd
	ret &= ~((0x000fff00) | (1 << 7));
	ret |= (rfa << 8) | (rfd << 14);
	if (enable)
		ret |= 1 << 7;

	dwcqos_set_mtl_rx_operation(pGmac, rxq_no, ret);
	/* MAC side: TFE (bit 1) plus the 16-bit pause time. */
	ret = dwcqos_get_hw_mac_tx_flowctrl(pGmac, txq_no);
	//clear enable | pause time
	ret &= ~((1 << 1) | (0xffff << 16));
	ret |= pause_time << 16;
	if (enable)
		ret |= 1 << 1;
	dwcqos_set_hw_mac_tx_flowctrl(pGmac, txq_no, ret);
}


/* Write @config to the MAC configuration register verbatim. */
static void dwcqos_set_hw_mac_config(struct dw_qos *pGmac, u32 config)
{
	dw_writel(pGmac, mac.config, config);
}

/* Read back the MAC configuration register. */
static int dwcqos_get_hw_mac_config(struct dw_qos *pGmac)
{
	return dw_readl(pGmac, mac.config);
}

/* Program the MAC port speed bits (config[15:14]): both set = 100M,
 * bit 15 only = 10M, both clear = 1000M.
 */
static void dwcqos_mac_spd_port_set(struct dw_qos *pGmac, int spd)
{
	int cfg = dwcqos_get_hw_mac_config(pGmac);

	cfg &= ~(3 << 14);
	switch (spd) {
	case 100:
		cfg |= (1 << 15) | (1 << 14);
		break;
	case 10:
		cfg |= 1 << 15;
		break;
	default:
		/* 1000M: both bits stay clear. */
		break;
	}
	dwcqos_set_hw_mac_config(pGmac, cfg);
}

/* Program the duplex-mode bit (config[13]); @duplex is 0 or 1. */
static void dwcqos_mac_duplex_set(struct dw_qos *pGmac, int duplex)
{
	int cfg = dwcqos_get_hw_mac_config(pGmac);

	cfg = (cfg & ~(1 << 13)) | (duplex << 13);
	dwcqos_set_hw_mac_config(pGmac, cfg);
}

//maybe could set a map to route diff packet to diff rx queue..
//here set last queue rx mul and broadcast packet
static void dwcqos_mac_route_rxqueue_set(struct dw_qos *pGmac)
{
	//int i;
	#if (0)
	int val = queue_no << 0 | queue_no << 4 | queue_no << 8
	| queue_no << 12 | queue_no << 16 | 1 << 20;
	dw_writel(pGmac, mac.rxq_ctrl[1], val);
	#endif
	dw_writel(pGmac, mac.rxq_ctrl[1], (1 << 20) | ((pGmac->hw_fea.rxq_num - 1) << 16));
}

/* Enable (value 2 = enabled for DCB/generic) or disable the 2-bit field
 * of RX queue @queue_no in rxq_ctrl[0].
 */
static void dwcqos_queue_enable_set(struct dw_qos *pGmac, u32 queue_no, u32 enable)
{
	int shift = queue_no * 2;
	int reg = dw_readl(pGmac, mac.rxq_ctrl[0]);

	reg &= ~(3 << shift);
	if (enable)
		reg |= 2 << shift;
	dw_writel(pGmac, mac.rxq_ctrl[0], reg);
}

/* Turn on the MAC transmitter by setting its enable bit in mac.config. */
static void dwcqos_mac_tx_enable(struct dw_qos *pGmac)
{
	int cfg = dwcqos_get_hw_mac_config(pGmac) | DWCQOS_MAC_TX_POS;

	dwcqos_set_hw_mac_config(pGmac, cfg);
}

/* Clear the MAC transmitter enable bit and re-write until the hardware
 * reports it clear.
 * NOTE(review): no timeout — a non-responsive MAC would spin forever.
 */
static void dwcqos_mac_tx_disable(struct dw_qos *pGmac)
{
	int ret;

	do {
		ret = dw_readl(pGmac, mac.config);
		ret &= ~DWCQOS_MAC_TX_POS;
		dw_writel(pGmac, mac.config, ret);
		ret = dw_readl(pGmac, mac.config);
	} while (ret & DWCQOS_MAC_TX_POS);
}

/* Turn on the MAC receiver by setting its enable bit in mac.config. */
static void dwcqos_mac_rx_enable(struct dw_qos *pGmac)
{
	int cfg = dwcqos_get_hw_mac_config(pGmac) | DWCQOS_MAC_RX_POS;

	dwcqos_set_hw_mac_config(pGmac, cfg);
}

/* Clear the MAC receiver enable bit and re-write until the hardware
 * reports it clear. Non-static: also used outside this file.
 * NOTE(review): no timeout — a non-responsive MAC would spin forever.
 */
void dwcqos_mac_rx_disable(struct dw_qos *pGmac)
{
	int ret;

	do {
		ret = dw_readl(pGmac, mac.config);
		ret &= ~DWCQOS_MAC_RX_POS;
		dw_writel(pGmac, mac.config, ret);
		ret = dw_readl(pGmac, mac.config);
	} while (ret & DWCQOS_MAC_RX_POS);
}

/* Read the MTL TX-queue debug register for queue @q_no (queue 0 has its
 * own register block).
 */
static int dwcqos_get_hw_mtl_txqx_debug(struct dw_qos *pGmac, u32 q_no)
{
	return q_no ? dw_readl(pGmac, mtl.q_x[q_no - 1].txq_debug)
		: dw_readl(pGmac, mtl.q_0.txq_debug);
}

/* Read the MTL RX-queue debug register for queue @q_no (queue 0 has its
 * own register block).
 */
static int dwcqos_get_hw_mtl_rxqx_debug(struct dw_qos *pGmac, u32 q_no)
{
	return q_no ? dw_readl(pGmac, mtl.q_x[q_no - 1].rxq_debug)
		: dw_readl(pGmac, mtl.q_0.rxq_debug);
}

/* Read the MTL interrupt control/status register for queue @q_no
 * (queue 0 has its own register block).
 */
static int dwcqos_get_hw_mtl_isr_status(struct dw_qos *pGmac, u32 q_no)
{
	return q_no ? dw_readl(pGmac, mtl.q_x[q_no - 1].interrupt_control_status)
		: dw_readl(pGmac, mtl.q_0.interrupt_control_status);
}


/* Write @val to the MTL interrupt control/status register of queue
 * @q_no (queue 0 has its own register block).
 */
static void dwcqos_set_hw_mtl_isr_status(struct dw_qos *pGmac, u32 q_no, u32 val)
{
	if (q_no)
		dw_writel(pGmac, mtl.q_x[q_no - 1].interrupt_control_status, val);
	else
		dw_writel(pGmac, mtl.q_0.interrupt_control_status, val);
}
#if 0 /* not used yet */
/* Compiled out: program the 14-bit MSS field (bits 13:0) of channel
 * @q_no's control register, preserving the remaining bits.
 */
static void dwcqos_dma_chan_set_mss(struct dw_qos *pGmac, u32 q_no, u32 val)
{
	u32 ret;
	ret = dw_readl(pGmac, dma.chan[q_no].control);
	ret &= ~(0x3fff);
	ret |= (val & 0x3fff);
	dw_writel(pGmac, dma.chan[q_no].control, ret);
}
#endif

/* Read the top-level DMA interrupt status register. */
static int dwcqos_dma_get_interrupt_status(struct dw_qos *pGmac)
{
	return dw_readl(pGmac, dma.interrupt_status);
}

/* Read the top-level MAC interrupt status register. */
static int dwcqos_mac_get_interrupt_status(struct dw_qos *pGmac)
{
	return dw_readl(pGmac, mac.interrupt_status);
}

static void dwcqos_init_mtl_hw_rxdma_queue_map(struct dw_qos *pGmac)
{
	int q_map_0 = 0;
	int q_map_1 = 0;
	int i;

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, i) {
		if (i < 4) {
			q_map_0 |= i << i * 8;
		} else {
			q_map_1 |= (i-4) << (i-4) * 8;
		}
	}
	dw_writel(pGmac, mtl.rxq_dma_map0, 0x100);
	//dw_writel(pGmac, mtl.rxq_dma_map1, q_map_1);

}

/* Program every TX/RX DMA channel with its descriptor ring base address
 * and ring length (encoded as size - 1). Tail pointers are deliberately
 * NOT written here — see the comment below.
 */
static void dwcqos_dma_desc_init(struct dw_qos *pGmac)
{
	int i;
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, i) {
		dw_writel(pGmac, dma.chan[i].txdesc_list_laddr, pGmac->tx_queue[i].descs_phy_base_addr);
		//do not set tail point here...or if you only have one tx desc, the hw will get in suspend mode.
		//dw_writel(pGmac, dma.chan[i].txdesc_tail_pointer, pGmac->tx_queue[i].descs_phy_tail_addr);
		dw_writel(pGmac, dma.chan[i].txdesc_ring_len, pGmac->tx_queue[i].desc_size - 1);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, i) {
		dw_writel(pGmac, dma.chan[i].rxdesc_list_laddr, pGmac->rx_queue[i].descs_phy_base_addr);
		//dw_writel(pGmac, dma.chan[i].rxdesc_tail_pointer, pGmac->rx_queue[i].descs_phy_tail_addr);
		dw_writel(pGmac, dma.chan[i].rxdesc_ring_len, pGmac->rx_queue[i].desc_size - 1);
	}
}

/* Configure and start every DMA channel: TX PBL/TSO settings, interrupt
 * masks cleared, pending status acknowledged, RX buffer size programmed,
 * then both directions started.
 */
static void dwcqos_dma_chan_init(struct dw_qos *pGmac)
{
	int i;
	int reg;
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.tx_dma_num, i) {
		/* TX control: PBL = 8 (bits 21:16); 0x110000 in the channel
		 * control register — NOTE(review): magic value, presumably
		 * PBLx8/DSL settings; confirm against the databook.
		 */
		reg = 8 << 16;
		dw_writel(pGmac, dma.chan[i].control, 0x110000);
		if (pGmac->hw_fea.tso_flag)
			reg |= DWCQOS_DMA_CH_TX_TSE;
		dw_writel(pGmac, dma.chan[i].tx_control, reg);
		/* Mask all channel interrupts for now ... */
		dwcqos_dma_isr_enable_set(pGmac, i, 0);
		/* ... and clear any stale status (write-1-to-clear). */
		dwcqos_dma_isr_status_set(pGmac, i, 0xffffffff);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rx_dma_num, i) {
		/* RX control: bit 31 + PBL 2 + receive buffer size 2048. */
		dw_writel(pGmac, dma.chan[i].rx_control, 1 << 31 | 2 << 16 | 2048 << 1);
	}


	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.tx_dma_num, i) {
		dwcqos_dma_tx_enable_set(pGmac, i, 1);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rx_dma_num, i) {
		dwcqos_dma_rx_enable_set(pGmac, i, 1);
	}

}

/* Board policy override: force single-queue / single-DMA-channel
 * operation regardless of what the hardware feature registers advertise.
 */
static void refix_feature(struct dw_qos *pGmac)
{
	pGmac->hw_fea.txq_num = 1;
	pGmac->hw_fea.tx_dma_num = 1;
	pGmac->hw_fea.rxq_num = 1;
	pGmac->hw_fea.rx_dma_num = 1;
}

static void parse_hw_feature(struct dw_qos *pGmac)
{
	//queue num..
	unsigned int tx_fifo_size, rx_fifo_size;

	pGmac->hw_fea.rxq_num = (pGmac->hw_fea.feature2 & 0xf) + 1;
	pGmac->hw_fea.txq_num = ((pGmac->hw_fea.feature2 >> 6) & 0xf) + 1;
	pGmac->hw_fea.tx_dma_num = ((pGmac->hw_fea.feature2 >> 18) & 0xf) + 1;
	pGmac->hw_fea.rx_dma_num = ((pGmac->hw_fea.feature2 >> 12) & 0xf) + 1;
	pGmac->hw_fea.tso_flag = (pGmac->hw_fea.feature1  & (1 << 18)) ;
	refix_feature(pGmac);
	//fifo size..
	tx_fifo_size  = (pGmac->hw_fea.feature1 >> 6) & 0x1f;
	rx_fifo_size = (pGmac->hw_fea.feature1 >> 0) & 0x1f;
	pGmac->hw_fea.tx_fifo_size = 128 << tx_fifo_size;
	pGmac->hw_fea.rx_fifo_size = 128 << rx_fifo_size;
	/*
	pr_err("[queue ] : %x : %x; [fifo size] : %x : %x\n",pGmac->hw_fea.rxq_num,pGmac->hw_fea.txq_num,
	pGmac->hw_fea.tx_fifo_size, pGmac->hw_fea.rx_fifo_size);
	pr_err("[dma no] :: tx : rx = %x : %x\n",pGmac->hw_fea.tx_dma_num, pGmac->hw_fea.rx_dma_num);
	*/
}

/* Placeholder for MAC-address initialization (e.g. handoff from the
 * bootloader); intentionally empty for now.
 */
static void fh_qos_mac_add_init(struct dw_qos *pGmac)
{
	//may be get from uboot.here do nothing
}

/*
 * .ndo_set_rx_mode handler: program promiscuous / all-multi / hash-based
 * multicast filtering and the unicast address slots according to the
 * netdev flags and address lists.
 */
static void qos_dev_mcast_set(struct net_device *ndev)
{
	u32 regval = 0;
	u32 mc_filter[2];
	int reg = 1;
	struct netdev_hw_addr *ha;
	unsigned int max_mac_addr;
	struct dw_qos *pGmac;
	pGmac = netdev_priv(ndev);

	/* Number of address slots from feature1 bits 24:18, plus slot 0. */
	max_mac_addr = (1 + (((pGmac->hw_fea.feature1) & 0x1fc0000) >> 18));
	//pr_err("max_mac_addr is %x\n",max_mac_addr);
	if (ndev->flags & IFF_PROMISC) {
		regval = DWCQOS_MAC_PKT_FILT_PR;
	} else if (((netdev_mc_count(ndev) > DWCQOS_HASH_TABLE_SIZE) ||
				(ndev->flags & IFF_ALLMULTI))) {
		/* Too many groups (or ALLMULTI): pass all multicast. */
		regval = DWCQOS_MAC_PKT_FILT_PM;

		dw_writel(pGmac, mac.hash_table_reg[0], 0xffffffff);
		dw_writel(pGmac, mac.hash_table_reg[1], 0xffffffff);
	} else if (!netdev_mc_empty(ndev)) {
		regval = DWCQOS_MAC_PKT_FILT_HMC;
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, ndev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
			/* The most significant bit determines the register
			 * to use (H/L) while the other 5 bits determine
			 * the bit within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		dw_writel(pGmac, mac.hash_table_reg[0], mc_filter[0]);
		dw_writel(pGmac, mac.hash_table_reg[1], mc_filter[1]);
	}
	if (netdev_uc_count(ndev) > max_mac_addr) {
		regval |= DWCQOS_MAC_PKT_FILT_PR;
	} else {
		/* NOTE(review): dwcqos_set_hw_mac_addr() always writes slot
		 * 0, so with multiple UC addresses each iteration overwrites
		 * the previous one while `reg` counts slots that were never
		 * actually programmed — looks like a per-slot variant is
		 * needed here; confirm intent.
		 */
		netdev_for_each_uc_addr(ha, ndev) {
			dwcqos_set_hw_mac_addr(pGmac, (u8 *)ha->addr);
			reg++;
		}
		for (; reg < max_mac_addr; reg++)
			dwcqos_disable_umac_addr(pGmac, reg);
	}
	dwcqos_set_hw_mac_filter(pGmac, regval);
}

/*
 * .ndo_set_mac_address handler. Let eth_mac_addr() validate the new
 * address (and reject it while the interface is running) BEFORE touching
 * driver state — the original copied into local_mac_address and wrote the
 * hardware first, leaving stale state behind when validation failed.
 *
 * Returns 0 on success or the negative errno from eth_mac_addr().
 */
static int qos_dev_set_mac_addr(struct net_device *dev, void *p)
{
	struct dw_qos *pGmac = netdev_priv(dev);
	struct sockaddr *addr = p;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	memcpy(pGmac->local_mac_address, addr->sa_data, ETH_ALEN);
	dwcqos_set_hw_mac_addr(pGmac, pGmac->local_mac_address);
	return 0;
}

/* .ndo_do_ioctl handler: forward MII ioctls to the attached PHY.
 * Fails with -EINVAL when the interface is down or no PHY is bound.
 */
static int qos_dev_ioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
{
	struct dw_qos *pGmac = netdev_priv(ndev);

	if (!netif_running(ndev) || !pGmac->phydev)
		return -EINVAL;

	return phy_mii_ioctl(pGmac->phydev, ifrq, cmd);
}

/* Debug dump of queue-0 state: software ring indices plus the hardware
 * DMA descriptor list/tail/length/current registers for TX and RX.
 */
static void print_mac_status(struct dw_qos *pGmac)
{

	pr_err("SW TX :: dirty = %04d ; cur = %04d\n",
	pGmac->tx_queue[0].dirty_idx, pGmac->tx_queue[0].cur_idx);
	pr_err("SW RX :: dirty = %04d ; cur = %04d\n",
	pGmac->rx_queue[0].dirty_idx, pGmac->rx_queue[0].cur_idx);

	pr_err("HW info below ::\n");

	pr_err("TX DMA: from 0x%08x to 0x%08x len = %x; Current 0x%08x\n",
			dw_readl(pGmac, dma.chan[0].txdesc_list_laddr),
			dw_readl(pGmac, dma.chan[0].txdesc_tail_pointer),
			dw_readl(pGmac, dma.chan[0].txdesc_ring_len),
			dw_readl(pGmac, dma.chan[0].current_app_txdesc)),

	pr_err("RX DMA: from 0x%08x to 0x%08x len = %x; Current 0x%08x\n",
			dw_readl(pGmac, dma.chan[0].rxdesc_list_laddr),
			dw_readl(pGmac, dma.chan[0].rxdesc_tail_pointer),
			dw_readl(pGmac, dma.chan[0].rxdesc_ring_len),
			dw_readl(pGmac, dma.chan[0].current_app_rxdesc));

}

/*
 * Deferred TX-timeout recovery (runs from txtimeout_handler_wq).
 * Invalidates the cached speed so fh_qos_adjust_link() performs a full
 * reconfiguration, with NAPI disabled around it. The heavier
 * suspend/resume + ring-reset path is kept compiled out in the #if (0)
 * block below.
 */
static void dwcqos_reinit_for_txtimeout(struct work_struct *data)
{
	struct dw_qos *pGmac = container_of(data, struct dw_qos,
		txtimeout_reinit);
#if (0)
	tx_queue = &pGmac->tx_queue[0];

	//pr_err("tx send timeout process.\n");
	txq = netdev_get_tx_queue(pGmac->ndev, 0);
	//print_mac_status(pGmac);
	spin_lock(&pGmac->lock);
	fh_qos_suspend(pGmac);
	netif_tx_lock_bh(pGmac->ndev);
	netdev_tx_reset_queue(txq);
	netif_tx_unlock_bh(pGmac->ndev);

	fh_qos_resume(pGmac);
	spin_unlock(&pGmac->lock);

	spin_lock(&tx_queue->tx_lock);
	memset(pGmac->tx_queue[0].p_skbuf, 0,
	sizeof(u32) * pGmac->tx_queue[0].desc_size);
	memset(pGmac->tx_queue[0].tx_skbuff_dma, 0,
	sizeof(u32) * pGmac->tx_queue[0].desc_size);
	memset(pGmac->tx_queue[0].p_descs, 0,
	pGmac->tx_queue[0].desc_size * sizeof(struct dwcqos_dma_desc));
	spin_unlock(&tx_queue->tx_lock);
#endif

	/* Poison the cached speed so adjust_link sees a change. */
	pGmac->speed = 0xffffffff;
	rtnl_lock();
	napi_disable(&(pGmac->napi));
	pr_err("%s  %d\n", __func__, __LINE__);
	print_mac_status(pGmac);
	fh_qos_adjust_link(pGmac->ndev);
	napi_enable(&(pGmac->napi));
	rtnl_unlock();
}


/* .ndo_tx_timeout handler: defer recovery to the workqueue since this
 * runs in a context where the reinit's rtnl/napi work is not allowed.
 */
static void qos_dev_tx_timeout(struct net_device *ndev)
{
	struct dw_qos *pGmac = netdev_priv(ndev);

	queue_work(pGmac->txtimeout_handler_wq, &pGmac->txtimeout_reinit);
}

/* .ndo_change_mtu handler: accept an MTU in [46, ETH_DATA_LEN] and
 * refresh the offload feature set afterwards. (An earlier revision also
 * required the interface to be down; that check stays disabled.)
 */
static int qos_dev_change_mtu(struct net_device *ndev, int new_mtu)
{
	int max_mtu = ETH_DATA_LEN;

	if (new_mtu < 46 || new_mtu > max_mtu) {
		pr_err("%s: invalid MTU, max MTU is: %d\n",
				ndev->name, max_mtu);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;
	netdev_update_features(ndev);
	return 0;
}

/*
 * .ndo_set_config handler: reject any change while the interface is up,
 * and refuse to move the I/O base address or IRQ; all other map fields
 * are ignored. Uses pr_warn() — pr_warning() was deprecated and removed
 * from the kernel.
 */
static int qos_dev_set_config(struct net_device *ndev, struct ifmap *map)
{

	if (ndev->flags & IFF_UP)	/* can't act on a running interface */
		return -EBUSY;
	/* Don't allow changing the I/O address */
	if (map->base_addr != ndev->base_addr) {
		pr_warn("%s: can't change I/O address\n", ndev->name);
		return -EOPNOTSUPP;
	}
	/* Don't allow changing the IRQ */
	if (map->irq != ndev->irq) {
		pr_warn("%s: can't change IRQ number %d\n",
				ndev->name, ndev->irq);
		return -EOPNOTSUPP;
	}
	/* ignore other fields */
	return 0;
}

/* Netdev callbacks for this driver. qos_dev_open/stop/xmit are defined
 * elsewhere in the file.
 */
static const struct net_device_ops fh_gmac_netdev_ops = {

	.ndo_open				= qos_dev_open,
	.ndo_stop				= qos_dev_stop,
	.ndo_start_xmit			= qos_dev_xmit,
	.ndo_set_rx_mode		= qos_dev_mcast_set,
	.ndo_set_mac_address	= qos_dev_set_mac_addr,
	.ndo_do_ioctl			= qos_dev_ioctl,
	.ndo_tx_timeout			= qos_dev_tx_timeout,
	.ndo_change_mtu			= qos_dev_change_mtu,
	.ndo_fix_features		= NULL,
	.ndo_set_config			= qos_dev_set_config,

};

/* Allocate and initialize every TX and RX descriptor ring, reset their
 * indices, set up the per-queue locks and back-pointers, and register
 * the single shared NAPI context.
 */
static void dwcqos_desc_init(struct net_device *ndev)
{
	struct dw_qos *pGmac;
	int i;

	pGmac = netdev_priv(ndev);
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, i) {
		fh_qos_txq_malloc_desc(ndev, i, TX_DESC_NUM);
		fh_qos_tx_desc_index_init(pGmac, i);
		spin_lock_init(&(pGmac->tx_queue[i].tx_lock));
		pGmac->tx_queue[i].pGmac = pGmac;
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, i) {
		//CAUTION!!!!! here set net_rx_packets for q1-qn..means that only one rx queue could work...
		fh_qos_rxq_malloc_desc(ndev, i, RX_DESC_NUM);
		fh_qos_rx_desc_index_init(pGmac, i);
		//napi init
		//netif_napi_add(ndev, &(pGmac->rx_queue[i].rx_napi), fh_gmac_rx_napi_process, NAPI_POLL_WEIGHT);
		spin_lock_init(&(pGmac->rx_queue[i].rx_lock));
		pGmac->rx_queue[i].pGmac = pGmac;
	}
	netif_napi_add(ndev, &(pGmac->napi), fh_gmac_rx_napi_process, NAPI_POLL_WEIGHT);
}

/*
 * One-time software initialization: wire the net_device to the platform
 * device, set driver defaults (full duplex, 100M, RX flow control),
 * install the netdev/ethtool ops, and derive the offload feature flags
 * (TSO, TX/RX checksum) from the cached hardware feature registers.
 * Always returns 0.
 */
static int  dwcqos_init_sw(struct net_device *ndev, struct platform_device *pdev)
{
	struct dw_qos *pGmac;
	pGmac = netdev_priv(ndev);
	/* add net_device to platform_device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pGmac->dev = &(pdev->dev);
	pGmac->pdev = pdev;
	pGmac->ndev = ndev;
	pGmac->msg_enable = netif_msg_init(debug, QOS_GMAC_DEBUG);
	pGmac->duplex = GMAC_DUPLEX_FULL;
	pGmac->speed = GMAC_SPEED_100M;
	pGmac->flow_ctrl = FLOW_RX;
	platform_set_drvdata(pdev, ndev);
	spin_lock_init(&pGmac->lock);
	pGmac->phy_interface = QOS_PHY_MODE;
	ndev->base_addr = (unsigned long)pGmac->regs;
	ether_setup(ndev);
	ndev->netdev_ops = &fh_gmac_netdev_ops;
	//TBD
	fh_gmac_set_ethtool_ops(ndev);

	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
	/* dev_addr aliases local_mac_address from here on. */
	ndev->dev_addr = pGmac->local_mac_address;
	fh_qos_mac_add_init(pGmac);
	ndev->hw_features = NETIF_F_SG;
	if (pGmac->hw_fea.feature1 & DWCQOS_MAC_HW_FEATURE1_TSOEN)
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (pGmac->hw_fea.feature0 & DWCQOS_MAC_HW_FEATURE0_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (pGmac->hw_fea.feature0 & DWCQOS_MAC_HW_FEATURE0_RXCOESEL)
		ndev->hw_features |= NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	return 0;
}


/* Bring up the DMA block: software reset, interrupt mode, bus settings,
 * descriptor ring registers and per-channel configuration/start.
 * Returns -1 if the software reset times out, 0 otherwise.
 */
static int dwcqos_init_dma_hw(struct dw_qos *pGmac)
{
	if (dwcqos_reset_dma_hw(pGmac))
		return -1;
	//set dma isr mode..
	dwcqos_set_dma_mode(pGmac, 3 << 16);
	//3:dma_sys_bus_mode...
	dwcqos_configure_bus(pGmac);
	//4:desc....
	//5:desc ring size...
	//6:desc base add and tail addr...
	dwcqos_dma_desc_init(pGmac);
	//7:dma chan0 ctrl. dma chan0 tx ctrl. dma rx ctrl..
	//8:dma chan0 interrupt..
	//9:start dma chan with dma chan0 rx/tx ctrl..
	dwcqos_dma_chan_init(pGmac);
	return 0;
}

/* Advance the TX tail pointer of queue @q_no so the DMA engine picks up
 * newly queued descriptors.
 */
static void dwcqos_kick_tx_queue(struct dw_qos *pGmac, u32 q_no)
{
	//pr_err("kick tail add %08x= %x\n",q_no , pGmac->tx_queue[q_no].descs_phy_tail_addr);
	dw_writel(pGmac, dma.chan[q_no].txdesc_tail_pointer, pGmac->tx_queue[q_no].descs_phy_tail_addr);
}

/* Advance the RX tail pointer of queue @q_no so the DMA engine sees the
 * refilled descriptors.
 */
static void dwcqos_kick_rx_queue(struct dw_qos *pGmac, u32 q_no)
{
	dw_writel(pGmac, dma.chan[q_no].rxdesc_tail_pointer, pGmac->rx_queue[q_no].descs_phy_tail_addr);
}

/*
 * Configure the MTL (MAC Transaction Layer): scheduling algorithm,
 * RX-queue-to-DMA map, and per-queue TX/RX operation modes. Queue sizes
 * are programmed in 256-byte units encoded as (size/256 - 1). TX queues
 * use store-and-forward (TSF); RX queues use store-and-forward (RSF)
 * plus forward-undersized/forward-error (FUP/FEP).
 */
static void dwcqos_init_mtl_hw(struct dw_qos *pGmac)
{
	int i;
	int tx_queue_size = 0;
	int rx_queue_size = 0;
	//program tx schedule
	//rev arbitration algo
	dw_writel(pGmac, mtl.operation_mode, 0x60);
	//dw_writel(pGmac, mtl.operation_mode, 0);
	//pro dma map0 and map1
	dwcqos_init_mtl_hw_rxdma_queue_map(pGmac);
	//tx_queue_size = (pGmac->hw_fea.tx_fifo_size / pGmac->hw_fea.txq_num / 256) - 1;
	//tx queue operation...
		//1):TSF, TTC
		//2):TXQEN
		//3):TQS

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, i) {
		tx_queue_size = (pGmac->tx_queue[i].hw_queue_size / 256) - 1;
		if (i == 0) {
			dw_writel(pGmac, mtl.q_0.txq_operation_mode, (tx_queue_size << 16) |
			DWCQOS_MTL_TXQ_TXQEN |
			DWCQOS_MTL_TXQ_TSF | (7 << 4));
			dw_writel(pGmac, mtl.q_0.txq_quantum_weight, i);
		} else {
			dw_writel(pGmac, mtl.q_x[i - 1].txq_operation_mode, (tx_queue_size << 16) |
			DWCQOS_MTL_TXQ_TXQEN |
			DWCQOS_MTL_TXQ_TSF | (7 << 4));
			dw_writel(pGmac, mtl.q_x[i - 1].txq_quantum_weight, i);
		}
	}

	//rx queue operation...
		//1):RSF, RTC
		//2):RFA RFD
		//3):FEP FUP
		//4):RQS
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, i) {
		rx_queue_size = (pGmac->rx_queue[i].hw_queue_size / 256) - 1;
		if (i == 0) {
			dw_writel(pGmac, mtl.q_0.rxq_operation_mode,
			(rx_queue_size << 20) | DWCQOS_MTL_RXQ_RSF | DWCQOS_MTL_RXQ_FUP | DWCQOS_MTL_RXQ_FEP);
			dw_writel(pGmac, mtl.q_0.rxq_control, 0);
			//dw_writel(pGmac, mtl.q_0.rxq_control, i);
		} else {
			dw_writel(pGmac, mtl.q_x[i - 1].rxq_operation_mode,
			(rx_queue_size << 20) | DWCQOS_MTL_RXQ_RSF | DWCQOS_MTL_RXQ_FUP | DWCQOS_MTL_RXQ_FEP);
			//dw_writel(pGmac, mtl.q_x[i - 1].rxq_control, i);
			dw_writel(pGmac, mtl.q_x[i - 1].rxq_control, 0);
		}
	}
}


/*
 * Configure the MAC block: program the station address, enable pause-
 * frame parsing, clear every TX flow-control register, mask all MAC
 * interrupts, and write the base config. TX/RX enables are deliberately
 * left clear — they are set later in sync with PHY link-up.
 */
static void dwcqos_init_mac_hw(struct dw_qos *pGmac)
{
	int i;
	int ret;
	//mac addr low and hi..
	dwcqos_set_hw_mac_addr(pGmac, pGmac->local_mac_address);
	//mac packet filter..
	//dwcqos_set_hw_mac_filter(pGmac, 1 << 5 | 1 << 9);
	//dwcqos_set_hw_mac_filter(pGmac, 1);
	//parse pause frame.
	dwcqos_set_hw_parse_pause_frame(pGmac, 1);

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, i) {
		dwcqos_set_hw_mac_tx_flowctrl(pGmac, i, 0);
	}
	//mac interrupt..
	dwcqos_set_hw_mac_interrupt(pGmac, 0);

	//mac config
	//mac start later, should sync with phy linkup.
	/* NOTE(review): bits 27 and 9 are magic — presumably checksum
	 * offload / CRC-related config; confirm against the databook. */
	ret = 1 << 27 | 1 << 9;
	dwcqos_set_hw_mac_config(pGmac, ret);
}

/* Bring the hardware up in dependency order: DMA first (may fail on a
 * reset timeout), then MTL, then MAC. Returns 0 on success or the DMA
 * init error code.
 */
static int dwcqos_init_hw(struct dw_qos *pGmac)
{
	int err = dwcqos_init_dma_hw(pGmac);

	if (err)
		return err;

	dwcqos_init_mtl_hw(pGmac);
	dwcqos_init_mac_hw(pGmac);
	return 0;
}

/* Mark @pdesc as the first descriptor of a frame (FD bit in desc3). */
static void set_first_desc(struct dwcqos_dma_desc *pdesc)
{
	pdesc->desc3 |= DWCQOS_DMA_TDES3_FD;
}

/* Mark @pdesc as the last descriptor of a frame (LD bit in desc3). */
static void set_last_desc(struct dwcqos_dma_desc *pdesc)
{
	pdesc->desc3 |= DWCQOS_DMA_TDES3_LD;
}

/* Fill a TX descriptor for a normal (non-context) send: buffer-1 DMA
 * address in desc0, buffer length in desc2, total frame length plus the
 * checksum-insertion control (desc3 bits 17:16) in desc3. Fields are
 * OR-ed, so the descriptor must have been cleared beforehand.
 */
static void fh_qos_prepare_normal_send(struct dwcqos_dma_desc *pdesc,
u32 p_buf_add, u32 len, u32 total_len, u32 crc_flag)
{
	pdesc->desc0 |= p_buf_add;
	pdesc->desc2 |= len;
	pdesc->desc3 |= total_len | (crc_flag << 16);
}

/* Hand @pdesc to the hardware by setting the OWN bit in desc3. */
static void set_desc_dma_valid(struct dwcqos_dma_desc *pdesc)
{
	pdesc->desc3 |= DWCQOS_DMA_TDES3_OWN;
}

/* Request an interrupt-on-completion for @pdesc (IOC bit in desc2). */
static void set_desc_isr_valid(struct dwcqos_dma_desc *pdesc)
{
	pdesc->desc2 |= DWCQOS_DMA_TDES2_IOC;
}


/* Sum the data lengths carried by @pdesc: buffer-2 length lives in
 * desc2[29:16] (counted when desc1 holds an address), buffer-1 length in
 * desc2[13:0] (counted when desc0 holds an address).
 */
static u32 get_desc_data_len(struct dwcqos_dma_desc *pdesc)
{
	u32 len = 0;

	if (pdesc->desc1)
		len = (pdesc->desc2 >> 16) & 0x3fff;
	if (pdesc->desc0)
		len += pdesc->desc2 & 0x3fff;
	return len;
}
#if (0)
/* Debug helper (currently compiled out): print address and the four
 * 32-bit words of each descriptor in a @size-long array.
 */
static void dump_lli(struct dwcqos_dma_desc *desc, u32 size)
{
	u32 i;

	pr_err("dump go...\n");
	for (i = 0; i < size; i++) {
		pr_err("[desc add]  : %08x\n", (u32)&desc[i]);
		pr_err("data = %08x : %08x : %08x : %08x\n", desc[i].desc0, desc[i].desc1, desc[i].desc2, desc[i].desc3);
	}
}
#endif
static int cal_tx_desc_require(struct sk_buff *skb, struct net_tx_queue *tx_queue)
{
	// head need at least 1
	int nfrags, frag_size;
	int i;
	skb_frag_t *frag;

	int ret = 1;
	//just cal frag
	nfrags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nfrags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);
		ret += (frag_size / tx_queue->desc_xfer_max_size);
		if (frag_size % tx_queue->desc_xfer_max_size)
			ret++;
	}
	return ret;
}


static int cal_tso_tx_desc_require(struct sk_buff *skb,
struct net_tx_queue *tx_queue)
{
	int nfrags, frag_size;
	int i;
	skb_frag_t *frag;
	/*ctxt + head*/
	int ret = 2;

	nfrags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nfrags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);
		ret += (frag_size / tx_queue->desc_xfer_max_size);
		if (frag_size % tx_queue->desc_xfer_max_size)
			ret++;
	}
	return ret;
}

/*
 * Transmit a TSO (TCP segmentation offload) skb: emits one context
 * descriptor carrying the MSS, one header descriptor describing the
 * protocol headers (+ any head payload in buffer 2), and then one or
 * more descriptors per paged fragment. The first descriptor's OWN bit is
 * set last so the DMA engine never sees a half-built chain.
 *
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when the head payload
 * is too large for one descriptor or the ring lacks space.
 */
static int qos_tso_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct dwcqos_dma_desc *p_desc;
	struct dwcqos_dma_desc *first_desc = 0;
	struct dw_qos *pGmac;
	struct net_tx_queue *tx_queue;
	u32 sk_buf_dma_add;
	u32 index;
	int qno;
	int len;
	u32 ret;
	u32 each_desc_xfer_size = 0;
	skb_frag_t *frag;
	int i;
	int csum_insertion = 0;
	u32 vaild_desc_size;
	int nfrags;
	struct netdev_queue *txq;
	//tcp + ip + mac head
	u32 proto_hdr_len;
	u32 payload_len, mss;
	u32 consumed_size;
	u32 frag_size;
	u32 req_desc;

	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL) ? 3 : 0;
	pGmac = netdev_priv(ndev);
	qno = skb_get_queue_mapping(skb);
	tx_queue = &pGmac->tx_queue[qno];
	txq = netdev_get_tx_queue(ndev, qno);
	mss = skb_shinfo(skb)->gso_size;

	spin_lock(&tx_queue->tx_lock);
	payload_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
	vaild_desc_size = fh_qos_tx_desc_avail(pGmac, qno);
	nfrags = skb_shinfo(skb)->nr_frags;

	//check head payload len;
	if (payload_len > tx_queue->desc_xfer_max_size) {
		pr_err("payload_len %d > max size %d\n", payload_len, tx_queue->desc_xfer_max_size);
		spin_unlock(&tx_queue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	req_desc = cal_tso_tx_desc_require(skb, tx_queue);

	if (unlikely(vaild_desc_size < req_desc)) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it. */
		pr_err("%s: BUG! Tx Ring full when queue awake \n",
			__func__);

		pr_err("%x : %x : %x : %x\n", vaild_desc_size, req_desc, pGmac->tx_queue[qno].cur_idx, pGmac->tx_queue[qno].dirty_idx);
		spin_unlock(&tx_queue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	{
		tx_queue->mss = mss;
		/* Context descriptor: carries the MSS (CTXT + TCMSSV).
		 * Marked with the dummy skb flag so reclaim knows there is
		 * no buffer to unmap or skb to free for this slot.
		 */
		p_desc = fh_qos_get_tx_desc_cur(pGmac, qno, &index);
		//rec first desc, set dma valid at the end...
		first_desc = p_desc;
		p_desc->desc0 = 0;
		p_desc->desc1 = 0;
		p_desc->desc2 = mss;
		p_desc->desc3 = DWCQOS_DMA_TDES3_CTXT | DWCQOS_DMA_TDES3_TCMSSV;
		//set_desc_dma_valid(p_desc);
		set_tx_skbuf_vadd(pGmac, qno, index, DUMMY_SK_BUFF_FLAG);
		set_tx_skbuf_padd(pGmac, qno, index, 0);
		/*dump_lli(p_desc, 1);*/
	}
	//set head desc
	p_desc = fh_qos_get_tx_desc_cur(pGmac, qno, &index);
	//rec first desc, set dma valid at the end...
	if (!first_desc)
		first_desc = p_desc;
	else
		set_desc_dma_valid(p_desc);

	len = skb_headlen(skb);
	/* NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error() — a failed mapping would be handed to the
	 * hardware as-is.
	 */
	sk_buf_dma_add = dma_map_single(pGmac->dev, skb->data, len, DMA_TO_DEVICE);
	/* Header descriptor: TSE (bit 18), TCP header length in dwords
	 * (bits 22:19) and the total TCP payload length.
	 */
	p_desc->desc0 |= sk_buf_dma_add;
	p_desc->desc1 = 0;
	p_desc->desc2 |= proto_hdr_len;
	p_desc->desc3 |= 1 << 18 | ((tcp_hdrlen(skb) / 4) << 19) | (skb->len - proto_hdr_len);
	set_first_desc(p_desc);
	set_tx_skbuf_vadd(pGmac, qno, index, (u32)skb);
	set_tx_skbuf_padd(pGmac, qno, index, (u32)sk_buf_dma_add);
	/*dump_lli(p_desc, 1);*/

	/* Head payload beyond the headers rides in buffer 2 (desc1/desc2
	 * upper half) of the same descriptor.
	 */
	if ((len - proto_hdr_len) != 0) {
		p_desc->desc1 |= sk_buf_dma_add + proto_hdr_len;
		p_desc->desc2 |= (len - proto_hdr_len) << 16;
	}
	/*dump_lli(p_desc, 1);*/
	//3: frag data
	for (i = 0; i < nfrags; i++) {

		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);
		sk_buf_dma_add = skb_frag_dma_map(pGmac->ndev->dev.parent,
				frag, 0, frag_size, DMA_TO_DEVICE);

		/* Split each fragment into desc_xfer_max_size chunks. */
		consumed_size = 0;
		while (consumed_size < frag_size) {
			each_desc_xfer_size = min_t(size_t, frag_size - consumed_size, tx_queue->desc_xfer_max_size);
			//len -= each_desc_xfer_size;
			p_desc = fh_qos_get_tx_desc_cur(pGmac, qno, &index);
			fh_qos_prepare_normal_send(p_desc, sk_buf_dma_add + consumed_size, each_desc_xfer_size, skb->len - proto_hdr_len, 0);
			set_desc_dma_valid(p_desc);
			set_tx_skbuf_vadd(pGmac, qno, index, DUMMY_SK_BUFF_FLAG);
			set_tx_skbuf_padd(pGmac, qno, index, sk_buf_dma_add + consumed_size);
			consumed_size += each_desc_xfer_size;
		}
	}
	/* Order all descriptor writes before handing ownership to the DMA:
	 * last descriptor gets LD + IOC, then the first descriptor's OWN
	 * bit is flipped, then the tail pointer is kicked.
	 */
	wmb();
	set_last_desc(p_desc);
	//last desc make isr go
	set_desc_isr_valid(p_desc);
	set_desc_dma_valid(first_desc);
	wmb();

	netdev_tx_sent_queue(txq, skb->len);
	dwcqos_kick_tx_queue(pGmac, qno);
	/* NOTE(review): dummy mac.config read-back/write — presumably a
	 * posted-write flush; confirm intent.
	 */
	ret = dw_readl(pGmac, mac.config);
	dw_writel(pGmac, mac.config, ret);
	spin_unlock(&tx_queue->tx_lock);
	return NETDEV_TX_OK;
}

/*
 * qos_dev_xmit - ndo_start_xmit handler: queue one skb on the tx ring.
 *
 * GSO TCP frames are diverted to qos_tso_xmit(); everything else gets one
 * descriptor for the linear head plus one per page fragment.  The OWN bit
 * of the first descriptor is released only after the whole chain is built
 * (see the wmb()/set_desc_dma_valid(first_desc) sequence at the end), so
 * the DMA engine never observes a half-written chain.
 *
 * Returns NETDEV_TX_OK when queued, NETDEV_TX_BUSY when the head is larger
 * than one descriptor can carry or the ring lacks enough free descriptors.
 */
static int qos_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct dwcqos_dma_desc *p_desc;
	struct dwcqos_dma_desc *first_desc;
	struct dw_qos *pGmac;
	struct net_tx_queue *tx_queue;
	u32 sk_buf_dma_add;
	u32 index;
	int qno;
	int len;
	u32 ret;
	skb_frag_t *frag;
	int i;
	u32 req_desc;
	int csum_insertion = 0;
	u32 vaild_desc_size;
	int nfrags;
	struct netdev_queue *txq; //= netdev_get_tx_queue(ndev, queue);
	//pr_err("qos dev xmit get in...\n");
	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb)) {
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return qos_tso_xmit(skb, ndev);
	}

	//spin_lock(&txlq->tx_lock);
	pGmac = netdev_priv(ndev);
	//u16 queue = skb_get_queue_mapping(skb);
	//qno = pGmac->active_queue_index;
	qno = skb_get_queue_mapping(skb);
	tx_queue = &pGmac->tx_queue[qno];
	txq = netdev_get_tx_queue(ndev, qno);

	/*
	 * NOTE(review): plain spin_lock assumes both xmit and the NAPI
	 * reclaim path run in BH context only — confirm no hard-irq user.
	 */
	spin_lock(&tx_queue->tx_lock);
	//pr_err("xmit use qno is %d\n",qno);
	nfrags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	//spin_lock(&txlq->tx_lock);
	/* csum_insertion lands in desc3 bits [17:16]; 3 = full L3+L4 insert */
	//desc3 bit[16:17]
	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL) ? 3 : 0;
	vaild_desc_size = fh_qos_tx_desc_avail(pGmac, qno);

	/*
	 * NOTE(review): a head longer than one descriptor can carry is
	 * rejected with NETDEV_TX_BUSY, which makes the stack requeue the
	 * same skb forever — consider dropping instead; confirm intent.
	 */
	if (len > tx_queue->desc_xfer_max_size) {
		pr_err("len %d > max size %d\n", len, tx_queue->desc_xfer_max_size);
		spin_unlock(&tx_queue->tx_lock);
		return NETDEV_TX_BUSY;
	}
	req_desc = cal_tx_desc_require(skb, tx_queue);
	//+1 means head???
	if (unlikely(vaild_desc_size < req_desc)) {
		//if (!netif_queue_stopped(ndev)) {
			netif_tx_stop_queue(txq);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake \n",
				__func__);

			pr_err("%x : %x : %x : %x\n", vaild_desc_size, req_desc, pGmac->tx_queue[qno].cur_idx, pGmac->tx_queue[qno].dirty_idx);
		//}
		spin_unlock(&tx_queue->tx_lock);
		return NETDEV_TX_BUSY;
	}
	//set first desc..
	p_desc = fh_qos_get_tx_desc_cur(pGmac, qno, &index);
	//rec first desc, set dma valid at the end...
	first_desc = p_desc;
	len = skb_headlen(skb);

	/*
	 * NOTE(review): neither this dma_map_single() nor the
	 * skb_frag_dma_map() below is checked with dma_mapping_error() —
	 * TODO confirm and add an error/unwind path.
	 */
	sk_buf_dma_add = dma_map_single(pGmac->dev, skb->data, len, DMA_TO_DEVICE);
	fh_qos_prepare_normal_send(p_desc, sk_buf_dma_add, len, skb->len, csum_insertion);
	set_first_desc(p_desc);
	//pr_err("first..\n");
	//pr_err("##set skb is %x\n",(u32)skb);
	/* Only the head slot records the real skb pointer (freed on reclaim) */
	set_tx_skbuf_vadd(pGmac, qno, index, (u32)skb);
	//pr_err("!!set pdd is %x\n",(u32)sk_buf_dma_add);
	set_tx_skbuf_padd(pGmac, qno, index, (u32)sk_buf_dma_add);

	for (i = 0; i < nfrags; i++) {
		//pr_err("nfrags = %x\n",nfrags);
		frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);
		sk_buf_dma_add = skb_frag_dma_map(pGmac->ndev->dev.parent,
						frag, 0, len, DMA_TO_DEVICE);
		//pr_err("multi frag info len: %x : %x\n",skb_headlen(skb), len);
		p_desc = fh_qos_get_tx_desc_cur(pGmac, qno, &index);
		fh_qos_prepare_normal_send(p_desc, sk_buf_dma_add, len, skb->len, 0);
		/* Descriptor body must be visible before OWN is handed over */
		wmb();
		set_desc_dma_valid(p_desc);
		wmb();
		//here set deadbeef...cause skbuf only one..else is data.
		//pr_err("set deadbeef and padd is : %x\n",(u32)sk_buf_dma_add);
		/* Fragment slots carry a sentinel, not an skb pointer */
		set_tx_skbuf_vadd(pGmac, qno, index, DUMMY_SK_BUFF_FLAG);
		set_tx_skbuf_padd(pGmac, qno, index, (u32)sk_buf_dma_add);

	}
	set_last_desc(p_desc);
	set_desc_isr_valid(p_desc);
	/* Chain complete: only now give the first descriptor to the DMA */
	wmb();
	set_desc_dma_valid(first_desc);
	wmb();
	//wmb();
	netdev_tx_sent_queue(txq, skb->len);
	dwcqos_kick_tx_queue(pGmac, qno);
	/*
	 * NOTE(review): mac.config is read back and rewritten unchanged —
	 * presumably to flush posted writes; purpose not evident from here,
	 * confirm against the databook.
	 */
	ret = dw_readl(pGmac, mac.config);
	dw_writel(pGmac, mac.config, ret);
	spin_unlock(&tx_queue->tx_lock);
	return NETDEV_TX_OK;
}

/*
 * check_tx_done_desc - log an error reported by a completed tx descriptor.
 * Bits 28 and 15 of desc3 gate the report (presumably status-valid and
 * error-summary — confirm against the DWC databook).
 */
void check_tx_done_desc(struct dwcqos_dma_desc *p_desc)
{
	unsigned int status = p_desc->desc3;

	if ((status & (1 << 28)) && (status & (1 << 15)))
		pr_err("[tx] :: got err %x\n", status);
}

/*
 * tx_dirty_process - reclaim descriptors the DMA engine has finished with.
 * @tx_packet/@tx_byte: out-params, filled with the reclaimed totals.
 *
 * Unmaps each buffer, frees the skb on head slots (fragment slots hold
 * DUMMY_SK_BUFF_FLAG instead of a real pointer) and clears the bookkeeping.
 */
static void tx_dirty_process(struct dw_qos *pGmac, struct net_tx_queue *queue,
u32 *tx_packet, u32 *tx_byte)
{
	struct dwcqos_dma_desc *desc;
	struct sk_buff *sk;
	u32 buf_phys;
	u32 buf_len;
	u32 idx;
	int qno = queue->id;

	*tx_byte = 0;
	*tx_packet = 0;

	for (;;) {
		desc = fh_qos_get_tx_desc_dirty(pGmac, qno, &idx);
		if (!desc)
			break;

		check_tx_done_desc(desc);

		sk = (struct sk_buff *)get_tx_skbuf_vadd(pGmac, qno, idx);
		buf_phys = get_tx_skbuf_padd(pGmac, qno, idx);
		buf_len = get_desc_data_len(desc);

		if (buf_phys) {
			dma_unmap_single(pGmac->ndev->dev.parent, buf_phys,
			buf_len, DMA_TO_DEVICE);
		}

		/* Only head slots own the skb; sentinel marks fragment slots */
		if ((u32)sk != DUMMY_SK_BUFF_FLAG) {
			*tx_byte += sk->len;
			*tx_packet += 1;
			dev_consume_skb_any(sk);
		}

		set_tx_skbuf_vadd(pGmac, qno, idx, 0);
		set_tx_skbuf_padd(pGmac, qno, idx, 0);
	}
}


static int check_rx_packet(struct dw_qos *pGmac,
struct dwcqos_dma_desc *p_desc)
{
	int ret = csum_none;

	if (unlikely((p_desc->desc3 & DWCQOS_DMA_RDES3_ES) ||
	(p_desc->desc1 & DWCQOS_DMA_RDES1_IPCE))) {
		if (p_desc->desc3 & DWCQOS_DMA_RDES3_ERR_CRC) {
			//netdev_err(pGmac->ndev, "rx got crc err.\n");
			ret =  discard_frame;
		}

		if (p_desc->desc3 & DWCQOS_DMA_RDES3_ERR_DRIB) {
			if (pGmac->ac_reg_cfg->inf_sup == PHY_INTERFACE_MODE_MII) {
				//netdev_err(pGmac->ndev,
				//"rx got dribble err.\n");
				ret =  discard_frame;
			}
		}

		if (p_desc->desc3 & DWCQOS_DMA_RDES3_ERR_REV) {
			//netdev_err(pGmac->ndev, "rx got rev err.\n");
			ret =  discard_frame;
		}

		if (p_desc->desc3 & DWCQOS_DMA_RDES3_ERR_WDT) {
			//netdev_err(pGmac->ndev, "rx got wdt err.\n");
			ret =  discard_frame;
		}

		if (p_desc->desc3 & DWCQOS_DMA_RDES3_ERR_OV) {
			//netdev_err(pGmac->ndev, "rx got overflow err.\n");
			ret =  discard_frame;
		}

		if (p_desc->desc3 & DWCQOS_DMA_RDES3_ERR_GI) {
			//netdev_err(pGmac->ndev, "rx got Giant Packet err.\n");
			ret =  discard_frame;
		}

	}
#if (0)
	if (unlikely(ret == discard_frame))
		netdev_err(pGmac->ndev, "rx err desc3 : desc1 = %x : %x\n",
		p_desc->desc3, p_desc->desc1);
#endif

	return ret;
}

static void rx_refill_desc(struct dw_qos *pGmac, struct net_rx_queue *queue)
{
	u32 index;
	struct sk_buff *skb;
	u32 sk_buf_dma_add;
	struct dwcqos_dma_desc *p_desc;
	//int qno = pGmac->active_queue_index;
	int qno = queue->id;

	while (1) {
		p_desc = fh_qos_get_rx_desc_dirty(pGmac, qno, &index);
		if (!p_desc) {
			break;
		}
		skb = netdev_alloc_skb(pGmac->ndev,
		pGmac->rx_queue[qno].desc_xfer_max_size);
		if (unlikely(skb == NULL)) {
			pr_err("%s :: no mem to alloc skb\n", __func__);
			//should dirty_idx-- but here should find why no mem alloc
			BUG_ON(1);
			break;
		}

		sk_buf_dma_add = dma_map_single(pGmac->dev, skb->data,
		pGmac->rx_queue[qno].desc_xfer_max_size,
		DMA_FROM_DEVICE);
		wmb();
		p_desc->desc0 = (u32)sk_buf_dma_add;
		p_desc->desc3 = DWCQOS_DMA_RDES3_BUF1V | DWCQOS_DMA_RDES3_OWN | DWCQOS_DMA_RDES3_INTE;
		wmb();
		set_rx_skbuf_vadd(pGmac, qno, index, (u32)skb);
		set_rx_skbuf_padd(pGmac, qno, index, (u32)sk_buf_dma_add);
		dwcqos_kick_rx_queue(pGmac, qno);
	}
}

/*
 * rx_valid_process - deliver received frames to the stack.
 * @pGmac: driver private data
 * @queue: rx queue being polled
 * @limit: NAPI budget, maximum number of good frames to deliver
 *
 * Walks the descriptors the DMA engine has handed back.  The buffer is
 * now unmapped exactly once in both paths; previously the discard path
 * freed the skb while it was still DMA-mapped and leaked the mapping.
 *
 * Returns the number of frames passed to netif_receive_skb().
 */
static int rx_valid_process(struct dw_qos *pGmac, struct net_rx_queue *queue, int limit)
{
	int packet_prcess = 0;
	struct dwcqos_dma_desc *p_desc;
	struct sk_buff *skb;
	u32 sk_buf_dma_add;
	int frame_len;
	int status;
	u32 index;

	int qno = queue->id;

	while (packet_prcess < limit) {
		p_desc = fh_qos_get_rx_desc_cur(pGmac, qno, &index);
		if (!p_desc)
			break;
		status = check_rx_packet(pGmac, p_desc);
		skb = (struct sk_buff *)get_rx_skbuf_vadd(pGmac, qno, index);

		/*
		 * The mapping is dead whether the frame is delivered or
		 * discarded, so unmap before branching (bug fix: the old
		 * code skipped the unmap on the discard path).
		 */
		sk_buf_dma_add = get_rx_skbuf_padd(pGmac, qno, index);
		dma_unmap_single(pGmac->dev,
		sk_buf_dma_add,
		pGmac->rx_queue[qno].desc_xfer_max_size, DMA_FROM_DEVICE);

		if (status == discard_frame) {
			pGmac->ndev->stats.rx_errors++;
			dev_kfree_skb(skb);
			/* Discarded frames do not consume NAPI budget. */
			continue;
		}

		/* desc3[14:0] is the frame length; strip the trailing FCS */
		frame_len = p_desc->desc3 & 0x7fff;
		frame_len -= ETH_FCS_LEN;

		skb_put(skb, frame_len);
		skb->protocol = eth_type_trans(skb, pGmac->ndev);

		/* These payload types had their checksum validated by hw */
		switch (p_desc->desc1 & DWCQOS_DMA_RDES1_PT) {
		case DWCQOS_DMA_RDES1_PT_UDP:
		case DWCQOS_DMA_RDES1_PT_TCP:
		case DWCQOS_DMA_RDES1_PT_ICMP:
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		default:
			skb->ip_summed = CHECKSUM_NONE;
			break;
		}

		pGmac->ndev->stats.rx_packets++;
		pGmac->ndev->stats.rx_bytes += frame_len;
		netif_receive_skb(skb);

		packet_prcess++;
	}
	dwcqos_kick_rx_queue(pGmac, qno);
	return packet_prcess;
}

/*
 * fh_gmac_rx_napi_process - NAPI poll handler.
 *
 * Reclaims finished tx descriptors (does not count toward the budget),
 * wakes the tx queue if it was stopped and space is available, then
 * processes received frames and refills the rx ring.  Interrupts for the
 * channel are re-enabled once the budget is not exhausted.
 *
 * Fix: use the per-queue netif_tx_queue_stopped(txq) test to match the
 * per-queue netif_tx_stop_queue()/netif_tx_wake_queue() calls used
 * elsewhere; the old netif_queue_stopped(ndev) only looked at queue 0.
 */
static int fh_gmac_rx_napi_process(struct napi_struct *napi, int budget)
{
	struct dw_qos *pGmac;
	/* The driver is configured with a single rx and tx channel. */
	u32 rx_qno = 0;
	u32 tx_qno = 0;
	int work_done = 0;
	struct net_rx_queue *rx_queue;
	struct net_tx_queue *tx_queue;
	struct netdev_queue *txq;

	pGmac = container_of(napi, struct dw_qos, napi);
	rx_queue = &pGmac->rx_queue[rx_qno];
	tx_queue = &pGmac->tx_queue[tx_qno];

	txq = netdev_get_tx_queue(pGmac->ndev, tx_queue->id);

	/* Tx reclaim always drains all dirty descriptors; no budget used. */
	spin_lock(&tx_queue->tx_lock);
	fh_tx_cleanup(pGmac, tx_queue->id);
	spin_unlock(&tx_queue->tx_lock);

	if (netif_tx_queue_stopped(txq) &&
		fh_qos_tx_desc_avail(pGmac, tx_queue->id)) {
		netif_tx_wake_queue(txq);
	}

	spin_lock(&rx_queue->rx_lock);
	work_done = rx_valid_process(pGmac, rx_queue, budget);
	rx_refill_desc(pGmac, rx_queue);
	spin_unlock(&rx_queue->rx_lock);

	if (work_done < budget) {
		napi_complete(napi);
		/* Re-arm the channel interrupts masked in the hard-irq path */
		dwcqos_dma_isr_enable_set(pGmac, rx_qno,
			DWCQOS_DMA_CH_IE_NIE |
			DWCQOS_DMA_CH_IE_AIE |
			DWCQOS_DMA_CH_IE_FBEE|
			DWCQOS_DMA_CH_IE_RIE |
			DWCQOS_DMA_CH_IE_RBUE|
			DWCQOS_DMA_CH_IE_TIE);
	}

	return work_done;
}

/*
 * dwcqos_mtl_isr_process - read, log and acknowledge the MTL interrupt
 * status of every rx queue.
 */
static void dwcqos_mtl_isr_process(struct dw_qos *pGmac)
{
	u32 qno;
	int status;

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, qno) {
		status = dwcqos_get_hw_mtl_isr_status(pGmac, qno);
		pr_err("[mtl isr] :: %x\n", status);
		/* Write the status back to clear the pending bits */
		dwcqos_set_hw_mtl_isr_status(pGmac, qno, status);
	}
}

/*
 * dwcqos_read_mmc_counters - drain the hardware MMC counter block.
 *
 * Per the original TBD note, the counter registers must be read to clear
 * the MMC interrupt status; the copied values are currently discarded.
 * The mask parameters are accepted for future use but unused (fix: the
 * dead local p_u32 that aliased the copy buffer has been removed).
 *
 * NOTE(review): memcpy() from memory-mapped registers bypasses readl();
 * a per-register readl loop would be safer — TODO confirm on this SoC.
 */
static void dwcqos_read_mmc_counters(struct dw_qos *pGmac, u32 rx_mask,
				      u32 tx_mask, u32 lpc_mask)
{
	struct dw_mac_mmc_regs mmc;
	struct dw_mac_mmc_regs *regs;

	(void)rx_mask;
	(void)tx_mask;
	(void)lpc_mask;

	regs = &(((struct dw_qos_regs_map *)pGmac->regs)->mac.mmc);
	/* Barriers keep the register reads ordered around the copy */
	wmb();
	memcpy(&mmc, regs, sizeof(struct dw_mac_mmc_regs));
	wmb();
}

/*
 * dwcqos_mac_isr_process - handle MAC-level interrupts.
 * Only the MMC interrupt is serviced: the pending masks are read and the
 * counter block is drained, which acknowledges the interrupt.
 */
static void dwcqos_mac_isr_process(struct dw_qos *pGmac)
{
	u32 isr_status;
	u32 mmc_rx;
	u32 mmc_tx;
	u32 mmc_ipc;

	isr_status = dwcqos_mac_get_interrupt_status(pGmac);
	if (isr_status & DWCQOS_MAC_IS_MMC_INT) {
		mmc_rx = dw_readl(pGmac, mac.mmc.rx_interrupt);
		mmc_tx = dw_readl(pGmac, mac.mmc.tx_interrupt);
		mmc_ipc = dw_readl(pGmac, mac.mmc.mmc_ipc_rx_interrupt);
		dwcqos_read_mmc_counters(pGmac, mmc_rx, mmc_tx, mmc_ipc);
	}
}

/*
 * fh_gmac_common_interrupt - shared hard-irq handler.
 *
 * The low eight bits of the DMA interrupt status flag per-channel events;
 * rx/tx completion is deferred to NAPI (the channel's interrupts are
 * masked until the poll re-enables them).  MTL and MAC causes are handled
 * inline.
 */
static irqreturn_t fh_gmac_common_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct dw_qos *pGmac = netdev_priv(ndev);
	int irq_handle = IRQ_NONE;
	int dma_status;
	int cause;
	u32 qno;

	cause = dwcqos_dma_get_interrupt_status(pGmac);

	for (qno = 0; qno < 8; qno++) {
		if (!(cause & (1 << qno)))
			continue;

		dma_status = dwcqos_dma_isr_status_get(pGmac, qno);
		if (dma_status & (DWCQOS_DMA_CH_IS_RI | DWCQOS_DMA_CH_IS_RBU | DWCQOS_DMA_CH_IS_TI)) {
			/* Mask the channel and hand the work to NAPI */
			if (likely(napi_schedule_prep(&pGmac->napi))) {
				dwcqos_dma_isr_enable_set(pGmac, qno, 0);
				__napi_schedule(&pGmac->napi);
			}
		}

		dwcqos_dma_isr_status_set(pGmac, qno, dma_status);
		irq_handle = IRQ_HANDLED;
	}

	if (cause & DWCQOS_DMA_IS_MTLIS) {
		dwcqos_mtl_isr_process(pGmac);
		irq_handle = IRQ_HANDLED;
	}

	if (cause & DWCQOS_DMA_IS_MACIS) {
		dwcqos_mac_isr_process(pGmac);
		irq_handle = IRQ_HANDLED;
	}

	return irq_handle;
}

/*
 * qos_dev_open - ndo_open handler: arm interrupts, start PHY and NAPI.
 *
 * The MAC rx/tx paths are intentionally not enabled here; they are turned
 * on later, synchronized with the PHY link-up, so the carrier starts off.
 */
static int qos_dev_open(struct net_device *ndev)
{
	struct dw_qos *priv = netdev_priv(ndev);
	u32 chan;

	netif_carrier_off(ndev);

	/* Common abnormal/fatal-error enables for every DMA channel */
	DWCQOS_FOR_EACH_QUEUE(max_t(size_t, priv->hw_fea.rxq_num, priv->hw_fea.txq_num), chan) {
		dwcqos_dma_isr_enable_set(priv, chan,
		DWCQOS_DMA_CH_IE_NIE |
		DWCQOS_DMA_CH_IE_AIE |
		DWCQOS_DMA_CH_IE_FBEE);
	}

	DWCQOS_FOR_EACH_QUEUE(priv->hw_fea.txq_num, chan) {
		dwcqos_dma_isr_tx_set(priv, chan, 1);
	}

	DWCQOS_FOR_EACH_QUEUE(priv->hw_fea.rxq_num, chan) {
		dwcqos_dma_isr_rx_set(priv, chan, 1);
		dwcqos_kick_rx_queue(priv, chan);
	}

	phy_start(priv->phydev);
	napi_enable(&priv->napi);
	netif_tx_start_all_queues(ndev);

	return 0;
}

static int qos_dev_stop(struct net_device *ndev)
{
	u32 qno;
	struct netdev_queue *txq;
	struct dw_qos *pGmac = netdev_priv(ndev);

	napi_disable(&(pGmac->napi));

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, qno) {
		//dwcqos_dma_rx_enable_set(pGmac, qno, 0);
		dwcqos_dma_isr_enable_set(pGmac, qno, 0);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, qno) {
		//dwcqos_dma_tx_enable_set(pGmac, qno, 0);
		txq = netdev_get_tx_queue(ndev, qno);
		netif_tx_stop_queue(txq);
	}

	phy_stop(pGmac->phydev);

	return 0;
}

/*
 * fh_tx_cleanup - reclaim finished tx work for one queue.
 * Caller holds the queue's tx_lock (see fh_gmac_rx_napi_process()).
 *
 * Returns the number of reclaimed packets.
 */
static int fh_tx_cleanup(struct dw_qos *pGmac, int qno)
{
	struct net_tx_queue *tx_q = &pGmac->tx_queue[qno];
	struct netdev_queue *txq = netdev_get_tx_queue(pGmac->ndev, qno);
	u32 done_bytes = 0;
	u32 done_packets = 0;

	tx_dirty_process(pGmac, tx_q, &done_packets, &done_bytes);

	/* Feed BQL and the device stats with what was reclaimed */
	netdev_tx_completed_queue(txq, done_packets, done_bytes);
	pGmac->ndev->stats.tx_packets += done_packets;
	pGmac->ndev->stats.tx_bytes += done_bytes;

	return done_packets;
}

static int fh_qos_gmac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct dw_qos *pGmac;
	int ret;
	void __iomem *regs;
	int i;
	int rx_queue_no;
	int tx_queue_no;
	int feature2;

#ifdef CONFIG_USE_OF
	struct device_node *np = pdev->dev.of_node;
	struct fh_gmac_platform_data *p_plat_data;

	p_plat_data = fh_qos_parse_dts_node(pdev);
	if (!p_plat_data) {
		pr_err("%s: cannot get platform data\n", __func__);
		return  -ENXIO;
	}

	regs = (void __iomem *)of_iomap(np, 0);
	if (!regs) {
		pr_err("%s: ERROR: memory mapping failed\n", __func__);
		return -ENOMEM;
	}
	feature2 = __qos_raw_readl(&(((struct dw_qos_regs_map *)regs)->mac.hw_feature_2));
	pdev->dev.platform_data = (void *)p_plat_data;
#else
	regs = fh_qos_get_ctl_regs_resource(pdev);
	if (!regs)
		return -ENOMEM;
	feature2 = __qos_raw_readl(&(((struct dw_qos_regs_map *)regs)->mac.hw_feature_2));
#endif

	rx_queue_no = (feature2 & 0xf) + 1;
	tx_queue_no = ((feature2 >> 6) & 0xf) + 1;
	//force only one rx channel and tx channel
	rx_queue_no = 1;
	tx_queue_no = 1;
	ndev = alloc_etherdev_mqs(sizeof(struct dw_qos),
			tx_queue_no, rx_queue_no);
	if (!ndev) {
		pr_err("%s: ERROR: allocating the device\n", __func__);
		ret = -ENOMEM;
		return ret;
	}
	pGmac = netdev_priv(ndev);

	ret  = fh_qos_parse_plat_info(ndev, pdev, regs);
	if (ret)
		goto open_error;

	dwcqos_init_sw(ndev, pdev);
	dwcqos_desc_init(ndev);

	if (auto_find_phy(ndev)) {
		pr_err("find no phy !!!!!!!");
		goto free_desc_info;
	}

	if (dwcqos_init_hw(pGmac))
		goto free_desc_info;

	if (!is_valid_ether_addr(pGmac->local_mac_address)) {
		/* Use random MAC if none passed */
		random_ether_addr(pGmac->local_mac_address);
		pr_warning("\tusing random MAC address: %pM\n",
		pGmac->local_mac_address);
	}

	dwcqos_set_hw_mac_addr(pGmac, pGmac->local_mac_address);
	if (auto_find_phy(ndev)) {
		pr_err("find no phy !!!!!!!");
		goto free_desc_info;
	}

	/* MDIO bus Registration */
	ret = fh_qos_mdio_register(ndev);
	if (ret < 0) {
		pr_err("mdio register err..\n");
		goto free_desc_info;
	}

	pGmac->txtimeout_handler_wq = alloc_workqueue("fh_qos_gmac",
	WQ_MEM_RECLAIM, 0);
	if (!pGmac->txtimeout_handler_wq)
		BUG_ON(1);
	INIT_WORK(&pGmac->txtimeout_reinit, dwcqos_reinit_for_txtimeout);


	ret = request_irq(pGmac->common_irq_no, fh_gmac_common_interrupt,
	IRQF_SHARED, pGmac->common_irq_name, ndev);
	BUG_ON(ret);
	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the netdevice\n", __func__, ret);
		ret = -ENODEV;
		goto free_desc_info;
	}

	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
		   "\t\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
		   pdev->id, ndev->irq, pGmac->regs);

	return 0;

free_desc_info:
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, i) {
		fh_qos_txq_free_skb(ndev, i);
		fh_qos_txq_free_desc(ndev, i);
	}
	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, i) {
		fh_qos_rxq_free_skb(ndev, i);
		fh_qos_rxq_free_desc(ndev, i);
	}
	dwcqos_reset_dma_hw(pGmac);

	kfree(pGmac->tx_queue);
	kfree(pGmac->rx_queue);
open_error:
	free_netdev(ndev);
	return ret;
}

static int fh_qos_gmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct dw_qos *pGmac = netdev_priv(ndev);
	u32 qno;

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.rxq_num, qno) {
		fh_qos_rxq_free_skb(ndev, qno);
		fh_qos_rxq_free_desc(ndev, qno);
	}

	DWCQOS_FOR_EACH_QUEUE(pGmac->hw_fea.txq_num, qno) {
		fh_qos_txq_free_skb(ndev, qno);
		fh_qos_txq_free_desc(ndev, qno);
	}
	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
	mdiobus_unregister(pGmac->mii);
	mdiobus_free(pGmac->mii);
	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}




/* System-sleep entry: delegate the whole quiesce sequence to the helper. */
static int  pm_fh_qos_gmac_suspend(struct device *dev)
{
	struct net_device *net_dev = dev_get_drvdata(dev);

	fh_qos_suspend(netdev_priv(net_dev));

	return 0;
}

/*
 * System-resume entry: pulse the board's PHY reset line when one is
 * provided, then restore the controller state.
 */
static int  pm_fh_qos_gmac_resume(struct device *dev)
{
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dw_qos *priv = netdev_priv(net_dev);

	if (priv->ac_phy_info->phy_reset)
		priv->ac_phy_info->phy_reset();

	fh_qos_resume(priv);
	return 0;
}

/* System sleep hooks (legacy .suspend/.resume pair). */
static  const struct dev_pm_ops fh_qos_gmac_pm_ops = {
	.suspend	= pm_fh_qos_gmac_suspend,
	.resume		= pm_fh_qos_gmac_resume,
};


/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id fh_qos_gmac_of_match[] = {
	{.compatible = "fh,fh-qos-gmac",},
	{},
};

MODULE_DEVICE_TABLE(of, fh_qos_gmac_of_match);

/* Platform-driver glue: probe/remove plus PM ops and OF matching. */
static struct platform_driver fh_qos_gmac_driver = {
	.driver = {
		.name = "fh_qos_gmac",
		.of_match_table = fh_qos_gmac_of_match,
		.pm = &fh_qos_gmac_pm_ops,
	},
	.probe = fh_qos_gmac_probe,
	.remove = fh_qos_gmac_remove,
};

/* Registration: presumably the deferred variant moves driver init out of
 * the critical boot path when the config option is set — confirm against
 * the platform's initcall support. */
#ifdef CONFIG_DEFERRED_INITCALLS_GMAC
deferred_module_platform_driver(fh_qos_gmac_driver);
#else
module_platform_driver(fh_qos_gmac_driver);
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("ZHANGY");
MODULE_DESCRIPTION("Fullhan Ethernet driver");
