/*
 *  Soul SureSave ETH10G HBA Linux driver
 *  Copyright(2009-2015) Soul Tech.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 * 
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/version.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/highmem.h>

/* Build/version string injected by the build system via -DGITVERSION. */
static char *rep_version = GITVERSION;

/*
 * Driver-local debug print: prefixes every netdev_dbg() message with the
 * emitting function name and source line number.
 */
#define eth_dbg(__dev, format, args...) \
do { \
	netdev_dbg(__dev, "%s:%d " format, __func__, __LINE__, ##args); \
} while (0)

/*
 * Force-enable Large Receive Offload support in this driver.
 * NOTE(review): defining a CONFIG_* symbol inside driver source shadows the
 * kernel Kconfig namespace; a driver-local name would be safer — confirm no
 * clash with the real Kconfig option.
 */
#define CONFIG_INET_LRO 1
#ifdef CONFIG_INET_LRO
#include <linux/inet_lro.h>
/* State of the per-device LRO aggregation engine (see struct eth_local). */
enum lro_state {
	ETH_LRO_INIT,	/* LRO manager not active; packets go via netif_rx() */
	ETH_LRO_NORM,	/* LRO active; packets go via lro_receive_skb() */
};
#endif

#include "xaxidma.h"
#include "xaxidma_bdring.h"
#include "xdebug.h"
#include "mac.h"
#include "spiflash.h"

#define DRIVER_NAME		"SureSave ETH10G"
#define DRIVER_DESCRIPTION	"Soul SureSave ETH10G HBA Linux driver"
#define DRIVER_VERSION		"1.00a"
static const char eth_copyright[] = "Copyright(2009-2015) Soul Tech.";
/* Compile-time switches: extra debug dumps and HW checksum offload paths. */
#define ETH_DEBUG		0
#define RX_HW_CSUM		1
#define TX_HW_CSUM		1
#define ETH_SG_ENABLE   0
/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
#define TX_BD_NUM	16384
#define RX_BD_NUM	16384

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define ETH_TX_THRESHOLD  24
#define ETH_TX_WAITBOUND  254
#define ETH_RX_THRESHOLD  12
#define ETH_RX_WAITBOUND  254

#define TX_TIMEOUT   (10*HZ)	/* Transmission timeout is 10 seconds. */
#define RESET_TIMEOUT_COUNTER 10000
/*
 * Queues with locks: devices whose completed RX/TX descriptors still need
 * bottom-half processing are linked onto these global lists by the ISR and
 * drained by the DmaRecvHandlerBH/DmaSendHandlerBH tasklets.
 */
static LIST_HEAD(receivedQueue);
static spinlock_t receivedQueueSpin = __SPIN_LOCK_UNLOCKED(receivedQueueSpin);

static LIST_HEAD(sentQueue);
static spinlock_t sentQueueSpin = __SPIN_LOCK_UNLOCKED(sentQueueSpin);

/* Default interrupt masks for the DMA RX/TX channels (all causes enabled). */
u32 dma_rx_int_mask = XAXIDMA_IRQ_ALL_MASK;
u32 dma_tx_int_mask = XAXIDMA_IRQ_ALL_MASK;

/* for exclusion of all program flows (processes, ISRs and BHs) */
spinlock_t ETH_spinlock = __SPIN_LOCK_UNLOCKED(ETH_spinlock);
spinlock_t ETH_tx_spinlock = __SPIN_LOCK_UNLOCKED(ETH_tx_spinlock);
spinlock_t ETH_rx_spinlock = __SPIN_LOCK_UNLOCKED(ETH_rx_spinlock);

/*
 * ethtool has a status reporting feature where we can report any sort of
 * status information we'd like. This is the list of strings used for that
 * status reporting. ETH_GSTRING_LEN is defined in ethtool.h
 */
/*
 * ethtool statistics key strings, reported via get_strings/get_ethtool_stats.
 * Order must match the order the driver fills the corresponding u64 array.
 */
static char eth_ethtool_gstrings_stats[][ETH_GSTRING_LEN] = {
    "txpkts", "txdropped", "txerr", "txfifoerr",
    "rxpkts", "rxdropped", "rxerr", "rxfifoerr",
    "rxrejerr", "max_frags", "tx_hw_csums", "rx_hw_csums",
};

/*
 * Number of ethtool statistics entries.  Fully parenthesized so the macro
 * expands safely inside any surrounding expression: the previous unguarded
 * form `sizeof(x) / ETH_GSTRING_LEN` mis-associates in contexts such as
 * `N % ETH_STATS_LEN` or `X / ETH_STATS_LEN`.
 */
#define ETH_STATS_LEN (sizeof(eth_ethtool_gstrings_stats) / ETH_GSTRING_LEN)

/* Forward declarations for routines referenced before their definitions. */
static int eth_DmaSend_internal(struct sk_buff *skb, struct net_device *ndev);
static void eth_DmaSetupRecvBuffers(struct net_device *ndev);
static void eth_set_mac_address(struct net_device *ndev, void *address);

/*
 * Checksum offload macros
 */
/*
 * Enable TX checksum offload on a buffer descriptor.
 * Sets bit 1 of the USR0 application word and forces bit 0 clear.
 * NOTE(review): presumably bit 1 is the HW "csum enable" flag and bit 0 must
 * be zero — confirm against the DMA core's app-word specification.
 */
#define BdCsumEnable(BdPtr) \
	XAxiDma_BdWrite((BdPtr), XAXIDMA_BD_USR0_OFFSET,             \
		((XAxiDma_BdRead((BdPtr), XAXIDMA_BD_USR0_OFFSET)) | 0x2) & 0xFFFFFFFE)

/* Used for debugging */
#define BdCsumEnabled(BdPtr) \
	((XAxiDma_BdRead((BdPtr), XAXIDMA_BD_USR0_OFFSET)) & 1)

/* Clear bit 0 of USR0 (checksum offload disabled for this descriptor). */
#define BdCsumDisable(BdPtr) \
	XAxiDma_BdWrite((BdPtr), XAXIDMA_BD_USR0_OFFSET,             \
		(XAxiDma_BdRead((BdPtr), XAXIDMA_BD_USR0_OFFSET)) & 0xFFFFFFFE )

/* Program csum start offset (upper 16 bits) and insert offset (lower 16). */
#define BdCsumSetup(BdPtr, Start, Insert) \
    XAxiDma_BdWrite((BdPtr), XAXIDMA_BD_USR1_OFFSET, ((Start) << 16) | (Insert))

/* Used for debugging */
#define BdCsumInsert(BdPtr) \
    (XAxiDma_BdRead((BdPtr), XAXIDMA_BD_USR1_OFFSET) & 0xffff)

/*
 * Write the checksum seed word.
 * NOTE(review): the Seed argument is ignored and 0 is always written —
 * verify this is intentional (seed fixed at zero) and not a lost parameter.
 */
#define BdCsumSeed(BdPtr, Seed) \
    XAxiDma_BdWrite((BdPtr), XAXIDMA_BD_USR2_OFFSET, 0)

/* Read the raw RX checksum computed by hardware (lower 16 bits of USR3). */
#define BdCsumGet(BdPtr) \
    (XAxiDma_BdRead((BdPtr), XAXIDMA_BD_USR3_OFFSET) & 0xffff)

/* Read the received frame length reported by hardware (lower 16 bits). */
#define BdGetRxLen(BdPtr) \
    (XAxiDma_BdRead((BdPtr), XAXIDMA_BD_USR4_OFFSET) & 0xffff)

/*
 * Halt both DMA channels by clearing the run/stop bit in the TX and RX
 * channel control registers (read-modify-write of each CR).
 *
 * Wrapped in do { } while (0) — the previous bare-brace form expands to a
 * compound statement, which breaks when the macro is used as the body of an
 * unbraced if/else (the caller's trailing `;` becomes a stray statement).
 */
#define AxiDma_Stop(BaseAddress)	\
do {			\
	XAxiDma_WriteReg(BaseAddress, XAXIDMA_TX_OFFSET + XAXIDMA_CR_OFFSET,	\
	(XAxiDma_ReadReg(BaseAddress, XAXIDMA_TX_OFFSET + XAXIDMA_CR_OFFSET) &	\
			(~XAXIDMA_CR_RUNSTOP_MASK)));	\
	XAxiDma_WriteReg(BaseAddress, XAXIDMA_RX_OFFSET + XAXIDMA_CR_OFFSET,	\
	(XAxiDma_ReadReg(BaseAddress, XAXIDMA_RX_OFFSET + XAXIDMA_CR_OFFSET) &	\
			(~XAXIDMA_CR_RUNSTOP_MASK)));	\
} while (0)
    
/*
 * Our private per device data.  When a net_device is allocated we will
 * ask for enough extra space for this.
 */
/*
 * Our private per device data.  When a net_device is allocated we will
 * ask for enough extra space for this.
 */
struct eth_local {
	struct timer_list watchdog_timer;	/* periodic link/housekeeping timer */
	struct work_struct tx_timeout_task;	/* deferred TX-timeout recovery */
	struct list_head rcv;	/* link on global receivedQueue (RX BH work) */
	struct list_head xmit;	/* link on global sentQueue (TX BH work) */
	
	struct net_device *ndev;	/* back-pointer to owning net_device */
	struct device *dev;
	struct pci_dev *pdev;		/* underlying PCI function */
	u32 link_speed;
	u32 link_duplex;

	/* IO registers, dma functions and IRQs */
	u32 base;		/* BAR physical base */
	u32 base_len;
	void __iomem *reg_base;	/* ioremapped register window */

	u32 ctl_start;
	u32 ctl_len;
	void __iomem *ctl_base;

	XAxiDma AxiDma;		/* Xilinx AXI DMA engine instance */
	unsigned int frame_size;	/* max frame size for RX buffers */

	/* skb parked when the TX BD ring was full; resent from the TX BH */
	struct sk_buff *deferred_skb;
	struct net_device_stats stats;

	/* Buffer descriptors (coherent DMA memory: virtual + bus address) */
	XAxiDma_Bd *tx_bd_v;
	dma_addr_t tx_bd_p;
	u32 tx_bd_size;
	XAxiDma_Bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 rx_bd_size;
	
	/* Options */
	u32 options;
	unsigned long local_features;

	/* Status */
	int max_frags_in_a_packet;	/* high-water mark of frags per TX skb */
	unsigned long tx_hw_csums;	/* count of HW-offloaded TX checksums */
	unsigned long rx_hw_csums;	/* count of HW-offloaded RX checksums */

	/* Spinlock protecting DMA engine (re)configuration */
	spinlock_t hw_lock;

	/* Version */
	unsigned long fw_version;

#ifdef CONFIG_INET_LRO
#define MAX_LRO_DESCRIPTORS 8
#define LRO_MAX_AGGR        64
	enum lro_state lro_state;	/* whether RX path feeds the LRO engine */
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[MAX_LRO_DESCRIPTORS];
#endif
};

/* eth_pci_tbl - PCI Device ID Table
 * 
 * wildcard entries (PCI_ANY_ID) should come last
 * last entry must be all 0s
 * 
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 *
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated in current kernels;
 * prefer `static const struct pci_device_id eth_pci_table[]` when the
 * minimum supported kernel allows it.
 */
static DEFINE_PCI_DEVICE_TABLE(eth_pci_table) = {
	{0x1172, 0xe001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{},
};

MODULE_DEVICE_TABLE(pci, eth_pci_table);

/* Local Function Prototypes */
static int eth_descriptor_init(struct eth_local *lp);
static void eth_descriptor_free(struct net_device *ndev);
static void eth_led_ctrl(struct net_device *ndev);
/* The callback function for completed frames sent in SGDMA mode. */
static void DmaSendHandlerBH(unsigned long p);
static void DmaRecvHandlerBH(unsigned long p);

/* Global tasklets draining the sentQueue/receivedQueue device lists;
 * scheduled from the TX/RX interrupt handlers. */
DECLARE_TASKLET(DmaSendBH, DmaSendHandlerBH, 0);
DECLARE_TASKLET(DmaRecvBH, DmaRecvHandlerBH, 0);

/*
 * Debug helper: dump a BD ring's software state, channel registers, and the
 * raw contents of every buffer descriptor in the ring.
 * Called from the error paths of the TX/RX interrupt handlers before the
 * DMA engine is reset.  Holds ETH_spinlock for the whole (long) dump.
 * NOTE(review): the printk() calls carry no KERN_* level, and
 * dma_cache_sync(NULL, ...) passes a NULL device — both work on some
 * kernels/arches but should be confirmed for the target platform.
 */
static void disp_bd_ring(XAxiDma_BdRing *bd_ring)
{
	unsigned long flags;
	int num_bds = bd_ring->AllCnt;
	u32 *cur_bd_ptr = (u32 *) bd_ring->FirstBdAddr;
	int idx;
	spin_lock_irqsave(&ETH_spinlock, flags);
	printk("ChanBase       : %p\n", (void *) bd_ring->ChanBase);
	printk("FirstBdPhysAddr: %p\n", (void *) bd_ring->FirstBdPhysAddr);
	printk("FirstBdAddr    : %p\n", (void *) bd_ring->FirstBdAddr);
	printk("LastBdAddr     : %p\n", (void *) bd_ring->LastBdAddr);
	printk("Length         : %d (0x%0x)\n", bd_ring->Length, bd_ring->Length);
	printk("RunState       : %d (0x%0x)\n", bd_ring->RunState, bd_ring->RunState);
	printk("Separation     : %d (0x%0x)\n", bd_ring->Separation, bd_ring->Separation);
	printk("BD Count       : %d\n", bd_ring->AllCnt);
	printk("\n");

	printk("FreeHead       : %p\n", (void *) bd_ring->FreeHead);
	printk("PreHead        : %p\n", (void *) bd_ring->PreHead);
	printk("HwHead         : %p\n", (void *) bd_ring->HwHead);
	printk("HwTail         : %p\n", (void *) bd_ring->HwTail);
	printk("PostHead       : %p\n", (void *) bd_ring->PostHead);
	printk("BdaRestart     : %p\n", (void *) bd_ring->BdaRestart);

	printk("\n");
	printk("CR             : %08x\n", XAxiDma_ReadReg(bd_ring->ChanBase, XAXIDMA_CR_OFFSET));
	printk("SR             : %08x\n", XAxiDma_ReadReg(bd_ring->ChanBase, XAXIDMA_SR_OFFSET));
	printk("CDESC          : %08x\n", XAxiDma_ReadReg(bd_ring->ChanBase, XAXIDMA_CDESC_OFFSET));
	printk("TDESC          : %08x\n", XAxiDma_ReadReg(bd_ring->ChanBase, XAXIDMA_TDESC_OFFSET));

	printk("\n");
	printk("Ring Contents:\n");

	/* Pull the descriptor memory in from the device before reading it. */
	dma_cache_sync(NULL, cur_bd_ptr, bd_ring->Length, DMA_FROM_DEVICE);
/*
* Buffer Descriptor layout (word / byte offset / meaning):
* word byte    description
* 0    0h      next ptr
* 1    4h      buffer addr
* 2    8h      buffer len
* 3    ch      sts/ctrl | app data (0) [tx csum enable (bit 31 LSB)]
* 4    10h     app data (1) [tx csum begin (bits 0-15 MSB) | csum insert (bits 16-31 LSB)]
* 5    14h     app data (2) [tx csum seed (bits 16-31 LSB)]
* 6    18h     app data (3) [rx raw csum (bits 16-31 LSB)]
* 7    1ch     app data (4) [rx recv length (bits 18-31 LSB)]
* 8    20h     sw app data (0) [id]
*/
	printk("Idx  NextBD  BuffAddr   CRTL    STATUS    APP0     APP1     APP2     APP3     APP4      ID\n");
	printk("--- -------- -------- -------- -------- -------- -------- -------- -------- -------- --------\n");
	for (idx = 0; idx < num_bds; idx++) {
	printk("%3d %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		idx,
		cur_bd_ptr[XAXIDMA_BD_NDESC_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_BUFA_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_CTRL_LEN_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_STS_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_USR0_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_USR1_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_USR2_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_USR3_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_USR4_OFFSET / sizeof(*cur_bd_ptr)],
		cur_bd_ptr[XAXIDMA_BD_ID_OFFSET / sizeof(*cur_bd_ptr)]);
		/* advance to the next descriptor (Separation bytes apart) */
		cur_bd_ptr += bd_ring->Separation / sizeof(int);
	}
	printk("--------------------------------------- Done ---------------------------------------\n");	
	spin_unlock_irqrestore(&ETH_spinlock, flags);
}

/*
 * Build the XAxiDma_Config structure describing this device's DMA engine.
 *
 * The hardware capability registers are read and logged, but the actual
 * configuration is hard-coded to the known synthesis parameters of this
 * design (SG mode, DRE, 64-bit data paths, one channel each way).  The
 * previous revision carried the register-decoding variant inside an
 * `#if 0` block; that dead code has been removed.
 *
 * Returns a pointer to a function-local `static` config.
 * NOTE: not reentrant — every call returns the same object, so this must
 * only be used from serialized probe/open paths (as it is today, under
 * lp->hw_lock in eth_open()).
 */
static XAxiDma_Config *AxiDma_Config(struct eth_local *lp)
{
	static XAxiDma_Config Cfg;
	u32 cfg_info, data_width;
	XAxiDma_Config *CfgPtr = &Cfg;

	/* Read and log the capability registers for diagnostics only. */
	cfg_info = XAxiDma_ReadReg((u32)lp->reg_base, ETH_CFG_INFO_OFFSET);
	data_width = XAxiDma_ReadReg((u32)lp->reg_base, ETH_CFG_DATA_WIDTH_OFFSET);

	printk("cfg_info: %08x, data_width: %08x\n", cfg_info, data_width);
	CfgPtr->BaseAddr = (u32)(lp->reg_base + AXI_DMA_REG);
	CfgPtr->DeviceId = 0xe001;

	/* Fixed configuration matching the synthesized DMA core. */
	CfgPtr->HasMm2S = 1;
	CfgPtr->HasMm2SDRE = 1;
	CfgPtr->HasS2Mm = 1;
	CfgPtr->HasS2MmDRE = 1;
	CfgPtr->HasSg = 1;
	CfgPtr->HasStsCntrlStrm = 1;
	CfgPtr->Mm2SDataWidth = 64;
	CfgPtr->Mm2sNumChannels = 1;
	CfgPtr->S2MmDataWidth = 64;
	CfgPtr->S2MmNumChannels = 1;

	return CfgPtr;
}

/*
 * Bring up the MAC: select the PHY to talk to, verify the PHY chip ID,
 * enable RX pad/CRC handling, and clear the TX/RX statistics counters.
 * Always returns XST_SUCCESS (the chip-ID read is logged, not checked).
 */
static int eth_mac_start(void *reg_base)
{
	/*
     * Read AEL2020 CHIP ID, Address: 1.C205, CHIP ID should be 0x0211
     * PHY1 Address: 0xC2050001, PHY2 Address: 0xC2050101
     */
#ifndef PHY2
	XAxiDma_WriteReg((u32)reg_base + MAC_ADDR_BASE + 0x10000, 0x84, 0xC2050001);
#else
	XAxiDma_WriteReg((u32)reg_base + MAC_ADDR_BASE + 0x10000, 0x84, 0xC2050101);
#endif
	/* allow the MDIO transaction to complete before reading the result */
	mdelay(50);
    printk("AEL2020 CHIP ID:%x\n", XAxiDma_ReadReg((u32)reg_base + 
				MAC_ADDR_BASE + 0x10000,0x80));
	mdelay(50);
    /*MAC RX PAD CRC Control*/
    XAxiDma_WriteReg((u32)reg_base + MAC_ADDR_BASE, RX_PADCRC_CTL_REG, 3);

    /*Set MAC Clear Status: reset TX and RX statistics counters*/
    XAxiDma_WriteReg((u32)reg_base + MAC_ADDR_BASE, TX_STATS_CLS_REG, 0x01);
    XAxiDma_WriteReg((u32)reg_base + MAC_ADDR_BASE, RX_STATS_CLS_REG, 0x01);

    return XST_SUCCESS;
}

/*
 * Full adapter recovery: stop the queue, save the DMA coalesce settings,
 * reset the DMA engine, restore coalescing, re-enable channel interrupts,
 * drop any deferred TX skb, restart the MAC, and wake the queue.
 * @line_num identifies the call site for the log message.
 * NOTE(review): the reset-done poll below spins RESET_TIMEOUT_COUNTER times
 * with no cpu_relax()/delay between register reads — the effective timeout
 * is therefore CPU-speed dependent; confirm this is adequate for the HW.
 */
static void eth_reset(struct net_device *ndev, u32 line_num)
{
	unsigned long flags;
	u32 TxThreshold, TxWaitBound, RxThreshold, RxWaitBound;
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;
	int status, TimeOut;
	static u32 reset_cnt = 0;	/* lifetime count of resets, for the log */
	int RingIndex = 0;
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);
	RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);

	netdev_err(ndev, "%s: XLlTemac: resets (#%u) from adapter code line %d\n",
	       ndev->name, ++reset_cnt, line_num);

	/* Shouldn't really be necessary, but shouldn't hurt. */
	netif_stop_queue(ndev);

	/*
	 * Capture the dma coalesce settings (if needed) and reset the
	 * connected core, dma or fifo
	 */
	XAxiDma_BdRingGetCoalesce(RxRingPtr, &RxThreshold, &RxWaitBound);
	XAxiDma_BdRingGetCoalesce(TxRingPtr, &TxThreshold, &TxWaitBound);

	XAxiDma_Reset(&lp->AxiDma);
	
	/* Poll for reset completion (bounded busy-wait). */
	TimeOut = RESET_TIMEOUT_COUNTER;

	while (TimeOut) {
		if(XAxiDma_ResetIsDone(&lp->AxiDma)) {
			break;
		}
		TimeOut -= 1;
	}
	if (!TimeOut) {
		/* Reset never completed: leave the queue stopped and bail. */
		xdbg_printf(XDBG_DEBUG_ERROR, "Failed reset in timeout\r\n");
		return ;
	}
	
	/* Restore the coalesce settings captured before the reset. */
	status = XAxiDma_BdRingSetCoalesce(RxRingPtr, RxThreshold, RxWaitBound);
	status |= XAxiDma_BdRingSetCoalesce(TxRingPtr, TxThreshold, TxWaitBound);
	if (status != XST_SUCCESS) {
		/* Print the error, but keep on going as it's not a fatal error. */
		netdev_err(ndev, "%s: XLlTemac: error setting coalesce values (probably out of range). status: %d\n",
		       ndev->name, status);
	}
	XAxiDma_mBdRingIntEnable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);
	XAxiDma_mBdRingIntEnable(TxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	/* The deferred skb was never handed to HW; the reset makes it stale. */
	if (lp->deferred_skb) {
		dev_kfree_skb_any(lp->deferred_skb);
		lp->deferred_skb = NULL;
		lp->stats.tx_errors++;
	}

	spin_lock_irqsave(&ETH_spinlock, flags);
	eth_mac_start(lp->reg_base);
	spin_unlock_irqrestore(&ETH_spinlock, flags);

	/* We're all ready to go.  Start the queue in case it was stopped. */
	netif_wake_queue(ndev);
}

/*
 * RX channel interrupt service.  Acks the pending causes, resets the DMA
 * engine on an error interrupt, and on a normal delay/completion interrupt
 * queues this device on receivedQueue and kicks the RX tasklet (with the
 * channel interrupts masked until the tasklet drains the ring).
 * NOTE(review): after an error-triggered reset the function returns without
 * re-enabling channel interrupts or restarting the rings — presumably
 * recovery is completed elsewhere (watchdog/tx_timeout); confirm.
 */
static irqreturn_t eth_dma_rx_interrupt(struct net_device *ndev, u32 irq_status)
{
	struct list_head *cur_lp;
	unsigned long flags;
	int RingIndex = 0;
	XAxiDma_BdRing *RingPtr;
	int TimeOut;
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);

	RingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	/*Clear IRQ status*/
	XAxiDma_mBdRingAckIrq(RingPtr, irq_status);

	if ((irq_status & XAXIDMA_ERR_ALL_MASK)) {
		netdev_alert(ndev, "RXIRQ error sts %08x\n", irq_status);
		disp_bd_ring(RingPtr);
		XAxiDma_Reset(&lp->AxiDma);
		/*                                                                         
		 * Reset should never fail for transmit channel                                          
		 */ 
		TimeOut = RESET_TIMEOUT_COUNTER;

		while (TimeOut) {
			if (XAxiDma_ResetIsDone(&lp->AxiDma)) {
				break;
			}
			TimeOut -= 1;                                                                        
        }                    
		if (!TimeOut) {
			netdev_err(ndev, "Failed reset in timeout\r\n");
			return IRQ_HANDLED;
		}
		return IRQ_HANDLED;
	}

	if ((irq_status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK))) {
		spin_lock_irqsave(&receivedQueueSpin, flags);
		/* Only enqueue if this device is not already on the list. */
		list_for_each(cur_lp, &receivedQueue) {
			if (cur_lp == &(lp->rcv)) {
				break;
			}
		}
		if (cur_lp != &(lp->rcv)) {
			list_add_tail(&lp->rcv, &receivedQueue);
			/* Masked here; re-enabled by DmaRecvHandlerBH when done. */
			XAxiDma_mBdRingIntDisable(RingPtr,
						 XAXIDMA_IRQ_ALL_MASK);
			tasklet_schedule(&DmaRecvBH);
		}
		spin_unlock_irqrestore(&receivedQueueSpin, flags);
	}
	return IRQ_HANDLED;
}

/*
 * TX channel interrupt service.  Mirrors eth_dma_rx_interrupt(): ack the
 * causes, reset the engine on an error interrupt, otherwise queue the
 * device on sentQueue and schedule the TX tasklet with channel interrupts
 * masked until DmaSendHandlerBH has reclaimed the completed descriptors.
 */
static irqreturn_t eth_dma_tx_interrupt(struct net_device *ndev, u32 irq_status)
{
	struct list_head *cur_lp;
	unsigned long flags;
	XAxiDma_BdRing *RingPtr;
	int TimeOut;
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);

	RingPtr = XAxiDma_GetTxRing(&lp->AxiDma);
	/*Clear IRQ status*/
	XAxiDma_mBdRingAckIrq(RingPtr, irq_status);

	if ((irq_status & XAXIDMA_ERR_ALL_MASK)) {
		netdev_alert(ndev, "TXIRQ error sts %08x\n", irq_status);
		disp_bd_ring(RingPtr);
		XAxiDma_Reset(&lp->AxiDma);
		/*                                                                         
         * Reset should never fail for transmit channel                                          
         */ 
        TimeOut = RESET_TIMEOUT_COUNTER;

		while (TimeOut) {
			if (XAxiDma_ResetIsDone(&lp->AxiDma)) {
                break;
            } 
            TimeOut -= 1;                                                                        
        }                       
		if (!TimeOut) {
			netdev_err(ndev, "Failed reset in timeout\r\n");
			return IRQ_HANDLED;
		}
		return IRQ_HANDLED;
	}

	if ((irq_status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK))) {
		spin_lock_irqsave(&sentQueueSpin, flags);
		/* Only enqueue if this device is not already on the list. */
		list_for_each(cur_lp, &sentQueue) {
			if (cur_lp == &(lp->xmit)) {
 				break;
			}
		}
		if (cur_lp != &(lp->xmit)) {
			list_add_tail(&lp->xmit, &sentQueue);
			/* Masked here; re-enabled by DmaSendHandlerBH when done. */
			XAxiDma_mBdRingIntDisable(RingPtr, XAXIDMA_IRQ_ALL_MASK);
			tasklet_schedule(&DmaSendBH);
		}
		spin_unlock_irqrestore(&sentQueueSpin, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Top-level (shared) IRQ handler.  Reads the status register of each DMA
 * channel and dispatches to the TX/RX channel services.  Returns IRQ_NONE
 * when neither channel has a pending cause (the line is shared).
 */
static irqreturn_t eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct eth_local *lp = netdev_priv(ndev);
	XAxiDma_BdRing *tx_ring = XAxiDma_GetTxRing(&lp->AxiDma);
	XAxiDma_BdRing *rx_ring = XAxiDma_GetRxRing(&lp->AxiDma, 0);
	irqreturn_t handled = IRQ_NONE;
	u32 tx_sts, rx_sts;

	tx_sts = XAxiDma_ReadReg(tx_ring->ChanBase, XAXIDMA_SR_OFFSET);
	rx_sts = XAxiDma_ReadReg(rx_ring->ChanBase, XAXIDMA_SR_OFFSET);

	/* Not ours: no pending cause on either channel. */
	if (!((tx_sts | rx_sts) & XAXIDMA_IRQ_ALL_MASK))
		return handled;

	netdev_dbg(ndev, "IrqStatusTx: %x, IrqStatusRx: %x\n", tx_sts, rx_sts);

	/* Service whichever channels raised the interrupt. */
	if (tx_sts & XAXIDMA_IRQ_ALL_MASK)
		handled = eth_dma_tx_interrupt(ndev, tx_sts);

	if (rx_sts & XAXIDMA_IRQ_ALL_MASK)
		handled = eth_dma_rx_interrupt(ndev, rx_sts);

	return handled;
}

/*
 * TX completion bottom half (tasklet body).  Drains every device queued on
 * sentQueue: reclaims completed descriptors from the HW ring, unmaps and
 * frees the transmitted skbs, updates counters, returns the descriptors to
 * the free pool, re-enables TX channel interrupts (masked by the ISR), and
 * retries any skb that was deferred because the ring was full.
 * @p: unused tasklet argument.
 */
static void DmaSendHandlerBH(unsigned long p)
{
	struct net_device *ndev;
	struct eth_local *lp;
	XAxiDma_Bd *BdPtr, *BdCurPtr;
	int len;
	unsigned long flags;
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int result = XST_SUCCESS;
	unsigned int bd_processed, bd_processed_save;
	XAxiDma_BdRing *RingPtr;

	while (1) {
		/* Pop the next device with pending TX completions, if any. */
		spin_lock_irqsave(&sentQueueSpin, flags);
		if (list_empty(&sentQueue)) {
			spin_unlock_irqrestore(&sentQueueSpin, flags);
			break;
		}

		lp = list_entry(sentQueue.next, struct eth_local, xmit);
		RingPtr = XAxiDma_GetTxRing(&lp->AxiDma);

		list_del_init(&(lp->xmit));
		spin_unlock_irqrestore(&sentQueueSpin, flags);

		/* Serialize ring reclaim against eth_send()'s alloc/commit. */
		spin_lock_irqsave(&ETH_tx_spinlock, flags);
		ndev = lp->ndev;
		bd_processed_save = 0;
		while ((bd_processed = XAxiDma_BdRingFromHw(RingPtr, TX_BD_NUM, &BdPtr)) > 0) {
			eth_dbg(ndev, "bd_processed %d, %p\n", bd_processed, BdPtr);

			bd_processed_save = bd_processed;
			BdCurPtr = BdPtr;
			do {
				len = XAxiDma_BdGetLength(BdCurPtr, RingPtr->MaxTransferLen);
				skb_dma_addr = (dma_addr_t) XAxiDma_BdGetBufAddr(BdCurPtr);
				dma_unmap_single(ndev->dev.parent, skb_dma_addr, len,
						 DMA_TO_DEVICE);

				/* get ptr to skb; only the first BD of a frame carries it */
				skb = (struct sk_buff *)XAxiDma_BdGetId(BdCurPtr);
				eth_dbg(ndev, "cur %p, len %d, skb %p, dma %llx\n",
						BdCurPtr, len, skb,
						(uint64_t)skb_dma_addr);
				
				if (skb)
					dev_kfree_skb(skb);

				/* reset BD id */
				XAxiDma_BdSetId(BdCurPtr, NULL);

				lp->stats.tx_bytes += len;
				/* count one packet per end-of-frame descriptor */
				if (XAxiDma_BdGetCtrl(BdCurPtr) & XAXIDMA_BD_CTRL_TXEOF_MASK) {
					lp->stats.tx_packets++;
				}

				BdCurPtr = XAxiDma_mBdRingNext(RingPtr, BdCurPtr);
				bd_processed--;
			} while (bd_processed > 0);

			/* Return the reclaimed descriptors to the free pool. */
			result = XAxiDma_BdRingFree(RingPtr, bd_processed_save, BdPtr);
			if (result != XST_SUCCESS) {
				netdev_err(ndev, "%s: XAxiDma: BdRingFree() error %d.\n",
				       ndev->name, result);
				XAxiDma_Reset(&lp->AxiDma);
				spin_unlock_irqrestore(&ETH_tx_spinlock, flags);
				return;
			}
		}
		/* Re-enable the TX interrupts the ISR masked before scheduling us. */
		XAxiDma_mBdRingIntEnable(RingPtr, XAXIDMA_IRQ_ALL_MASK);

		/* Send out the deferred skb if it exists */
		if ((lp->deferred_skb) && bd_processed_save) {
			skb = lp->deferred_skb;
			lp->deferred_skb = NULL;

			result = eth_DmaSend_internal(skb, ndev);
		}

		if (result == XST_SUCCESS) {
			netif_wake_queue(ndev);	/* wake up send queue */
		}
		spin_unlock_irqrestore(&ETH_tx_spinlock, flags);
	}
}


/*
 * RX completion bottom half (tasklet body).  Drains every device queued on
 * receivedQueue: takes completed descriptors from the HW ring, unmaps the
 * buffers, builds and delivers the skbs (via LRO or netif_rx), returns the
 * descriptors, replenishes the RX ring, and re-enables RX interrupts.
 * @p: unused tasklet argument.
 */
static void DmaRecvHandlerBH(unsigned long p)
{
	struct net_device *ndev;
	struct eth_local *lp;
	struct sk_buff *skb;
	u32 len, skb_baddr;
	int result;
	unsigned long flags;
	XAxiDma_Bd *BdPtr, *BdCurPtr;
	unsigned int bd_processed, bd_processed_saved;
	int RingIndex = 0;
	XAxiDma_BdRing *RingPtr;
#ifdef CONFIG_INET_LRO
	/* NOTE(review): never reset inside the loop, so once set it stays
	 * set for all subsequent devices in this run — harmless (extra
	 * flush) but presumably unintended; confirm. */
	bool lro_flush_needed = false;
#endif

	while (1) {
		/* Pop the next device with pending RX completions, if any. */
		spin_lock_irqsave(&receivedQueueSpin, flags);
		if (list_empty(&receivedQueue)) {
			spin_unlock_irqrestore(&receivedQueueSpin, flags);
			break;
		}
		lp = list_entry(receivedQueue.next, struct eth_local, rcv);
		RingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
		
		list_del_init(&(lp->rcv));
		spin_unlock_irqrestore(&receivedQueueSpin, flags);
		ndev = lp->ndev;

		spin_lock_irqsave(&ETH_rx_spinlock, flags);
		if ((bd_processed = XAxiDma_BdRingFromHw(RingPtr, RX_BD_NUM, &BdPtr)) > 0) {
			eth_dbg(ndev, "bd_processed %d, %p\n", bd_processed, BdPtr);

			bd_processed_saved = bd_processed;
			BdCurPtr = BdPtr;
			do {
				/* frame length as reported by hardware in the app word */
				len = BdGetRxLen(BdCurPtr);

				/* get ptr to skb */
				skb = (struct sk_buff *)XAxiDma_BdGetId(BdCurPtr);
				eth_dbg(ndev, "BdCurPtr %p, len %d, skb %p, %08x\n",
						BdCurPtr, len, skb, XAxiDma_BdGetCtrl(BdCurPtr));

				/* get and free up dma handle used by skb->data */
				skb_baddr = (dma_addr_t) XAxiDma_BdGetBufAddr(BdCurPtr);
				dma_unmap_single(ndev->dev.parent, skb_baddr,
						 len, DMA_FROM_DEVICE);

				/* reset ID */
				XAxiDma_BdSetId(BdCurPtr, NULL);

				/* setup received skb and send it upstream */
				skb_put(skb, len);	/* Tell the skb how much data we got. */
				skb->dev = ndev;

				/* this routine adjusts skb->data to skip the header */
				skb->protocol = eth_type_trans(skb, ndev);
#if RX_HW_CSUM
				/* default the ip_summed value */
				skb->ip_summed = CHECKSUM_NONE;
				
				/* if we're doing rx csum offload, set it up */
				/* NOTE(review): a raw HW checksum is stored in
				 * skb->csum yet ip_summed is set to
				 * CHECKSUM_UNNECESSARY (which ignores skb->csum);
				 * CHECKSUM_COMPLETE may be the intended semantic —
				 * confirm against the HW csum definition. */
				if ((skb->protocol == __constant_htons(ETH_P_IP)) &&
					(skb->len > 64)) {
					unsigned int csum;

					csum = BdCsumGet(BdCurPtr);
					skb->csum = csum;
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					lp->rx_hw_csums++;
				}
#endif
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += len;
#ifdef CONFIG_INET_LRO
				if (lp->lro_state == ETH_LRO_NORM) {
					lro_receive_skb(&lp->lro_mgr, skb, 0);
					lro_flush_needed = true;
				} else {
					result = netif_rx(skb);	/* Send the packet upstream. */
				}
#else
				result = netif_rx(skb);	/* Send the packet upstream. */
#endif

#if ETH_DEBUG
				eth_dbg(ndev, "RX LEN: %d, res %d\n", len, result);
				print_hex_dump(KERN_DEBUG, "RX ", DUMP_PREFIX_ADDRESS, 
						16, 1, skb->data, min_t(int, len, 32), 1);
#endif
				BdCurPtr = XAxiDma_mBdRingNext(RingPtr, BdCurPtr);
				bd_processed--;
			} while (bd_processed > 0);

#ifdef CONFIG_INET_LRO
			if (lro_flush_needed)
				lro_flush_all(&lp->lro_mgr);
#endif
			/* give the descriptor back to the driver */
			result = XAxiDma_BdRingFree(RingPtr, bd_processed_saved, BdPtr);
			if (result != XST_SUCCESS) {
				netdev_err(ndev, "%s: XAxiDma: BdRingFree unsuccessful (%d)\n",
				       ndev->name, result);
				XAxiDma_Reset(&lp->AxiDma);
				spin_unlock_irqrestore(&ETH_rx_spinlock, flags);
				return;
			}

			/* refill the RX ring with fresh buffers */
			eth_DmaSetupRecvBuffers(ndev);
		}
		/* Re-enable the RX interrupts the ISR masked before scheduling us. */
		XAxiDma_mBdRingIntEnable(RingPtr, XAXIDMA_IRQ_ALL_MASK);
		spin_unlock_irqrestore(&ETH_rx_spinlock, flags);
	}
}

/*
 * Register the device's single shared PCI interrupt line with
 * eth_interrupt() as the handler.
 * Returns 0 on success or the negative errno from request_irq().
 *
 * Fix vs. original: the failure message said "request tx_irq failed",
 * which was misleading — there is only one shared IRQ for both DMA
 * directions; the message now names the IRQ and the errno.
 */
static int eth_irq_setup(struct net_device *ndev)
{
	int res;
	struct eth_local *lp = netdev_priv(ndev);

	eth_dbg(ndev, "irq %d", lp->pdev->irq);
	res = request_irq(lp->pdev->irq, eth_interrupt, IRQF_SHARED,
			DRIVER_NAME, lp->ndev);
	if (res)
		dev_err(lp->dev, "request_irq %d failed: %d\n",
			lp->pdev->irq, res);
	return res;
}

/*
 * net_device open (ndo_open) handler.  Initializes the AXI DMA engine and
 * descriptor rings, programs interrupt coalescing, starts the MAC, hooks
 * the shared IRQ, starts both DMA channels, then enables channel
 * interrupts and the TX queue.
 * Returns XST_SUCCESS, or XST_FAILURE on any initialization error.
 *
 * Fix vs. original: the early error paths (CfgInitialize failure, simple-
 * mode detection, and descriptor-init failure) returned or jumped out
 * while still holding lp->hw_lock with local IRQs disabled, deadlocking
 * any later lock acquisition; every error path now releases the lock.
 * Failures after IRQ/descriptor setup also undo that setup instead of
 * leaking it.
 */
static int eth_open(struct net_device *ndev)
{
	unsigned long flags;
	int RingIndex = 0;
	int err;
	u32 options;
	XAxiDma_Config *Config;
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;
	struct eth_local *lp = (struct eth_local *)netdev_priv(ndev);
	RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);

	netif_carrier_off(ndev);

	spin_lock_irqsave(&lp->hw_lock, flags);
	Config = AxiDma_Config(lp);
	/* Initialize DMA engine */
	err = XAxiDma_CfgInitialize(&lp->AxiDma, Config);
	if (err != XST_SUCCESS) {
		netdev_err(ndev, "Cfg initialize failed\n");
		goto err_unlock;
	}

	/* Only scatter-gather mode is supported by this driver. */
	if (!XAxiDma_HasSg(&lp->AxiDma)) {
		netdev_info(ndev, "Device configured as Simple mode \r\n");
		goto err_unlock;
	}

	err = eth_descriptor_init(lp);
	if (err != XST_SUCCESS) {
		netdev_err(ndev, "descriptor init failed\n");
		spin_unlock_irqrestore(&lp->hw_lock, flags);
		goto err_free_desc;
	}

	/* set the packet threshold and wait bound for both TX/RX directions;
	 * failures here are logged but non-fatal (defaults stay in effect) */
	err = XAxiDma_BdRingSetCoalesce(TxRingPtr, ETH_TX_THRESHOLD, ETH_TX_WAITBOUND);
	if (err != XST_SUCCESS) {
		netdev_err(ndev,
		       "XAxiDma: could not set SEND pkt threshold/waitbound, ERROR %d", err);
	}
	err = XAxiDma_BdRingSetCoalesce(RxRingPtr, ETH_RX_THRESHOLD, ETH_RX_WAITBOUND);
	if (err != XST_SUCCESS) {
		netdev_err(ndev,
		       "XAxiDma: Could not set RECV pkt threshold/waitbound ERROR %d", err);
	}
	spin_unlock_irqrestore(&lp->hw_lock, flags);

	netif_stop_queue(ndev);

	INIT_LIST_HEAD(&lp->rcv);
	INIT_LIST_HEAD(&lp->xmit);

	options = lp->options;
	options |= ETH_FLOW_CONTROL_OPTION;

	spin_lock_irqsave(&ETH_spinlock, flags);
	eth_mac_start(lp->reg_base);
	spin_unlock_irqrestore(&ETH_spinlock, flags);

	netdev_dbg(ndev, "Read DMA DBG(0x240): %08x\n",
			XAxiDma_ReadReg((u32)lp->reg_base + AXI_DMA_REG, 0x240));
	netdev_dbg(ndev, "Read MAC GIT(0x10c): %08x\n",
			XAxiDma_ReadReg((u32)lp->reg_base + MAC_ADDR_BASE, 0x10c));

	/* Hook the shared IRQ; bail out (and free descriptors) on failure. */
	err = eth_irq_setup(ndev);
	if (err)
		goto err_free_desc;

	if (XAxiDma_BdRingStart(TxRingPtr, RingIndex) == XST_FAILURE) {
		netdev_err(ndev, "%s: XAxiDma: could not start dma tx channel\n", ndev->name);
		goto err_free_irq;
	}
	if (XAxiDma_BdRingStart(RxRingPtr, RingIndex) == XST_FAILURE) {
		netdev_err(ndev, "%s: XAxiDma: could not start dma rx channel\n", ndev->name);
		goto err_free_irq;
	}
	netdev_dbg(ndev, "Tx CR(0x00): %x, Rx CR(0x30): %x\n",
			XAxiDma_ReadReg((u32)lp->reg_base + AXI_DMA_REG, XAXIDMA_TX_OFFSET),
			XAxiDma_ReadReg((u32)lp->reg_base + AXI_DMA_REG, XAXIDMA_RX_OFFSET));

	/* We're ready to go. */
	netif_start_queue(ndev);

	mod_timer(&lp->watchdog_timer, jiffies);

	/*
	 * Enable interrupt enable bits for a channel. It modifies the
	 * XAXIDMA_CR_OFFSET register.
	 */
	XAxiDma_mBdRingIntEnable(TxRingPtr, XAXIDMA_IRQ_ALL_MASK);
	XAxiDma_mBdRingIntEnable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	return XST_SUCCESS;

err_free_irq:
	free_irq(lp->pdev->irq, ndev);
err_free_desc:
	eth_descriptor_free(ndev);
	AxiDma_Stop((u32)(lp->reg_base + AXI_DMA_REG));
	return XST_FAILURE;
err_unlock:
	spin_unlock_irqrestore(&lp->hw_lock, flags);
	return XST_FAILURE;
}

/*
 * net_device stop (ndo_stop) handler.  Reverses eth_open(): stops the TX
 * queue and watchdog, masks and stops both DMA channels, frees the
 * descriptor rings and the IRQ, and unlinks this device from the global
 * BH work queues.  Always returns XST_SUCCESS.
 */
static int eth_close(struct net_device *ndev)
{
	unsigned long flags;
	int RingIndex = 0;
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);
	RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);
	
	/* Stop Send queue */
	netif_stop_queue(ndev);
	/* NOTE(review): register 0x8000 is written 0 here with no named
	 * constant — presumably a global device-disable; confirm vs HW map. */
	XAxiDma_WriteReg((u32)lp->reg_base, 0x8000, 0x0);

	del_timer_sync(&lp->watchdog_timer);
	netif_carrier_off(ndev);
	/*
	* Clear interrupt enable bits for a channel. It modifies the
	* XAXIDMA_CR_OFFSET register.
	*/
	XAxiDma_mBdRingIntDisable(TxRingPtr, XAXIDMA_IRQ_ALL_MASK);
	XAxiDma_mBdRingIntDisable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	/*Stop AXI DMA Engine*/
	AxiDma_Stop((u32)(lp->reg_base + AXI_DMA_REG));
	
	if (ndev)
		eth_descriptor_free(ndev);

	/*
	 * Free the interrupt - not polled mode.
	 */
	free_irq(lp->pdev->irq, ndev);

	/* Remove any pending BH work references to this device. */
	spin_lock_irqsave(&receivedQueueSpin, flags);
	list_del(&(lp->rcv));
	spin_unlock_irqrestore(&receivedQueueSpin, flags);

	spin_lock_irqsave(&sentQueueSpin, flags);
	list_del(&(lp->xmit));
	spin_unlock_irqrestore(&sentQueueSpin, flags);

	return XST_SUCCESS;
}

/*
 * Map an skb (header + page fragments) onto TX buffer descriptors and
 * commit them to the DMA engine.  Caller must hold ETH_tx_spinlock.
 *
 * On ring exhaustion the skb is parked in lp->deferred_skb and the queue
 * is stopped; DmaSendHandlerBH resends it once descriptors free up.
 * Returns XST_SUCCESS on commit, XST_FAILURE on unrecoverable errors, or
 * the XAxiDma_BdRingAlloc() error code when the skb was deferred.
 *
 * NOTE(review): dma_map_single() results are logged when above 4 GiB but
 * never checked with dma_mapping_error(); and on the BdRingToHw failure
 * path the already-mapped buffers are not unmapped — confirm/fix upstream.
 */
static int eth_DmaSend_internal(struct sk_buff *skb, struct net_device *ndev)
{
	int result;
	int total_frags;
	int i;
	size_t len;
	dma_addr_t phy_addr;
	XAxiDma_Bd *bd_ptr, *first_bd_ptr, *last_bd_ptr;
	skb_frag_t *frag;
	void *virt_addr;
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);
	int RingIndex = 0;
	XAxiDma_BdRing *RingPtr = XAxiDma_GetTxRing(&lp->AxiDma);
	
	/* get skb_shinfo(skb)->nr_frags + 1 buffer descriptors */
	total_frags = skb_shinfo(skb)->nr_frags + 1;
	if (total_frags > 1)
		eth_dbg(ndev, "total_frags: %d DmaSend Use SG I/O\n", total_frags);

    /* stats: track the largest fragment count seen on a single packet */
    if (lp->max_frags_in_a_packet < total_frags) {
        lp->max_frags_in_a_packet = total_frags;
    }

	if (total_frags < TX_BD_NUM) {
		result = XAxiDma_BdRingAlloc(RingPtr, total_frags, &bd_ptr);
		if (result != XST_SUCCESS) {
			netif_stop_queue(ndev);	/* stop send queue */

			/* buffer the sk_buffer and will send it in interrupt context */
			lp->deferred_skb = skb;
			return result;
		}
	} else {
		/* more fragments than the whole ring can ever hold: drop */
		dev_kfree_skb(skb);
		lp->stats.tx_dropped++;
		netdev_err(ndev, "%s: XAxiDma: could not send TX socket buffers (too many fragments).\n", 
				ndev->name);
		return XST_FAILURE;
	}

	len = skb_headlen(skb);

	/* get the physical address of the header */
	phy_addr = dma_map_single(ndev->dev.parent, skb->data, len, DMA_TO_DEVICE);
	if ((phy_addr >> 32)) {
		netdev_alert(ndev, "send packet phys_addr %llx", (uint64_t)phy_addr);
	}

#if ETH_DEBUG
	eth_dbg(ndev, "%d: %d, skb %p, dma %llx\n", 0, len, skb, phy_addr);
	print_hex_dump(KERN_DEBUG, "TX ", DUMP_PREFIX_ADDRESS, 
			16, 1, skb->data, min_t(int, 32, len), 1);
#endif
	/* get the header fragment, it's in the skb differently */
	XAxiDma_BdSetBufAddr(bd_ptr, phy_addr);
	XAxiDma_BdSetLength(bd_ptr, len, RingPtr->MaxTransferLen);
	/* only the first BD carries the skb pointer, reclaimed in the TX BH */
	XAxiDma_BdSetId(bd_ptr, skb);

#if TX_HW_CSUM
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* program HW csum start/insert offsets from the skb metadata */
		unsigned int csum_start_off = skb_transport_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		BdCsumEnable(bd_ptr);
		BdCsumSetup(bd_ptr, csum_start_off, csum_index_off);
		lp->tx_hw_csums++;
	} else {
		/*
		 * This routine will do no harm even if hardware checksum capability is
		 * off.
		 */
		BdCsumDisable(bd_ptr);
	}
#endif

	first_bd_ptr = bd_ptr;
	last_bd_ptr = bd_ptr;

	frag = &skb_shinfo(skb)->frags[0];

	/* map each page fragment onto its own descriptor */
	for (i = 1; i < total_frags; i++, frag++) {
		bd_ptr = XAxiDma_mBdRingNext(RingPtr, bd_ptr);
		last_bd_ptr = bd_ptr;

		virt_addr = page_address(frag->page) + frag->page_offset;
		phy_addr = dma_map_single(ndev->dev.parent, virt_addr, frag->size, DMA_TO_DEVICE);
		if ((phy_addr >> 32)) {
			netdev_alert(ndev, "send packet phys_addr %llx", (uint64_t)phy_addr);
		}

#if ETH_DEBUG
		eth_dbg(ndev, "%d: %d, skb %p, dma %llx\n", i, len, skb, phy_addr);
		print_hex_dump(KERN_DEBUG, "TX ", DUMP_PREFIX_ADDRESS,
				16, 1, virt_addr, min_t(int, 32, len), 1);
#endif

		XAxiDma_BdSetBufAddr(bd_ptr, phy_addr);
		XAxiDma_BdSetLength(bd_ptr, frag->size, RingPtr->MaxTransferLen);
		XAxiDma_BdSetId(bd_ptr, NULL);
#if TX_HW_CSUM
		BdCsumDisable(bd_ptr);
#endif
		XAxiDma_BdSetCtrl(bd_ptr,0);
	}

	/* mark frame boundaries: SOF on the first BD, EOF on the last */
	if (first_bd_ptr == last_bd_ptr) {
		XAxiDma_BdSetCtrl(last_bd_ptr, XAXIDMA_BD_CTRL_ALL_MASK);
	} else {
		XAxiDma_BdSetCtrl(first_bd_ptr, XAXIDMA_BD_CTRL_TXSOF_MASK);
		XAxiDma_BdSetCtrl(last_bd_ptr, XAXIDMA_BD_CTRL_TXEOF_MASK);
	}

	/* Enqueue to HW */
	result = XAxiDma_BdRingToHw(RingPtr, total_frags,
				   first_bd_ptr, RingIndex);
	if (result != XST_SUCCESS) {
		netif_stop_queue(ndev);	/* stop send queue */
		dev_kfree_skb(skb);
		XAxiDma_BdSetId(first_bd_ptr, NULL);
		lp->stats.tx_dropped++;
		netdev_err(ndev, "%s: XLlTemac: could not send commit TX buffer descriptor (%d).\n",
		       ndev->name, result);
		XAxiDma_Reset(&lp->AxiDma);

		return XST_FAILURE;
	}

	ndev->trans_start = jiffies;

	return XST_SUCCESS;
}

/* The send function for frames sent in DMA mode (ndo_start_xmit hook). */
static int eth_send(struct sk_buff *skb, struct net_device *ndev)
{
	/*
	 * The spin_lock protects the SgAlloc/SgCommit sequence, which also
	 * runs in the DmaSendHandlerBH bottom half and may be entered
	 * concurrently by another processor in the SMP case.
	 *
	 * NOTE(review): the return value of eth_DmaSend_internal() is
	 * intentionally ignored -- its visible error paths free or buffer
	 * the skb themselves -- so the stack always sees XST_SUCCESS
	 * (presumably 0 == NETDEV_TX_OK; confirm XST_SUCCESS's value).
	 */
	spin_lock_bh(&ETH_tx_spinlock);
	eth_DmaSend_internal(skb, ndev);
	spin_unlock_bh(&ETH_tx_spinlock);

	return XST_SUCCESS;
}

/* ndo_get_stats hook: expose the counters kept in the private data. */
static struct net_device_stats *eth_get_stats(struct net_device *ndev)
{
	struct eth_local *priv = netdev_priv(ndev);

	return &priv->stats;
}

/*
 * ndo_tx_timeout hook: the stack decided transmission has stalled.
 * Log it, bump the error counter and reset the MAC/DMA engine.
 */
static void eth_tx_timeout(struct net_device *ndev)
{
	unsigned long flags;
	struct eth_local *priv = netdev_priv(ndev);

	/* Hold off TX-path interrupts so the reset cannot be re-entered. */
	spin_lock_irqsave(&ETH_tx_spinlock, flags);

	netdev_err(ndev, "%s: XLlTemac: exceeded transmit timeout of %lu ms.  Resetting emac.\n",
	       ndev->name, TX_TIMEOUT * 1000UL / HZ);
	priv->stats.tx_errors++;

	eth_reset(ndev, __LINE__);

	spin_unlock_irqrestore(&ETH_tx_spinlock, flags);
}

/*
 * Replenish the RX ring: allocate one sk_buff per free RX buffer
 * descriptor, map each buffer for DMA and hand the descriptors to the
 * hardware so it can receive frames into them.
 *
 * Fix: on XAxiDma_BdRingToHw() failure the old code purged the local
 * skb list, but by that point every skb had already been dequeued and
 * attached to a BD -- the list was empty and the skbs (plus their DMA
 * mappings) leaked.  They are now unmapped and freed via the BD ids.
 */
static void eth_DmaSetupRecvBuffers(struct net_device *ndev)
{
	int num_sk_buffs;
	struct sk_buff_head sk_buff_list;
	struct sk_buff *new_skb;
	dma_addr_t new_skb_baddr;
	XAxiDma_Bd *BdPtr, *BdCurPtr;
	int result;
	int free_bd_count;
	XAxiDma_BdRing *RingPtr;
	int RingIndex = 0;
	struct eth_local *lp = (struct eth_local *)netdev_priv(ndev);

	RingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	free_bd_count = XAxiDma_mBdRingGetFreeCnt(RingPtr);

	/* Grab as many skbs as there are free BDs; stop early on OOM. */
	skb_queue_head_init(&sk_buff_list);
	for (num_sk_buffs = 0; num_sk_buffs < free_bd_count; num_sk_buffs++) {
		new_skb = netdev_alloc_skb_ip_align(ndev, lp->frame_size);
		if (new_skb == NULL)
			break;
		__skb_queue_tail(&sk_buff_list, new_skb);
	}
	if (!num_sk_buffs) {
		netdev_err(ndev, "%s: XAxiDma: alloc_skb unsuccessful\n",
		       ndev->name);
		goto out;
	}

	/* Reserve a matching run of BDs from the RX ring. */
	result = XAxiDma_BdRingAlloc(RingPtr, num_sk_buffs, &BdPtr);
	if (result != XST_SUCCESS) {
		/* we really shouldn't get this */
		skb_queue_purge(&sk_buff_list);
		netdev_err(ndev, "%s: XAxiDma: BdRingAlloc unsuccessful (%d)\n",
		       ndev->name, result);
		XAxiDma_Reset(&lp->AxiDma);
		goto out;
	}
	BdCurPtr = BdPtr;

	/* Attach each skb to a BD: DMA-map it and record it as the BD id. */
	new_skb = skb_dequeue(&sk_buff_list);
	while (new_skb) {
		/* Get dma handle of skb->data */
		new_skb_baddr = dma_map_single(ndev->dev.parent,
					new_skb->data, lp->frame_size,
						     DMA_FROM_DEVICE);
		if ((new_skb_baddr >> 32)) {
			netdev_alert(ndev, "send packet phys_addr %llx",
					(uint64_t)new_skb_baddr);
		}

		XAxiDma_BdSetBufAddr(BdCurPtr, new_skb_baddr);
		XAxiDma_BdSetLength(BdCurPtr, lp->frame_size, RingPtr->MaxTransferLen);
		XAxiDma_BdSetId(BdCurPtr, new_skb);
		XAxiDma_BdSetCtrl(BdCurPtr, RingIndex);

		BdCurPtr = XAxiDma_mBdRingNext(RingPtr, BdCurPtr);

		new_skb = skb_dequeue(&sk_buff_list);
	}

	/* enqueue RxBD with the attached skb buffers such that it is
	 * ready for frame reception */
	result = XAxiDma_BdRingToHw(RingPtr, num_sk_buffs, BdPtr, RingIndex);
	if (result != XST_SUCCESS) {
		netdev_err(ndev, "%s: XAxiDma: (DmaSetupRecvBuffers) BdRingToHw unsuccessful (%d)\n",
		       ndev->name, result);
		/*
		 * The skbs now live in the BDs, not the local list; free
		 * them (and their DMA mappings) through the BD ids.
		 */
		BdCurPtr = BdPtr;
		while (num_sk_buffs > 0) {
			new_skb = (struct sk_buff *)XAxiDma_BdGetId(BdCurPtr);
			if (new_skb) {
				dma_unmap_single(ndev->dev.parent,
					(dma_addr_t)XAxiDma_BdGetBufAddr(BdCurPtr),
					lp->frame_size, DMA_FROM_DEVICE);
				dev_kfree_skb(new_skb);
			}
			XAxiDma_BdSetId(BdCurPtr, NULL);
			BdCurPtr = XAxiDma_mBdRingNext(RingPtr, BdCurPtr);
			num_sk_buffs--;
		}
		XAxiDma_Reset(&lp->AxiDma);
		goto out;
	}
out:
	return;
}

static int eth_descriptor_init(struct eth_local *lp)
{
	int recvsize, sendsize;
	int result;
	int RingIndex = 0;
	XAxiDma_Bd BdTemplate;
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;
	RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);

	/* calc size of descriptor space pool; alloc from non-cached memory */
	sendsize = XAxiDma_mBdRingMemCalc(XAXIDMA_BD_MINIMUM_ALIGNMENT, TX_BD_NUM);
	lp->tx_bd_v = dma_alloc_coherent(&lp->pdev->dev, sendsize, &lp->tx_bd_p, GFP_KERNEL);
	lp->tx_bd_size = sendsize;

	recvsize = XAxiDma_mBdRingMemCalc(XAXIDMA_BD_MINIMUM_ALIGNMENT, RX_BD_NUM);
	lp->rx_bd_v = dma_alloc_coherent(&lp->pdev->dev, recvsize, &lp->rx_bd_p, GFP_KERNEL);
	lp->rx_bd_size = recvsize;

	dev_dbg(lp->dev, "Tx:phy: 0x%llx, virt: %p, size: 0x%x\n"
			  "Rx:phy: 0x%llx, virt: %p, size: 0x%x\n",
			(uint64_t)lp->tx_bd_p, lp->tx_bd_v, lp->tx_bd_size,
			(uint64_t)lp->rx_bd_p, lp->rx_bd_v, lp->rx_bd_size);

	result = XAxiDma_BdRingCreate(TxRingPtr, (u32)lp->tx_bd_p, (u32)lp->tx_bd_v,
			XAXIDMA_BD_MINIMUM_ALIGNMENT, TX_BD_NUM);
	if (result != XST_SUCCESS) {
		dev_err(lp->dev, "XAxiDma: DMA Ring Create (SEND). Error: %d\n", result);
		return XST_FAILURE;
	}
	XAxiDma_BdClear(&BdTemplate);
	result = XAxiDma_BdRingClone(TxRingPtr, &BdTemplate);                                 
	if (result != XST_SUCCESS) {    
        dev_err(lp->dev, "Failed clone TX BDs\r\n");
        return XST_FAILURE;                                         
    }
	result = XAxiDma_BdRingCreate(RxRingPtr, (u32)lp->rx_bd_p, (u32)lp->rx_bd_v,
			(u32)XAXIDMA_BD_MINIMUM_ALIGNMENT, RX_BD_NUM);
	if (result != XST_SUCCESS) {
		dev_err(lp->dev, "XAxiDma: DMA Ring Create (RECV). Error: %d\n", result);
		return XST_FAILURE;
	}
	XAxiDma_BdClear(&BdTemplate);
	result = XAxiDma_BdRingClone(RxRingPtr, &BdTemplate);                                 
	if (result != XST_SUCCESS) {    
        dev_err(lp->dev, "Failed clone RX BDs\r\n");
        return XST_FAILURE;                                         
    }
	eth_DmaSetupRecvBuffers(lp->ndev);

	return XST_SUCCESS;
}

/*
 * Tear down both BD rings: unmap and free any sk_buffs still attached
 * to RX/TX descriptors, then release the coherent descriptor pools.
 *
 * Fix: the old code returned early when a ring's FirstBdAddr was NULL,
 * which skipped the dma_free_coherent() calls and leaked the pools.
 * The skb walks are now conditional and the frees always run.
 */
static void eth_descriptor_free(struct net_device *ndev)
{
	XAxiDma_Bd *BdPtr;
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	u32 len, i;
	int RingIndex = 0;
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);

	RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, RingIndex);
	TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);

	/* Unmap and free skb's allocated and mapped in eth_descriptor_init() */
	BdPtr = (XAxiDma_Bd *) RxRingPtr->FirstBdAddr;
	if (BdPtr != NULL) {
		for (i = 0; i < RX_BD_NUM; i++) {
			skb = (struct sk_buff *) XAxiDma_BdGetId(BdPtr);
			if (skb) {
				skb_dma_addr = (dma_addr_t)XAxiDma_BdGetBufAddr(BdPtr);
				dma_unmap_single(ndev->dev.parent, skb_dma_addr,
						lp->frame_size, DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			/* find the next BD in the DMA RX BD ring */
			BdPtr = XAxiDma_mBdRingNext(RxRingPtr, BdPtr);
		}
	}

	/* Unmap and free TX skb's that have not had a chance to be freed
	 * in DmaSendHandlerBH(). This could happen when TX Threshold is larger
	 * than 1 and TX waitbound is 0
	 */
	BdPtr = (XAxiDma_Bd *) TxRingPtr->FirstBdAddr;
	if (BdPtr != NULL) {
		for (i = 0; i < TX_BD_NUM; i++) {
			skb = (struct sk_buff *) XAxiDma_BdGetId(BdPtr);
			if (skb) {
				skb_dma_addr = (dma_addr_t)XAxiDma_BdGetBufAddr(BdPtr);
				len = XAxiDma_BdGetLength(BdPtr, TxRingPtr->MaxTransferLen);
				dma_unmap_single(ndev->dev.parent, skb_dma_addr, len,
						 DMA_TO_DEVICE);
				dev_kfree_skb(skb);
			}
			/* find the next BD in the DMA TX BD ring */
			BdPtr = XAxiDma_mBdRingNext(TxRingPtr, BdPtr);
		}
	}

	/* Always release the coherent pools, even if a ring was never set up. */
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent, lp->tx_bd_size,
				lp->tx_bd_v, lp->tx_bd_p);

	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent, lp->rx_bd_size,
				lp->rx_bd_v, lp->rx_bd_p);
}

#ifdef CONFIG_INET_LRO
/*
 * LRO header-extraction callback (modelled on pasemi_mac.c): locate the
 * IPv4 and TCP headers of a received frame for inet_lro.  Returns 0
 * when the frame is an aggregatable TCP/IPv4 segment, -1 otherwise.
 */
static int eth_get_skb_header(struct sk_buff *skb, void **iphdr,
		void **tcph, u64 *hdr_flags, void *priv)
{
	unsigned int ip_len;
	struct iphdr *iph;

	/* TODO IPv4 header checksum failed */

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* Only TCP segments can be aggregated. */
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* Reject frames whose IP/TCP headers are truncated. */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return XST_SUCCESS;
}
#endif

/*
 * ethtool get_settings: the link is a fixed 10G fibre port, so report
 * forced 10 Gb/s full duplex whenever the carrier is up, unknown
 * speed/duplex otherwise.
 */
static int eth_ethtool_get_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct eth_local *priv = netdev_priv(ndev);

	ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
	ecmd->advertising = ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE;
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;
	ecmd->autoneg = AUTONEG_DISABLE;

	if (netif_carrier_ok(priv->ndev)) {
		ethtool_cmd_speed_set(ecmd, SPEED_10000);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}

/*
 * ethtool set_settings: the device only supports a forced 10 Gb/s
 * full-duplex link, so reject everything else.
 *
 * Fix: the old check summed speed and duplex
 * (speed + duplex != SPEED_10000 + DUPLEX_FULL), which accepts
 * mismatched combinations whose sum happens to be equal; speed and
 * duplex are now validated independently.  The deprecated 16-bit
 * ecmd->speed field is no longer read -- ethtool_cmd_speed() is used.
 */
static int eth_ethtool_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);
	u32 speed = ethtool_cmd_speed(ecmd);

	/* Autonegotiation is not supported on this link. */
	if (ecmd->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	if (ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (speed != SPEED_10000) {
		printk(KERN_ERR
		       "%s: eth_ethtool_set_settings speed not supported: %d\n",
		       ndev->name, speed);
		return -EOPNOTSUPP;
	}

	lp->link_speed = 10000;
	lp->link_duplex = DUPLEX_FULL;

	return 0;
}

/*
 * ethtool get_coalesce: report the threshold (frames) and waitbound
 * (delay) values programmed into the RX and TX DMA BD rings.
 */
static int eth_ethtool_get_coalesce(struct net_device *ndev,
		struct ethtool_coalesce *ec)
{
	u32 threshold, waitbound;
	struct eth_local *priv = netdev_priv(ndev);
	XAxiDma_BdRing *rx_ring = XAxiDma_GetRxRing(&priv->AxiDma, 0);
	XAxiDma_BdRing *tx_ring = XAxiDma_GetTxRing(&priv->AxiDma);

	memset(ec, 0, sizeof(*ec));

	XAxiDma_BdRingGetCoalesce(rx_ring, &threshold, &waitbound);
	ec->rx_max_coalesced_frames = threshold;
	ec->rx_coalesce_usecs = waitbound;

	XAxiDma_BdRingGetCoalesce(tx_ring, &threshold, &waitbound);
	ec->tx_max_coalesced_frames = threshold;
	ec->tx_coalesce_usecs = waitbound;

	return 0;
}

static int eth_ethtool_set_coalesce(struct net_device *ndev, 
		struct ethtool_coalesce *ec)
{
	int ret;
    struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;

    RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, 0);
    TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);
	
    if (ec->rx_coalesce_usecs == 0) {
        ec->rx_coalesce_usecs = 1;
        dma_rx_int_mask = XAXIDMA_IRQ_ALL_MASK & ~XAXIDMA_IRQ_DELAY_MASK;
    }
    if ((ret = XAxiDma_BdRingSetCoalesce(RxRingPtr,
            (u16) (ec->rx_max_coalesced_frames),
            (u16) (ec->rx_coalesce_usecs))) != XST_SUCCESS) {
        printk(KERN_ERR "%s: EthDma: BdRingSetCoalesce error %d\n",
               ndev->name, ret);
        return -EIO;
    }
    XAxiDma_mBdRingIntEnable(RxRingPtr, dma_rx_int_mask);

    if (ec->tx_coalesce_usecs == 0) {
        ec->tx_coalesce_usecs = 1;
        dma_tx_int_mask = XAXIDMA_IRQ_ALL_MASK & ~XAXIDMA_IRQ_DELAY_MASK;
    }
    if ((ret = XAxiDma_BdRingSetCoalesce(TxRingPtr,
            (u16) (ec->tx_max_coalesced_frames),
            (u16) (ec->tx_coalesce_usecs))) != XST_SUCCESS) {
        printk(KERN_ERR "%s: EthDma: BdRingSetCoalesce error %d\n",
               ndev->name, ret);
        return -EIO;
    }
    XAxiDma_mBdRingIntEnable(TxRingPtr, dma_tx_int_mask);

	return 0;
}

/* ethtool get_ringparam: BD ring sizes are fixed at build time. */
static void eth_ethtool_get_ringparam(struct net_device *ndev,
		struct ethtool_ringparam *erp)
{
	memset(erp, 0, sizeof(*erp));

	erp->rx_max_pending = RX_BD_NUM;
	erp->rx_pending = RX_BD_NUM;
	erp->tx_max_pending = TX_BD_NUM;
	erp->tx_pending = TX_BD_NUM;
}

static void eth_ethtool_get_pauseparam(struct net_device *ndev,
		struct ethtool_pauseparam *epp)
{
	struct eth_local *lp = (struct eth_local *)netdev_priv(ndev);

	epp->autoneg = AUTONEG_DISABLE;
	if (lp->options & ETH_FLOW_CONTROL_OPTION) {
		epp->rx_pause = 1;
		epp->tx_pause = 1;
	} else {
		epp->rx_pause = 0;
		epp->tx_pause = 0;
	}
}

/* ethtool get_rx_csum: RX checksum offload state from local_features. */
static u32 eth_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct eth_local *priv = netdev_priv(ndev);

	return (priv->local_features & RX_HW_CSUM) != 0;
}

/* ethtool set_rx_csum: toggle the RX_HW_CSUM bit in local_features. */
static int eth_ethtool_set_rx_csum(struct net_device *ndev, u32 onoff)
{
	struct eth_local *priv = netdev_priv(ndev);

	if (onoff)
		priv->local_features |= RX_HW_CSUM;
	else
		priv->local_features &= ~RX_HW_CSUM;

	return 0;
}

/* ethtool get_tx_csum: TX checksum offload mirrors NETIF_F_IP_CSUM. */
static u32 eth_ethtool_get_tx_csum(struct net_device *ndev)
{
	return (ndev->features & NETIF_F_IP_CSUM) != 0;
}

/* ethtool set_tx_csum: toggle NETIF_F_IP_CSUM in the feature flags. */
static int eth_ethtool_set_tx_csum(struct net_device *ndev, u32 onoff)
{
	if (onoff)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	return 0;
}

/* ethtool get_sg: scatter/gather state mirrors NETIF_F_SG. */
static u32 eth_ethtool_get_sg(struct net_device *ndev)
{
	return (ndev->features & NETIF_F_SG) != 0;
}

/* ethtool set_sg: toggle NETIF_F_SG and NETIF_F_FRAGLIST together. */
static int eth_ethtool_set_sg(struct net_device *ndev, u32 onoff)
{
	if (onoff)
		ndev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
	else
		ndev->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);

	return 0;
}

/* ethtool get_strings: copy out the statistics name table. */
static void eth_ethtool_get_strings(struct net_device *ndev,
		u32 stringset, u8 *strings)
{
	*strings = 0;

	if (stringset == ETH_SS_STATS)
		memcpy(strings, &eth_ethtool_gstrings_stats,
		       sizeof(eth_ethtool_gstrings_stats));
}

/*
 * ethtool get_ethtool_stats: export the driver counters in the same
 * order as the gstrings table.
 */
static void eth_ethtool_get_ethtool_stats(struct net_device *ndev,
    struct ethtool_stats *stats, u64 *data)
{
	struct eth_local *priv = netdev_priv(ndev);
	struct net_device_stats *s = &priv->stats;

	data[0] = s->tx_packets;
	data[1] = s->tx_dropped;
	data[2] = s->tx_errors;
	data[3] = s->tx_fifo_errors;
	data[4] = s->rx_packets;
	data[5] = s->rx_dropped;
	data[6] = s->rx_errors;
	data[7] = s->rx_fifo_errors;
	data[8] = s->rx_crc_errors;
	data[9] = priv->max_frags_in_a_packet;
	data[10] = priv->tx_hw_csums;
	data[11] = priv->rx_hw_csums;
}

/* ethtool get_sset_count: only the statistics string set is supported. */
static int eth_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ETH_STATS_LEN;

	return -EOPNOTSUPP;
}

#define EMAC_REGS_N 32

/*
 * ethtool get_regs_len: size of the register dump buffer.
 * NOTE(review): expressed as 32 u16 slots (64 bytes), which happens to
 * equal the 16 u32 registers eth_ethtool_get_regs() actually writes --
 * keep the two in sync if either side changes.
 */
int eth_ethtool_get_regs_len(struct net_device *ndev)
{
    return (sizeof(u16) * EMAC_REGS_N);
}

/*
 * ethtool get_regs: dump the AXI DMA channel registers for both rings
 * into the user buffer (8 u32 per channel, RX first).  The byte count
 * written must stay within eth_ethtool_get_regs_len().
 */
static void eth_ethtool_get_regs(struct net_device *ndev, 
		struct ethtool_regs *regs, void *p)
{
	XAxiDma_BdRing *RxRingPtr, *TxRingPtr;
	struct eth_local *lp = (struct eth_local *)netdev_priv(ndev);
	u32 *reg = p;
	u32 *reg_start = reg;

	RxRingPtr = XAxiDma_GetRxRing(&lp->AxiDma, 0);
    TxRingPtr = XAxiDma_GetTxRing(&lp->AxiDma);
	regs->version = 0;

	/* Receive channel registers */
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_CR_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_SR_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_CDESC_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_TDESC_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_SRCADDR_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_DESTADDR_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_BUFFLEN_OFFSET);
	*reg++ = XAxiDma_ReadReg(RxRingPtr->ChanBase, XAXIDMA_SGCTL_OFFSET);
	
	/* Transmit channel registers */
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_CR_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_SR_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_CDESC_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_TDESC_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_SRCADDR_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_DESTADDR_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_BUFFLEN_OFFSET);
	*reg++ = XAxiDma_ReadReg(TxRingPtr->ChanBase, XAXIDMA_SGCTL_OFFSET);
	
	/* Report how many bytes were actually filled in. */
	regs->len = (reg - reg_start) * sizeof(u32);
}

/*
 * ethtool get_drvinfo: driver name/version, firmware revision read at
 * probe time, PCI bus info, and the sizes of the regs/stats dumps.
 */
static void eth_ethtool_get_drvinfo(struct net_device *ndev,
		struct ethtool_drvinfo *ed)
{
	struct eth_local *priv = netdev_priv(ndev);

	memset(ed, 0, sizeof(*ed));
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	snprintf(ed->fw_version, sizeof(ed->fw_version), "0x%08x",
		 (unsigned int)priv->fw_version);
	strlcpy(ed->bus_info, pci_name(priv->pdev), sizeof(ed->bus_info));

	/* Also tell how much memory is needed for dumping register values */
	ed->regdump_len = sizeof(u16) * EMAC_REGS_N;
	ed->n_stats = ETH_STATS_LEN;
}

/*
 * Program the station MAC address into the netdev and the hardware
 * unicast RX filter.
 *
 * Refuses to act while the interface is up.  When @address is NULL the
 * current dev_addr is kept; if the resulting address is invalid, a
 * random one is generated instead.
 */
static void eth_set_mac_address(struct net_device *ndev, void *address)
{
	struct eth_local *lp = netdev_priv(ndev);

	/* Never change the address underneath a running interface. */
	if (ndev->flags & IFF_UP) 
		return;

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);

	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/*
	 * Set up unicast MAC address filter: bytes 0-3 go into ADDR0,
	 * bytes 4-5 into ADDR1, little-endian packed.
	 */
	XAxiDma_WriteReg((u32)lp->reg_base + MAC_ADDR_BASE, RX_FRAME_ADDR0_REG,
				(ndev->dev_addr[0]) |
				(ndev->dev_addr[1] << 8) |
				(ndev->dev_addr[2] << 16) |
				(ndev->dev_addr[3] << 24));

	XAxiDma_WriteReg((u32)lp->reg_base + MAC_ADDR_BASE, RX_FRAME_ADDR1_REG,
				(ndev->dev_addr[4] |
				(ndev->dev_addr[5] << 8)));
}

/*
 * ndo_set_mac_address hook: validate the requested address, copy it
 * into the netdev and program the hardware filter.
 */
static int eth_set_netdev_address(struct net_device *ndev, void *p)
{
	struct sockaddr *sa = p;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
	eth_set_mac_address(ndev, sa->sa_data);

	return XST_SUCCESS;
}

/*
 * Drive the front-panel LEDs from the current link speed.
 * NOTE(review): register 0x8000 appears to be the link LED and 0x8004
 * the speed LED -- offsets are undocumented here, confirm against the
 * FPGA register map.
 */
static void eth_led_ctrl(struct net_device *ndev)
{
	struct eth_local *lp = (struct eth_local *) netdev_priv(ndev);
	
	switch(lp->link_speed) {
		case 10000:
			XAxiDma_WriteReg((u32)lp->reg_base, 0x8000, 0x1);
			XAxiDma_WriteReg((u32)lp->reg_base, 0x8004, 0x1);
			break;
		case 1000:
			XAxiDma_WriteReg((u32)lp->reg_base, 0x8000, 0x1);
			XAxiDma_WriteReg((u32)lp->reg_base, 0x8004, 0x0);
			break;
		default:
			/* link down or unknown speed: LEDs off */
			XAxiDma_WriteReg((u32)lp->reg_base, 0x8000, 0x0);
	}
}

/*
 * Link-state watchdog, re-armed every 2 seconds from a kernel timer.
 * Polls a MAC status register and updates carrier state and LEDs on a
 * link transition.
 *
 * NOTE(review): mdelay(50) busy-waits 50 ms in timer (softirq) context
 * on every poll -- consider moving this poll to a delayed workqueue.
 */
static void eth_watchdog(unsigned long data)
{
	struct eth_local *lp = (struct eth_local *)data;
	struct net_device *ndev = lp->ndev;
	int link_up;
	
	/* Trigger a status read -- presumably 0x84 is a command register
	 * and 0x80 holds the result; TODO confirm with the register map. */
	XAxiDma_WriteReg((u32)lp->reg_base + MAC_ADDR_BASE + 0x10000, 0x84, 0x00200003);
	mdelay(50);
	link_up = XAxiDma_ReadReg((u32)lp->reg_base + MAC_ADDR_BASE + 0x10000, 0x80);

	if (link_up & LINK_UP_MASK){
		/* report only the down->up transition */
		if (!netif_carrier_ok(ndev)){
			netdev_info(ndev, "SureSave ETH10G Link is Up 10 Gbps Full Duplex\n");
			lp->link_speed = 10000;
			lp->link_duplex = 2;
			eth_led_ctrl(ndev);
			netif_carrier_on(ndev);
		}
	}else{
		/* report only the up->down transition */
        if (netif_carrier_ok(ndev)) {
            lp->link_speed = 0;
            lp->link_duplex = 0;
			eth_led_ctrl(ndev);
            netdev_info(ndev, "SureSave ETH10G Link is Down\n");
            netif_carrier_off(ndev);
        }	
	}
	/* Reset the timer */
	mod_timer(&lp->watchdog_timer, jiffies + 2 * HZ);
}

/* net_device callbacks; hooks left unset fall back to kernel defaults. */
static struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_close,
	.ndo_start_xmit		= eth_send,
	.ndo_tx_timeout		= eth_tx_timeout,
	.ndo_get_stats		= eth_get_stats,
	.ndo_set_mac_address	= eth_set_netdev_address,
};

/*
 * sysfs "ring" attribute: dump both DMA BD rings to the kernel log.
 * The sysfs buffer itself only receives an empty string (length 1).
 */
static ssize_t eth_ring_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct eth_local *priv = netdev_priv(ndev);

	disp_bd_ring(XAxiDma_GetRxRing(&priv->AxiDma, 0));
	disp_bd_ring(XAxiDma_GetTxRing(&priv->AxiDma));

	buf[0] = 0;

	return 1;
}

/* ethtool callbacks (see include/linux/ethtool.h). */
static struct ethtool_ops ethtool_ops = {
	.get_settings		= eth_ethtool_get_settings,
	.set_settings		= eth_ethtool_set_settings,
	.get_drvinfo		= eth_ethtool_get_drvinfo,
	.get_regs_len		= eth_ethtool_get_regs_len,
	.get_regs		= eth_ethtool_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= eth_ethtool_get_coalesce,
	.set_coalesce		= eth_ethtool_set_coalesce,
	.get_ringparam		= eth_ethtool_get_ringparam,
	.get_pauseparam		= eth_ethtool_get_pauseparam,
	.get_rx_csum		= eth_ethtool_get_rx_csum,
	.set_rx_csum		= eth_ethtool_set_rx_csum,
	.get_tx_csum		= eth_ethtool_get_tx_csum,
	.set_tx_csum		= eth_ethtool_set_tx_csum,
	.get_sg			= eth_ethtool_get_sg,
	.set_sg			= eth_ethtool_set_sg,
	.get_strings		= eth_ethtool_get_strings,
	.get_ethtool_stats	= eth_ethtool_get_ethtool_stats,
	.get_sset_count		= eth_ethtool_get_sset_count,
};


static struct device_attribute eth_ring_attr = {
	.attr = {.name = "ring", .mode = S_IRUGO,},
	.show = eth_ring_show,
};

/*
 * PCI probe: enable the device, set up DMA masks, map BAR0 (MAC/DMA
 * registers) and BAR1 (control/GPIO space), initialize the net_device
 * and its private data, and register it with the network stack.
 *
 * Fixes over the original:
 *  - error paths now iounmap() whichever BARs were mapped (they leaked
 *    both mappings before);
 *  - the __must_check return of device_create_file() is checked;
 *  - the dead "#if 0" AXI-DMA register test block and commented-out
 *    printk lines were removed.
 */
static int __devinit eth_probe(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct eth_local *lp;
	struct net_device *ndev = NULL;
	int err;
	char *addr = NULL;
	unsigned int value = 0;

	dev_dbg(&pdev->dev, "TX/RX CNT: %d, %d, TX/RX_THRESHOLD: %d, %d, "
			"TX/RX HW cusm: %d, %d, SG: %d\n",
			TX_BD_NUM, RX_BD_NUM, ETH_TX_THRESHOLD, ETH_RX_THRESHOLD,
			TX_HW_CSUM, RX_HW_CSUM, ETH_SG_ENABLE);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.ERR= %d\n", err);
		return err;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit mask. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_dma_mask;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI device get region failed.ERR= %d\n", err);
		goto err_request_regions;
	}

	pci_set_master(pdev);

	/* Create an ethernet device instance */
	ndev = alloc_etherdev(sizeof(struct eth_local));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate net device.\n");
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);
	ndev->irq = pdev->irq;

	/* Initialize the private data (zeroed by alloc_etherdev). */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->pdev = pdev;
	lp->dev = &pdev->dev;

	/* BAR0: MAC/DMA register space. */
	lp->base = pci_resource_start(pdev, 0);
	lp->base_len = pci_resource_len(pdev, 0);
	lp->reg_base = ioremap_nocache(lp->base, lp->base_len);
	if (!lp->reg_base) {
		dev_err(&pdev->dev, "ioremap reg base error!\n");
		goto err_ioremap;
	}
	dev_info(&pdev->dev, "base 0x%x, size 0x%x, mmr 0x%lx\n",
			lp->base, lp->base_len, (unsigned long)lp->reg_base);

	/* BAR1: control/GPIO space. */
	printk("Ready test bar1\n");
	lp->ctl_start = pci_resource_start(pdev, 1);
	lp->ctl_len = pci_resource_len(pdev, 1);
	lp->ctl_base = ioremap(lp->ctl_start, lp->ctl_len);
	if (!lp->ctl_base) {
		printk("ioremap ctl base error!\n");
		goto err_ioremap;
	}

	printk("base 0x%x, size 0x%x, mmr 0x%lx\n",
	       lp->ctl_start, lp->ctl_len, (unsigned long)lp->ctl_base);

	printk("BAR1(0x00): gpio_in: %08x\n", ioread32(lp->ctl_base + 0x00));
	printk("BAR1(0x04): gpio_out: %08x\n", ioread32(lp->ctl_base + 0x04));
	printk("BAR1(0x08): gpio_en: %08x\n", ioread32(lp->ctl_base + 0x08));
	printk("BAR1(0x3c): version: %08x\n", ioread32(lp->ctl_base + 0x3c));

	eth_spi_read_byte((u32)lp->ctl_base, 0xffff00, &value);
	printk("value: %x\n", value);

	/* initialize the netdev structure */
	ndev->netdev_ops = &eth_netdev_ops;
	ndev->flags &= ~IFF_MULTICAST;
	ndev->watchdog_timeo = TX_TIMEOUT;
#if TX_HW_CSUM
	ndev->features |= NETIF_F_IP_CSUM;
#endif
#if ETH_SG_ENABLE
	ndev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
#endif

	if (ndev->mtu > ETH_JUMBO_MTU)
		ndev->mtu = ETH_JUMBO_MTU;

	lp->frame_size = ndev->mtu + ETH_HDR_SIZE + ETH_TRL_SIZE;
#ifdef CONFIG_INET_LRO
	lp->lro_state = ETH_LRO_INIT;
#endif

	/* Set the MAC address from platform data (NULL -> keep/randomize). */
	eth_set_mac_address(ndev, (void *)addr);
	dev_dbg(&pdev->dev, "Set Mac addr %pM\n", ndev->dev_addr);

	spin_lock_init(&lp->hw_lock);
	init_timer(&lp->watchdog_timer);
	lp->watchdog_timer.function = eth_watchdog;
	lp->watchdog_timer.data = (unsigned long)lp;

#ifdef CONFIG_INET_LRO
	memset(&lp->lro_mgr.stats, 0, sizeof(lp->lro_mgr.stats));
	memset(&lp->lro_arr, 0, sizeof(lp->lro_arr));

	lp->lro_mgr.max_aggr = LRO_MAX_AGGR;
	lp->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	lp->lro_mgr.lro_arr  = lp->lro_arr;
	lp->lro_mgr.get_skb_header = eth_get_skb_header;
	lp->lro_mgr.features = /*LRO_F_NAPI*/0;
	lp->lro_mgr.dev      = ndev;

	lp->lro_mgr.ip_summed = CHECKSUM_NONE;
	lp->lro_mgr.ip_summed_aggr = CHECKSUM_NONE;

	lp->lro_mgr.frag_align_pad = 0;
	lp->lro_state = ETH_LRO_NORM;
#endif

	/* init the stats */
	lp->max_frags_in_a_packet = 0;
	lp->tx_hw_csums = 0;
	lp->rx_hw_csums = 0;
	lp->fw_version = XAxiDma_ReadReg((u32)lp->reg_base + MAC_ADDR_BASE, 0x10c);

	/* Set ethtool IOCTL handler vectors. */
	SET_ETHTOOL_OPS(ndev, &ethtool_ops);

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "%s: Cannot register net device, aborting.\n", ndev->name);
		goto err_register;
	}

	/* Best-effort sysfs node; the device works without it. */
	err = device_create_file(&pdev->dev, &eth_ring_attr);
	if (err)
		dev_warn(&pdev->dev, "could not create 'ring' sysfs attribute (%d)\n", err);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	return XST_SUCCESS;

err_register:
	iounmap(lp->ctl_base);
err_ioremap:
	/* Either mapping may have failed; lp was zeroed by alloc_etherdev. */
	if (lp->reg_base)
		iounmap(lp->reg_base);
	free_netdev(ndev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return XST_FAILURE;
}


/*
 * PCI remove: tear down in reverse order of probe -- drop the sysfs
 * attribute, stop the DMA engine, turn the LEDs off, unregister the
 * netdev, unmap the BARs and release the PCI resources.
 *
 * Fix: device_remove_file() used to run after free_netdev() and
 * pci_disable_device(); it is now the first step of teardown.
 */
static void __devexit eth_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct eth_local *lp = (struct eth_local *)netdev_priv(ndev);

	device_remove_file(&pdev->dev, &eth_ring_attr);

	/* Stop AXI DMA Engine */
	AxiDma_Stop((u32)(lp->reg_base + AXI_DMA_REG));

	/* Link LED off (register 0x8000, see eth_led_ctrl()). */
	XAxiDma_WriteReg((u32)lp->reg_base, 0x8000, 0x0);

	unregister_netdev(ndev);
	iounmap((void *)(lp->reg_base));
	if (lp->ctl_base)
		iounmap((void *)(lp->ctl_base));
	pci_release_regions(pdev);
	free_netdev(ndev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
}

/* PCI driver glue: binds eth_probe/eth_remove to the IDs in eth_pci_table. */
static struct pci_driver eth_driver = {
	.name     = DRIVER_NAME,
	.id_table = eth_pci_table,
	.probe    = eth_probe,
	.remove   = __devexit_p(eth_remove),
};

/*
 * Module entry point: initialize the module-wide locks and queues
 * before any device can be probed, then register the PCI driver.
 */
static int __init eth_init(void)
{
	pr_info("%s - version %s\n", DRIVER_NAME, DRIVER_VERSION);
	pr_info("%s\n", eth_copyright);
	pr_info("rep_version: %s\n", rep_version);

	/* Global locks must exist before the first probe runs. */
	spin_lock_init(&ETH_spinlock);
	spin_lock_init(&ETH_tx_spinlock);
	spin_lock_init(&ETH_rx_spinlock);
	spin_lock_init(&sentQueueSpin);
	spin_lock_init(&receivedQueueSpin);

	INIT_LIST_HEAD(&sentQueue);
	INIT_LIST_HEAD(&receivedQueue);

	return pci_register_driver(&eth_driver);
}

/* Module exit: unregister the PCI driver (detaches all bound devices). */
static void __exit eth_exit(void)
{
	pr_info("Soul SureSave ETH10G Driver Exit\n");
	pci_unregister_driver(&eth_driver);
}

module_init(eth_init);
module_exit(eth_exit);

MODULE_DESCRIPTION("Soul SureSave ETH10G HBA Linux driver");
MODULE_AUTHOR("Soul Tech");
MODULE_LICENSE("GPL");

