// SPDX-License-Identifier: GPL-2.0
/*
 * gmac_e.c - Driver of LomboTech GMAC Controller.
 *
 * Copyright (C) 2016-2018, LomboTech Co.Ltd.
 * Author: lomboswer <lomboswer@lombotech.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

#include <mach/csp.h>
#include <mach/debug.h>

#if defined(CONFIG_ARCH_LOMBO_N7V3)
#include "csp/n7v3/gmac_const.h"
#include "csp/n7v3/gmac_csp.h"
#else
#error "No supported platform!"
#endif

#include "gmac_e.h"

static irqreturn_t lombo_gmac_irq(int irq, void *dev_id);

/* Enable the NAPI context of every DMA channel (rx and tx share one). */
static void lombo_gmac_enable_all_queues(struct lombo_gmac *lgmac)
{
	u32 nch = max(lgmac->rx_queues_to_use, lgmac->tx_queues_to_use);
	u32 i;

	for (i = 0; i < nch; i++)
		napi_enable(&lgmac->channel[i].napi);
}

/* Disable the NAPI context of every DMA channel (rx and tx share one). */
static void lombo_gmac_disable_all_queues(struct lombo_gmac *lgmac)
{
	u32 nch = max(lgmac->rx_queues_to_use, lgmac->tx_queues_to_use);
	u32 i;

	for (i = 0; i < nch; i++)
		napi_disable(&lgmac->channel[i].napi);
}

/* Allow the stack to submit frames on every tx queue. */
static void lombo_gmac_start_all_queues(struct lombo_gmac *lgmac)
{
	struct net_device *ndev = lgmac->net_dev;
	u32 i;

	for (i = 0; i < lgmac->tx_queues_to_use; i++)
		netif_tx_start_queue(netdev_get_tx_queue(ndev, i));
}

/* Stop the stack from submitting frames on every tx queue. */
static void lombo_gmac_stop_all_queues(struct lombo_gmac *lgmac)
{
	struct net_device *ndev = lgmac->net_dev;
	u32 i;

	for (i = 0; i < lgmac->tx_queues_to_use; i++)
		netif_tx_stop_queue(netdev_get_tx_queue(ndev, i));
}

/*
 * Report a fatal MAC error: drop the carrier and ask the service task to
 * perform a reset. The SERVICE_SCHED bit guarantees the work is queued at
 * most once, and nothing is scheduled while the interface is down.
 */
static void lombo_gmac_global_error(struct lombo_gmac *lgmac)
{
	netif_carrier_off(lgmac->net_dev);
	set_bit(LGMAC_RESET_REQUESTED, &lgmac->state);

	if (!test_bit(LGMAC_DOWN, &lgmac->state) &&
	    !test_and_set_bit(LGMAC_SERVICE_SCHED, &lgmac->state))
		queue_work(lgmac->workqueue, &lgmac->service_task);
}

/*
 * Return the number of free descriptors in a tx ring. One slot is always
 * kept unused so that cur_tx == dirty_tx unambiguously means "empty".
 */
static u32 lombo_gmac_tx_avail(struct lombo_gmac *lgmac, u32 queue)
{
	struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];

	if (tx_q->dirty_tx > tx_q->cur_tx)
		return tx_q->dirty_tx - tx_q->cur_tx - 1;

	return LGMAC_TX_DESC_CNT - tx_q->cur_tx + tx_q->dirty_tx - 1;
}

/* Return how many rx descriptors have been consumed but not yet refilled. */
static u32 lombo_gmac_rx_dirty(struct lombo_gmac *lgmac, u32 queue)
{
	struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		return rx_q->cur_rx - rx_q->dirty_rx;

	return LGMAC_RX_DESC_CNT - rx_q->dirty_rx + rx_q->cur_rx;
}

/* Enter LPI (low power idle) state, but only once every tx ring is idle. */
static void lombo_gmac_enable_eee_mode(struct lombo_gmac *lgmac)
{
	u32 q;

	for (q = 0; q < lgmac->tx_queues_to_use; q++) {
		struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[q];

		/* still frames in flight on this queue: do not enter LPI */
		if (tx_q->dirty_tx != tx_q->cur_tx)
			return;
	}

	if (!lgmac->tx_lpi_enter)
		csp_gmac_set_lpi_mode(lgmac->base_addr, 1);
}

/* Leave LPI state and stop the timer that would re-enter it. */
static void lombo_gmac_disable_eee_mode(struct lombo_gmac *lgmac)
{
	csp_gmac_set_lpi_mode(lgmac->base_addr, 0);
	del_timer_sync(&lgmac->eee_ctrl_timer);
	lgmac->tx_lpi_enter = false;
}

/* Periodic timer: try to enter LPI, then rearm for the next attempt. */
static void lombo_gmac_eee_ctrl_timer(struct timer_list *t)
{
	struct lombo_gmac *lgmac = from_timer(lgmac, t, eee_ctrl_timer);
	unsigned long next = jiffies + msecs_to_jiffies(LGMAC_LPI_TIMER_MS);

	lombo_gmac_enable_eee_mode(lgmac);
	mod_timer(&lgmac->eee_ctrl_timer, next);
}

/**
 * lombo_gmac_eee_init - negotiate and configure Energy Efficient Ethernet.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Asks phylib whether the attached PHY supports EEE. On success, arms the
 * LPI entry timer and programs the MAC LPI timers; on failure, tears any
 * previously active EEE state back down.
 *
 * return 0 if EEE is active; otherwise a negative error code.
 */
static int lombo_gmac_eee_init(struct lombo_gmac *lgmac)
{
	int ret = 0;

	/* second argument requests PHY clock-stop capability check */
	ret = phy_init_eee(lgmac->phy_dev, 1);
	if (ret) {
		mutex_lock(&lgmac->lock);
		if (lgmac->eee_active) {
			/* EEE was active: stop the entry timer and clear
			 * the LS timer in hardware
			 */
			del_timer_sync(&lgmac->eee_ctrl_timer);
			csp_gmac_set_lpi_timer(lgmac->base_addr, 0,
					       lgmac->tx_lpi_time);
		}
		lgmac->eee_active = 0;
		mutex_unlock(&lgmac->lock);
		return ret;
	}

	mutex_lock(&lgmac->lock);
	if (!lgmac->eee_active) {
		lgmac->eee_active = 1;
		/* periodically try to enter LPI while tx rings are idle */
		timer_setup(&lgmac->eee_ctrl_timer,
			    lombo_gmac_eee_ctrl_timer, 0);
		mod_timer(&lgmac->eee_ctrl_timer,
			  jiffies + msecs_to_jiffies(LGMAC_LPI_TIMER_MS));
		csp_gmac_set_lpi_timer(lgmac->base_addr,
				       LGMAC_DEF_LPI_LS_TIMER,
				       lgmac->tx_lpi_time);
	}
	/* mirror the current PHY link state into the LPI PLS setting */
	csp_gmac_set_lpi_pls(lgmac->base_addr, lgmac->phy_dev->link);
	mutex_unlock(&lgmac->lock);

	return 0;
}

/**
 * lombo_gmac_flow_control - program pause-frame (flow control) handling.
 * @lgmac: pointer to lombo gmac struct.
 */
static void lombo_gmac_flow_control(struct lombo_gmac *lgmac)
{
	u32 rx_en = lgmac->flow_ctrl & LGMAC_FLOW_CTRL_RX;
	u32 tx_en = lgmac->flow_ctrl & LGMAC_FLOW_CTRL_TX;
	u32 q;

	csp_gmac_set_rx_flow_control(lgmac->base_addr, rx_en);

	for (q = 0; q < lgmac->tx_queues_to_use; q++)
		csp_gmac_set_tx_flow_control(lgmac->base_addr, q, tx_en,
					     lgmac->old_duplex,
					     LGMAC_FLOW_CTRL_PAUSE);
}

/**
 * lombo_gmac_set_speed - set speed in MII/RMII/RGMII interface.
 * @lgmac: pointer to lombo gmac struct.
 * @speed: speed to set, should be 10/100/1000.
 *
 * Also reprograms the per-speed tx driver delay and rx sample delay
 * taken from the delay tables (index 0/1/2 for 10/100/1000 Mbps).
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_set_speed(struct lombo_gmac *lgmac, int speed)
{
	int idx;

	switch (speed) {
	case 10:
		idx = 0;
		break;
	case 100:
		idx = 1;
		break;
	case 1000:
		idx = 2;
		break;
	default:
		PRT_WARN("%s: unsupported speed %d\n",
			 lgmac->net_dev->name,
			 speed);
		return -EINVAL;
	}

	csp_gmac_set_speed(lgmac->base_addr, speed);

	/* set tx driver delay and rx sample delay */
	csp_gmac_set_tx_drv_delay(lgmac->base_addr, lgmac->tx_delay[idx]);
	csp_gmac_set_rx_smp_delay(lgmac->base_addr, lgmac->rx_delay[idx]);

	return 0;
}

/**
 * lombo_gmac_adjust_link - to fix the link status.
 * @ndev: pointer to net device struct.
 *
 * phylib callback: mirrors the PHY's negotiated link, speed and duplex
 * into the MAC registers and refreshes the EEE configuration.
 */
static void lombo_gmac_adjust_link(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	u32 new_state = 0;

	if (!phy_dev) {
		PRT_WARN("%s: not any phy device exist\n",
			 lgmac->net_dev->name);
		return;
	}

	PRT_DBG("%s: PHY(addr:%d link:%d speed:%d duplex:%d pause:%d)\n",
		lgmac->net_dev->name,
		lgmac->phy_addr, phy_dev->link,
		phy_dev->speed, phy_dev->duplex,
		phy_dev->pause);

	mutex_lock(&lgmac->lock);

	if (phy_dev->link) {
		/* reprogram MAC speed (and delays) only on a change */
		if (phy_dev->speed != lgmac->old_speed) {
			lgmac->old_speed = phy_dev->speed;
			lombo_gmac_set_speed(lgmac, phy_dev->speed);
			new_state = 1;
		}

		if (phy_dev->duplex != lgmac->old_duplex) {
			lgmac->old_duplex = phy_dev->duplex;
			csp_gmac_set_duplex_mode(lgmac->base_addr,
						 phy_dev->duplex);
			new_state = 1;
		}

		/* link partner advertised pause: program flow control */
		if (phy_dev->pause)
			lombo_gmac_flow_control(lgmac);

		if (!lgmac->old_link) {
			lgmac->old_link = 1;
			new_state = 1;
		}
	} else if (lgmac->old_link) {
		/* link went down: invalidate the cached parameters */
		lgmac->old_link = 0;
		lgmac->old_speed = 0;
		lgmac->old_duplex = -1;
		new_state = 1;
	}

	if (new_state)
		phy_print_status(phy_dev);

	mutex_unlock(&lgmac->lock);

	/* re-check EEE outside the lock; eee_init takes the lock itself */
	lgmac->eee_enable = !lombo_gmac_eee_init(lgmac);
}

/**
 * lombo_gmac_phy_init - init phy device.
 * @ndev: pointer to net device struct.
 *
 * Connects the MAC to the PHY found at lgmac->phy_addr on the driver's
 * MII bus and trims the advertised/supported link modes.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_phy_init(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	struct phy_device *phy_dev;
	int interface = lgmac->phy_interface;
	char bus_id[MII_BUS_ID_SIZE];
	char phy_id[MII_BUS_ID_SIZE + 3];

	PRT_DBG("%s: init phy device\n", lgmac->net_dev->name);

	/* forget any previously cached link parameters */
	lgmac->old_link = 0;
	lgmac->old_speed = 0;
	lgmac->old_duplex = -1;

	/* build "<busname>-0:<addr>" to look the PHY up on our MII bus */
	snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", LGMAC_MII_BUS_NAME, 0);
	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 lgmac->phy_addr);

	phy_dev = phy_connect(ndev, phy_id, &lombo_gmac_adjust_link, interface);
	if (IS_ERR_OR_NULL(phy_dev)) {
		PRT_ERR("%s: failed to connect phy device\n",
			lgmac->net_dev->name);
		if (!phy_dev)
			return -ENODEV;
		return PTR_ERR(phy_dev);
	}

#ifndef CONFIG_ARCH_LOMBO_N7V3_FPGA
	/* stop advertising 1000base capability if interface is not (R)GMII */
	if (interface != PHY_INTERFACE_MODE_RGMII)
		phy_dev->advertising &= ~(SUPPORTED_1000baseT_Half |
					  SUPPORTED_1000baseT_Full);
#else
	/* FPGA build: never advertise gigabit */
	phy_dev->advertising &= ~(SUPPORTED_1000baseT_Half |
				  SUPPORTED_1000baseT_Full);
#endif

	/* Half-duplex mode not supported with multiqueue,
	 * Half-duplex can only works with single queue.
	 */
	if (lgmac->tx_queues_to_use > 1)
		phy_dev->supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_100baseT_Half |
					SUPPORTED_10baseT_Half);

	/*
	 * Broken hardware is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 */
	if (!phy_dev->phy_id) {
		PRT_ERR("%s: broken phy device %s\n",
			lgmac->net_dev->name,
			phy_id);
		phy_disconnect(phy_dev);
		return -ENODEV;
	}

	lgmac->phy_dev = phy_dev;

	return 0;
}

/* Reinitialize every rx descriptor of one ring to its hardware defaults. */
static void lombo_gmac_clear_rx_desc(struct lombo_gmac *lgmac, u32 queue)
{
	struct dma_desc *ring = lgmac->rx_queue[queue].dma_rx;
	int n;

	for (n = 0; n < LGMAC_RX_DESC_CNT; n++)
		csp_gmac_desc_rx_init(ring + n, lgmac->use_riwt);
}

/* Reinitialize every tx descriptor of one ring to its hardware defaults. */
static void lombo_gmac_clear_tx_desc(struct lombo_gmac *lgmac, u32 queue)
{
	struct dma_desc *ring = lgmac->tx_queue[queue].dma_tx;
	int n;

	for (n = 0; n < LGMAC_TX_DESC_CNT; n++)
		csp_gmac_desc_tx_init(ring + n);
}

/**
 * lombo_gmac_init_rx_buf - allocate and map one receive buffer.
 * @lgmac: pointer to lombo gmac struct.
 * @desc: rx descriptor to attach the buffer to.
 * @i: ring index of the buffer.
 * @flags: gfp flags for the skb allocation.
 * @queue: rx queue index.
 *
 * return 0 if success; otherwise a negative error code.
 */
static int lombo_gmac_init_rx_buf(struct lombo_gmac *lgmac,
				  struct dma_desc *desc,
				  int i, gfp_t flags, u32 queue)
{
	struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = __netdev_alloc_skb_ip_align(lgmac->net_dev,
					  lgmac->dma_buf_sz, flags);
	if (!skb) {
		PRT_WARN("%s: failed to alloc rx socket buffer\n",
			 lgmac->net_dev->name);
		return -ENOMEM;
	}

	addr = dma_map_single(lgmac->dev, skb->data,
			      lgmac->dma_buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(lgmac->dev, addr)) {
		PRT_WARN("%s: DMA mapping error\n", lgmac->net_dev->name);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	/* publish the skb/mapping only after both succeeded, so a failed
	 * mapping does not leave a stale (freed) skb pointer in the ring
	 */
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = addr;

	csp_gmac_desc_set_addr(desc, addr);

	return 0;
}

/* Unmap and free the rx buffer at ring index @i, if one is attached. */
static void lombo_gmac_free_rx_buf(struct lombo_gmac *lgmac, u32 queue, int i)
{
	struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];
	struct sk_buff *skb = rx_q->rx_skbuff[i];

	if (skb) {
		dma_unmap_single(lgmac->dev, rx_q->rx_skbuff_dma[i],
				 lgmac->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * lombo_gmac_free_tx_buf - release the tx mapping and skb at ring index @i.
 * @lgmac: pointer to lombo gmac struct.
 * @queue: tx queue index.
 * @i: ring index of the buffer.
 */
static void lombo_gmac_free_tx_buf(struct lombo_gmac *lgmac, u32 queue, int i)
{
	struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(lgmac->dev,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(lgmac->dev,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
		/* clear the bookkeeping unconditionally (not only when an
		 * skb is attached) so a repeated call cannot unmap twice
		 */
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}
}

/**
 * lombo_gmac_init_dma_rx_desc_rings - attach buffers to all rx rings.
 * @lgmac: pointer to lombo gmac struct.
 * @flags: gfp flags for the skb allocations.
 *
 * Allocates and DMA-maps one receive buffer per descriptor for every rx
 * queue, then reinitializes the descriptors. On failure every buffer
 * attached so far is released again.
 *
 * return 0 if success; otherwise a negative error code.
 */
static int lombo_gmac_init_dma_rx_desc_rings(struct lombo_gmac *lgmac,
					     gfp_t flags)
{
	int i, queue, ret = -ENOMEM;

	for (queue = 0; queue < lgmac->rx_queues_to_use; queue++) {
		struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];

		for (i = 0; i < LGMAC_RX_DESC_CNT; i++) {
			struct dma_desc *desc;

			desc = rx_q->dma_rx + i;

			ret = lombo_gmac_init_rx_buf(lgmac, desc, i, flags,
						     queue);
			if (ret)
				goto exit_free_rx_buf;
		}

		rx_q->cur_rx = 0;
		/* i == LGMAC_RX_DESC_CNT here, so dirty_rx starts at 0 */
		rx_q->dirty_rx = (unsigned int)(i - LGMAC_RX_DESC_CNT);

		lombo_gmac_clear_rx_desc(lgmac, queue);
	}

	return 0;

exit_free_rx_buf:
	/* unwind: free the buffers of the current (partial) queue first,
	 * then all buffers of every fully populated earlier queue
	 */
	while (queue >= 0) {
		while (--i >= 0)
			lombo_gmac_free_rx_buf(lgmac, queue, i);

		if (queue == 0)
			break;

		i = LGMAC_RX_DESC_CNT;
		queue--;
	}

	return ret;
}

static int lombo_gmac_init_dma_tx_desc_rings(struct lombo_gmac *lgmac)
{
	int i, queue;

	for (queue = 0; queue < lgmac->tx_queues_to_use; queue++) {
		struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];

		for (i = 0; i < LGMAC_TX_DESC_CNT; i++) {
			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		lombo_gmac_clear_tx_desc(lgmac, queue);

		netdev_tx_reset_queue(netdev_get_tx_queue(lgmac->net_dev,
							  queue));
	}

	return 0;
}

/* Drop every rx buffer still attached to one rx ring. */
static void lombo_gmac_dma_free_rx_skbs(struct lombo_gmac *lgmac, u32 queue)
{
	int n;

	for (n = 0; n < LGMAC_RX_DESC_CNT; n++)
		lombo_gmac_free_rx_buf(lgmac, queue, n);
}

/* Drop every tx buffer still attached to one tx ring. */
static void lombo_gmac_dma_free_tx_skbs(struct lombo_gmac *lgmac, u32 queue)
{
	int n;

	for (n = 0; n < LGMAC_TX_DESC_CNT; n++)
		lombo_gmac_free_tx_buf(lgmac, queue, n);
}

static void lombo_gmac_free_dma_rx_desc_res(struct lombo_gmac *lgmac)
{
	u32 queue;

	for (queue = 0; queue < lgmac->rx_queues_to_use; queue++) {
		struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];

		lombo_gmac_dma_free_rx_skbs(lgmac, queue);

		dma_free_coherent(lgmac->dev,
				  LGMAC_RX_DESC_CNT * sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

static void lombo_gmac_free_dma_tx_desc_res(struct lombo_gmac *lgmac)
{
	u32 queue;

	for (queue = 0; queue < lgmac->tx_queues_to_use; queue++) {
		struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];

		lombo_gmac_dma_free_tx_skbs(lgmac, queue);

		dma_free_coherent(lgmac->dev,
				  LGMAC_TX_DESC_CNT * sizeof(struct dma_desc),
				  tx_q->dma_tx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

static int lombo_gmac_alloc_dma_rx_desc_res(struct lombo_gmac *lgmac)
{
	u32 queue;

	for (queue = 0; queue < lgmac->rx_queues_to_use; queue++) {
		struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = lgmac;

		rx_q->rx_skbuff_dma = kmalloc_array(LGMAC_RX_DESC_CNT,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto exit_free_dma_rx_desc_res;

		rx_q->rx_skbuff = kmalloc_array(LGMAC_RX_DESC_CNT,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto exit_free_dma_rx_desc_res;

		rx_q->dma_rx = dma_zalloc_coherent(lgmac->dev,
						   LGMAC_RX_DESC_CNT *
						   sizeof(struct
							  dma_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_rx)
			goto exit_free_dma_rx_desc_res;
	}

	return 0;

exit_free_dma_rx_desc_res:
	lombo_gmac_free_dma_rx_desc_res(lgmac);

	return -ENOMEM;
}

static int lombo_gmac_alloc_dma_tx_desc_res(struct lombo_gmac *lgmac)
{
	u32 queue;

	for (queue = 0; queue < lgmac->tx_queues_to_use; queue++) {
		struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = lgmac;

		tx_q->tx_skbuff_dma = kmalloc_array(LGMAC_TX_DESC_CNT,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto exit_free_dma_tx_desc_res;

		tx_q->tx_skbuff = kmalloc_array(LGMAC_TX_DESC_CNT,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto exit_free_dma_tx_desc_res;

		tx_q->dma_tx = dma_zalloc_coherent(lgmac->dev,
						   LGMAC_TX_DESC_CNT *
						   sizeof(struct
							  dma_desc),
						   &tx_q->dma_tx_phy,
						   GFP_KERNEL);
		if (!tx_q->dma_tx)
			goto exit_free_dma_tx_desc_res;
	}

	return 0;

exit_free_dma_tx_desc_res:
	lombo_gmac_free_dma_tx_desc_res(lgmac);

	return -ENOMEM;
}

/**
 * lombo_gmac_alloc_dma_desc_res - allocate rx then tx ring resources.
 * @lgmac: pointer to lombo gmac struct.
 *
 * return 0 if success; otherwise a negative error code.
 */
static int lombo_gmac_alloc_dma_desc_res(struct lombo_gmac *lgmac)
{
	int ret;

	ret = lombo_gmac_alloc_dma_rx_desc_res(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to alloc rx dma resource\n",
			lgmac->net_dev->name);
		return ret;
	}

	ret = lombo_gmac_alloc_dma_tx_desc_res(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to alloc tx dma resource\n",
			lgmac->net_dev->name);
		goto exit_free_dma_rx_desc_res;
	}

	return 0;

exit_free_dma_rx_desc_res:
	/* tx allocation failed: release the rx resources acquired above
	 * (the label was previously misnamed "...tx..." although it frees
	 * the rx side)
	 */
	lombo_gmac_free_dma_rx_desc_res(lgmac);

	return ret;
}

/* Release every rx and tx DMA ring resource. */
static void lombo_gmac_free_dma_desc_res(struct lombo_gmac *lgmac)
{
	lombo_gmac_free_dma_rx_desc_res(lgmac);
	lombo_gmac_free_dma_tx_desc_res(lgmac);
}

/* Start the rx then tx DMA engine of every channel. */
static void lombo_gmac_start_all_dma(struct lombo_gmac *lgmac)
{
	u32 ch;

	for (ch = 0; ch < lgmac->rx_queues_to_use; ch++)
		csp_gmac_dma_ch_rx_enable(lgmac->base_addr, ch, 1);

	for (ch = 0; ch < lgmac->tx_queues_to_use; ch++)
		csp_gmac_dma_ch_tx_enable(lgmac->base_addr, ch, 1);
}

/**
 * lombo_gmac_stop_all_dma - stop the rx and tx DMA engine of every channel.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Fix: the csp helpers take the register base address; the old code
 * passed the driver struct pointer itself (compare start_all_dma).
 */
static void lombo_gmac_stop_all_dma(struct lombo_gmac *lgmac)
{
	u32 chan;

	for (chan = 0; chan < lgmac->rx_queues_to_use; chan++)
		csp_gmac_dma_ch_rx_enable(lgmac->base_addr, chan, 0);

	for (chan = 0; chan < lgmac->tx_queues_to_use; chan++)
		csp_gmac_dma_ch_tx_enable(lgmac->base_addr, chan, 0);
}

/*
 * Program the MTL FIFO partitioning and per-channel DMA parameters: the
 * rx/tx FIFOs are split evenly between the configured queues.
 */
static void lombo_gmac_dma_operation_mode(struct lombo_gmac *lgmac)
{
	u32 rxq_sz = LGMAC_RX_FIFO_SIZE / lgmac->rx_queues_to_use;
	u32 txq_sz = LGMAC_TX_FIFO_SIZE / lgmac->tx_queues_to_use;
	u32 ch;

	for (ch = 0; ch < lgmac->rx_queues_to_use; ch++) {
		csp_gmac_set_rx_queue_threshold(lgmac->base_addr, ch, 1, 0);
		csp_gmac_set_rx_queue_size(lgmac->base_addr, ch, rxq_sz);
		csp_gmac_set_dma_ch_rbsz(lgmac->base_addr, ch,
					 lgmac->dma_buf_sz);
	}

	for (ch = 0; ch < lgmac->tx_queues_to_use; ch++) {
		csp_gmac_set_tx_queue_threshold(lgmac->base_addr, ch, 1, 0);
		csp_gmac_set_tx_queue_enable(lgmac->base_addr, ch,
					     LGMAC_TXQEN_ENABLE);
		csp_gmac_set_tx_queue_size(lgmac->base_addr, ch, txq_sz);
	}
}

/**
 * lombo_gmac_tx_clean - reclaim completed tx descriptors.
 * @lgmac: pointer to lombo gmac struct.
 * @budget: maximum number of descriptors to process.
 * @queue: tx queue index.
 *
 * Walks the tx ring from dirty_tx, unmaps and frees every buffer the
 * hardware has finished with, updates statistics, wakes the netdev queue
 * when enough room is available again and, with EEE enabled, tries to
 * enter LPI.
 *
 * return the number of descriptors processed.
 */
static int lombo_gmac_tx_clean(struct lombo_gmac *lgmac, int budget, u32 queue)
{
	struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];
	u32 bytes_compl = 0, pkts_compl = 0;
	u32 entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(lgmac->net_dev, queue));

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *desc;
		int status;

		desc = tx_q->dma_tx + entry;
		status = csp_gmac_desc_tx_status(desc, &lgmac->net_dev->stats);
		/* descriptor still owned by the DMA: stop here */
		if (unlikely(status & LGMAC_TX_DMA_OWN))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ... */
		if (likely(!(status & LGMAC_TX_NOT_LS))) {
			/* Verify the status error condition. */
			if (unlikely(status & LGMAC_TX_ERROR))
				lgmac->net_dev->stats.tx_errors++;
			else
				lgmac->net_dev->stats.tx_packets++;
		}

		/* release the DMA mapping of this segment */
		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(lgmac->dev,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(lgmac->dev,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		tx_q->tx_skbuff_dma[entry].last_segment = false;

		/* free the skb attached to this ring slot, if any */
		if (likely(skb)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		entry = LGMAC_GET_ENTRY(entry, LGMAC_TX_DESC_CNT);
	}

	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(lgmac->net_dev, queue),
				  pkts_compl, bytes_compl);

	/* wake the queue once enough descriptors were reclaimed */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(lgmac->net_dev,
								queue))) &&
		     lombo_gmac_tx_avail(lgmac, queue) > LGMAC_TX_DESC_THRESH) {
		PRT_DBG("%s: restart transmit\n", lgmac->net_dev->name);
		netif_tx_wake_queue(netdev_get_tx_queue(lgmac->net_dev, queue));
	}

	/* try to enter LPI now and rearm the periodic EEE timer */
	if (lgmac->eee_enable && !lgmac->tx_lpi_enter) {
		lombo_gmac_enable_eee_mode(lgmac);
		mod_timer(&lgmac->eee_ctrl_timer,
			  jiffies + msecs_to_jiffies(LGMAC_LPI_TIMER_MS));
	}

	__netif_tx_unlock_bh(netdev_get_tx_queue(lgmac->net_dev, queue));

	return count;
}

/**
 * lombo_gmac_tx_error - clean tx desc and restart transmit in case of errors.
 * @lgmac: pointer to lombo gmac struct.
 * @chan: tx DMA channel to recover.
 */
static void lombo_gmac_tx_error(struct lombo_gmac *lgmac, u32 chan)
{
	struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[chan];
	int i;

	/* stop the software queue, then quiesce the hardware tx path */
	netif_tx_stop_queue(netdev_get_tx_queue(lgmac->net_dev, chan));

	csp_gmac_set_transmitter(lgmac->base_addr, 0);
	csp_gmac_dma_ch_tx_enable(lgmac->base_addr, chan, 0);

	/* drop all pending buffers and reinitialize the descriptor ring */
	lombo_gmac_dma_free_tx_skbs(lgmac, chan);
	for (i = 0; i < LGMAC_TX_DESC_CNT; i++)
		csp_gmac_desc_tx_init(&tx_q->dma_tx[i]);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(lgmac->net_dev, chan));

	/* restart DMA and the MAC transmitter, then resume the queue */
	csp_gmac_dma_ch_tx_enable(lgmac->base_addr, chan, 1);
	csp_gmac_set_transmitter(lgmac->base_addr, 1);

	lgmac->net_dev->stats.tx_errors++;

	netif_tx_wake_queue(netdev_get_tx_queue(lgmac->net_dev, chan));
}

/**
 * lombo_gmac_dma_irq - DMA ISR.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Reads and acknowledges the per-channel DMA status, schedules NAPI for
 * rx/tx completion interrupts and recovers from fatal tx errors.
 */
static void lombo_gmac_dma_irq(struct lombo_gmac *lgmac)
{
	u32 maxq = max(lgmac->rx_queues_to_use, lgmac->tx_queues_to_use);
	u32 status, chan;

	for (chan = 0; chan < maxq; chan++) {
		struct lgmac_channel *ch = &lgmac->channel[chan];

		/* read the channel status and acknowledge the bits seen */
		status = csp_gmac_get_dma_ch_status(lgmac->base_addr, chan);
		csp_gmac_clr_dma_ch_status(lgmac->base_addr, chan, status);

		/* rx/tx complete: mask the channel irq and poll via NAPI;
		 * the irq is re-enabled when the poll routine finishes
		 */
		if ((ch->has_rx && (status & LGMAC_DMA_CH_INT_RI)) ||
		    (ch->has_tx && (status & LGMAC_DMA_CH_INT_TI))) {
			if (napi_schedule_prep(&ch->napi)) {
				csp_gmac_dma_ch_int_disable(lgmac->base_addr,
							    chan);
				__napi_schedule(&ch->napi);
			}
		}

		/* tx process stopped / fatal bus error: reset the tx ring */
		if (chan < lgmac->tx_queues_to_use) {
			if (unlikely(status & (LGMAC_DMA_CH_INT_TPS |
					       LGMAC_DMA_CH_INT_FBE))) {
				PRT_ERR("%s: DMA tx error\n",
					lgmac->net_dev->name);
				lombo_gmac_tx_error(lgmac, chan);
			}
		}
	}
}

/**
 * lombo_gmac_check_ether_addr - verify the hardware address.
 * @lgmac: pointer to lombo gmac struct.
 */
static void lombo_gmac_check_ether_addr(struct lombo_gmac *lgmac)
{
	if (!is_valid_ether_addr(lgmac->net_dev->dev_addr)) {
		csp_gmac_get_mac_addr(lgmac->base_addr,
				      lgmac->net_dev->dev_addr);

		if (!is_valid_ether_addr(lgmac->net_dev->dev_addr))
			eth_hw_addr_random(lgmac->net_dev);
	}

	PRT_DBG("%s: hardware address - %02x:%02x:%02x:%02x:%02x:%02x\n",
		lgmac->net_dev->name,
		lgmac->net_dev->dev_addr[0], lgmac->net_dev->dev_addr[1],
		lgmac->net_dev->dev_addr[2], lgmac->net_dev->dev_addr[3],
		lgmac->net_dev->dev_addr[4], lgmac->net_dev->dev_addr[5]);
}

/**
 * lombo_gmac_dma_soft_reset - dma software reset.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Triggers the reset and polls its completion bit for up to ~100 ms.
 *
 * return 0 if success; -EBUSY if the reset did not complete in time.
 */
static int lombo_gmac_dma_soft_reset(struct lombo_gmac *lgmac)
{
	int i;

	csp_gmac_dma_soft_reset(lgmac->base_addr);

	for (i = 0; i < 100; i++) {
		if (!csp_gmac_dma_soft_reset_status(lgmac->base_addr))
			return 0;
		mdelay(1);
	}

	return -EBUSY;
}

/**
 * lombo_gmac_init_dma_engine - program the DMA/AXI engine and all channels.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Configures the AXI bus parameters, enables interrupts on every CSR
 * channel and programs the descriptor base/tail pointers of every rx
 * and tx DMA channel.
 *
 * return 0 (no failure paths at present).
 */
static int lombo_gmac_init_dma_engine(struct lombo_gmac *lgmac)
{
	u32 dma_csr_ch = max(lgmac->rx_queues_to_use, lgmac->tx_queues_to_use);
	struct lgmac_rx_queue *rx_q;
	struct lgmac_tx_queue *tx_q;
	u32 chan = 0;

	/* DMA Configuration */
	csp_gmac_set_addr_align_beat(lgmac->base_addr, 1);
	csp_gmac_set_axi_wr_osr_limit(lgmac->base_addr,
				      LGMAC_MAX_OUTSTAND_LIMIT);
	csp_gmac_set_axi_rd_osr_limit(lgmac->base_addr,
				      LGMAC_MAX_OUTSTAND_LIMIT);
	csp_gmac_set_enhanced_address_mode(lgmac->base_addr, 0);
	csp_gmac_set_fixed_burst_len(lgmac->base_addr, 0);
	/* allow AXI bursts of 4, 8 and 16 beats */
	csp_gmac_set_axi_burst_len(lgmac->base_addr,
				   LGMAC_AXI_BLEN_4 | LGMAC_AXI_BLEN_8 |
				   LGMAC_AXI_BLEN_16);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		csp_gmac_set_dma_ch_8xpbl(lgmac->base_addr, chan, 0);
		csp_gmac_dma_ch_int_enable(lgmac->base_addr, chan);
	}

	/* DMA Rx Channel Configuration */
	for (chan = 0; chan < lgmac->rx_queues_to_use; chan++) {
		rx_q = &lgmac->rx_queue[chan];

		csp_gmac_set_dma_ch_rxpbl(lgmac->base_addr, chan,
					  LGMAC_DMA_MAX_BURST);
		csp_gmac_set_rx_desc_base(lgmac->base_addr, chan,
					  rx_q->dma_rx_phy);
		/* rx tail starts one past the last descriptor of the ring */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			(LGMAC_RX_DESC_CNT * sizeof(struct dma_desc));
		csp_gmac_set_rx_desc_tail(lgmac->base_addr, chan,
					  rx_q->rx_tail_addr);
	}

	/* DMA Tx Channel Configuration */
	for (chan = 0; chan < lgmac->tx_queues_to_use; chan++) {
		tx_q = &lgmac->tx_queue[chan];

		csp_gmac_set_dma_ch_txpbl(lgmac->base_addr, chan,
					  LGMAC_DMA_MAX_BURST);
		csp_gmac_set_tx_desc_base(lgmac->base_addr, chan,
					  tx_q->dma_tx_phy);
		/* tx tail starts at the ring base: nothing queued yet */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		csp_gmac_set_tx_desc_tail(lgmac->base_addr, chan,
					  tx_q->tx_tail_addr);
	}

	return 0;
}

/* (Re)start the tx coalescing timer of one queue. */
static void lombo_gmac_tx_timer_arm(struct lombo_gmac *lgmac, u32 queue)
{
	unsigned long expires;

	expires = jiffies + usecs_to_jiffies(lgmac->tx_coal_timer);
	mod_timer(&lgmac->tx_queue[queue].txtimer, expires);
}

/* Coalescing timer expired: kick NAPI so pending tx work gets cleaned. */
static void lombo_gmac_tx_coal_timer(struct timer_list *t)
{
	struct lgmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
	struct lombo_gmac *lgmac = tx_q->priv_data;
	struct lgmac_channel *ch = &lgmac->channel[tx_q->queue_index];

	if (likely(napi_schedule_prep(&ch->napi)))
		__napi_schedule(&ch->napi);
}

/* Set the default tx coalescing parameters and one timer per tx queue. */
static void lombo_gmac_init_tx_coalesce(struct lombo_gmac *lgmac)
{
	u32 q;

	lgmac->tx_coal_frames = LGMAC_TX_COAL_FRAME;
	lgmac->tx_coal_timer = LGMAC_TX_COAL_TIMER;

	for (q = 0; q < lgmac->tx_queues_to_use; q++)
		timer_setup(&lgmac->tx_queue[q].txtimer,
			    lombo_gmac_tx_coal_timer, 0);
}

/* Program the ring lengths; the hardware expects "count - 1". */
static void lombo_gmac_set_rings_length(struct lombo_gmac *lgmac)
{
	u32 ch;

	for (ch = 0; ch < lgmac->rx_queues_to_use; ch++)
		csp_gmac_set_rx_ring_len(lgmac->base_addr, ch,
					 LGMAC_RX_DESC_CNT - 1);

	for (ch = 0; ch < lgmac->tx_queues_to_use; ch++)
		csp_gmac_set_tx_ring_len(lgmac->base_addr, ch,
					 LGMAC_TX_DESC_CNT - 1);
}

/*
 * Configure the MTL layer: queue arbitration (only meaningful with more
 * than one queue), rx routing/priorities and rx queue-to-channel mapping.
 */
static void lombo_gmac_mtl_config(struct lombo_gmac *lgmac)
{
	u32 q;

	/* weighted round-robin between multiple tx queues */
	if (lgmac->tx_queues_to_use > 1) {
		csp_gmac_set_tx_algorithm(lgmac->base_addr, LGMAC_MTL_TSA_WRR);
		for (q = 0; q < lgmac->tx_queues_to_use; q++) {
			csp_gmac_set_tx_queue_weight(lgmac->base_addr, q, 32);
			csp_gmac_set_tx_queue_priority(lgmac->base_addr, q,
						       BIT(q));
		}
	}

	/* strict-priority arbitration between multiple rx queues */
	if (lgmac->rx_queues_to_use > 1) {
		csp_gmac_set_rx_algorithm(lgmac->base_addr, LGMAC_MTL_RAA_STR);
		for (q = 0; q < lgmac->rx_queues_to_use; q++) {
			csp_gmac_set_rx_queue_priority(lgmac->base_addr, q,
						       BIT(q));
			csp_gmac_set_rx_queue_routing(lgmac->base_addr, q,
						      LGMAC_ROUTE_AVCPQ);
		}
	}

	/* map each rx queue onto its DMA channel and enable it for DCB */
	for (q = 0; q < lgmac->rx_queues_to_use; q++) {
		csp_gmac_set_rx_queue_map(lgmac->base_addr, q, q);
		csp_gmac_set_rx_queue_enable(lgmac->base_addr, q,
					     LGMAC_RXQEN_DCB);
	}
}

/**
 * lombo_gmac_core_init - init mac core.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Programs the base MAC configuration (widening the frame limit for
 * large MTUs), enables clear-on-write status handling and unmasks the
 * default core interrupts.
 */
static void lombo_gmac_core_init(struct lombo_gmac *lgmac)
{
	unsigned int mtu = lgmac->net_dev->mtu;
	u32 config = LGMAC_DEF_CONFIG;

	if (mtu > 1500)
		config |= LGMAC_CONFIG_2K;
	if (mtu > 2000)
		config |= LGMAC_CONFIG_JE;

	csp_gmac_set_base_config(lgmac->base_addr, config);
	csp_gmac_set_clear_on_write(lgmac->base_addr, 1);
	csp_gmac_set_core_int_enable(lgmac->base_addr, LGMAC_DEF_INT_MASK);
}

/**
 * lombo_gmac_hw_setup - bring the DMA engine, MAC core and MTL block up.
 * @lgmac: pointer to lombo gmac struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_hw_setup(struct lombo_gmac *lgmac)
{
	u32 chan;
	int ret;

	ret = lombo_gmac_init_dma_engine(lgmac);
	if (ret < 0) {
		PRT_ERR("%s: DMA engine initialization failed\n",
			lgmac->net_dev->name);
		return ret;
	}

	/* make sure a valid MAC address is programmed into the hardware */
	lombo_gmac_check_ether_addr(lgmac);
	csp_gmac_set_mac_addr(lgmac->base_addr, lgmac->net_dev->dev_addr);

	lombo_gmac_core_init(lgmac);

	lombo_gmac_mtl_config(lgmac);

	csp_gmac_set_checksum_offload(lgmac->base_addr, lgmac->rx_csum);

	/* enable the MAC transmit and receive paths */
	csp_gmac_set_transmitter(lgmac->base_addr, 1);
	csp_gmac_set_receiver(lgmac->base_addr, 1);

	lombo_gmac_dma_operation_mode(lgmac);

	lgmac->tx_lpi_time = LGMAC_DEF_LPI_TW_TIMER;

	/* program the rx interrupt watchdog when coalescing is in use */
	if (lgmac->use_riwt) {
		lgmac->rx_wdt_cnt = LGMAC_MAX_RX_WDT_CNT;
		for (chan = 0; chan < lgmac->rx_queues_to_use; chan++)
			csp_gmac_set_dma_ch_rxwdt(lgmac->base_addr, chan,
						  LGMAC_MAX_RX_WDT_CNT);
	}

	lombo_gmac_set_rings_length(lgmac);

	/* enable TCP segmentation offload per tx channel if configured */
	if (lgmac->tso_en) {
		for (chan = 0; chan < lgmac->tx_queues_to_use; chan++)
			csp_gmac_dma_ch_tso_enable(lgmac->base_addr, chan, 1);
	}

	lombo_gmac_start_all_dma(lgmac);

	return 0;
}

/**
 * lombo_gmac_mdio_wait_busy - wait (up to 3 s) for the MDIO bus to idle.
 * @lgmac: pointer to lombo gmac struct.
 *
 * return 0 if success; !0 if timeout.
 */
static int lombo_gmac_mdio_wait_busy(struct lombo_gmac *lgmac)
{
	unsigned long timeout = jiffies + 3 * HZ;

	while (csp_gmac_check_mdio_busy(lgmac->base_addr)) {
		if (time_after_eq(jiffies, timeout))
			return -EBUSY;
		cpu_relax();
	}

	return 0;
}

/**
 * lombo_gmac_mdio_read - read a PHY register over MDIO.
 * @bus: pointer to mii bus struct.
 * @phy_id: phy device address.
 * @reg_num: phy register address.
 *
 * return >=0 the data; <0 error number.
 */
static int lombo_gmac_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
{
	struct net_device *ndev = bus->priv;
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	/* any previous transaction must have finished first */
	if (lombo_gmac_mdio_wait_busy(lgmac))
		return -EBUSY;

	csp_gmac_config_mdio_read(lgmac->base_addr, phy_id, reg_num);

	/* wait for this read transaction to complete */
	if (lombo_gmac_mdio_wait_busy(lgmac))
		return -EBUSY;

	return (int)csp_gmac_mdio_read_data(lgmac->base_addr);
}

/**
 * lombo_gmac_mdio_write - write a PHY register over MDIO.
 * @bus: pointer to mii bus struct.
 * @phy_id: phy device address.
 * @reg_num: phy register address.
 * @data: data to write.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_mdio_write(struct mii_bus *bus, int phy_id,
				 int reg_num, u16 data)
{
	struct net_device *ndev = bus->priv;
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	/* any previous transaction must have finished first */
	if (lombo_gmac_mdio_wait_busy(lgmac))
		return -EBUSY;

	/* load the data register, then start the write transaction */
	csp_gmac_mdio_write_data(lgmac->base_addr, data);
	csp_gmac_config_mdio_write(lgmac->base_addr, phy_id, reg_num);

	return lombo_gmac_mdio_wait_busy(lgmac);
}

/**
 * lombo_gmac_mdio_reset - reset the MDIO interface.
 * @bus: pointer to mii bus struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_mdio_reset(struct mii_bus *bus)
{
	struct net_device *ndev = bus->priv;
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	unsigned long core_rate = clk_get_rate(lgmac->clk_core);

	csp_gmac_mdio_reset(lgmac->base_addr);
	/* rescale the MDC divider against the current core clock rate */
	csp_gmac_fix_mdio_clock(lgmac->base_addr, core_rate);

	return 0;
}

/**
 * lombo_gmac_mdio_register - register mdio bus.
 * @ndev: pointer to net device struct.
 *
 * Allocates and registers the driver's MII bus, then scans it for PHY
 * devices; fails when none is found.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_mdio_register(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	struct mii_bus *mbus;
	struct phy_device *phy_dev;
	int addr, found, ret = 0;

	mbus = mdiobus_alloc();
	if (!mbus) {
		PRT_ERR("%s: failed to alloc mdio bus\n", lgmac->net_dev->name);
		return -ENOMEM;
	}

	mbus->name = LGMAC_MII_BUS_NAME;
	mbus->read = &lombo_gmac_mdio_read;
	mbus->write = &lombo_gmac_mdio_write;
	mbus->reset = &lombo_gmac_mdio_reset;
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s-%x", mbus->name, 0);
	mbus->priv = ndev;
	mbus->phy_mask = 0;
	mbus->parent = lgmac->dev;

	/* registration triggers the reset callback and the bus scan */
	ret = mdiobus_register(mbus);
	if (ret) {
		PRT_ERR("%s: failed to register mdio bus (%d)\n",
			lgmac->net_dev->name, ret);
		goto exit_free_mdiobus;
	}

	found = 0;
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		phy_dev = mdiobus_get_phy(mbus, addr);
		if (!phy_dev)
			continue;

		/*
		 * If we're going to bind the MAC to this PHY bus,
		 * and no PHY address number was provided to the MAC,
		 * use the one probed here.
		 */
		if (lgmac->phy_addr < 0)
			lgmac->phy_addr = addr;

		phy_attached_info(phy_dev);

		found = 1;
	}

	if (!found) {
		PRT_WARN("%s: failed to found any phy device\n",
			 lgmac->net_dev->name);
		ret = -ENODEV;
		goto exit_unregister_mdiobus;
	}

	lgmac->mii_bus = mbus;

	PRT_DBG("%s: mdio bus registered\n", lgmac->net_dev->name);

	return 0;

exit_unregister_mdiobus:
	mdiobus_unregister(mbus);

exit_free_mdiobus:
	mdiobus_free(mbus);

	return ret;
}

/**
 * lombo_gmac_mdio_unregister - unregister and free the mdio bus.
 * @ndev: pointer to net device struct.
 *
 * No-op when no bus was registered.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_mdio_unregister(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	if (!lgmac->mii_bus)
		return 0;

	mdiobus_unregister(lgmac->mii_bus);
	lgmac->mii_bus->priv = NULL;
	mdiobus_free(lgmac->mii_bus);
	lgmac->mii_bus = NULL;

	/* log only after the bus has actually been torn down */
	PRT_DBG("%s: mdio bus unregistered\n", lgmac->net_dev->name);

	return 0;
}

/**
 * lombo_gmac_phy_reset - phy device hardware reset or power down.
 * @lgmac: pointer to lombo gmac struct.
 * @reset: 0 means power down; !0 means reset and power on.
 *
 * Does nothing when no valid reset gpio was configured.
 */
static void lombo_gmac_phy_reset(struct lombo_gmac *lgmac, u32 reset)
{
	int pin = lgmac->phy_rst_pin;
	int active = lgmac->phy_rst_level;

	if (!gpio_is_valid(pin))
		return;

	if (!reset) {
		/* hold the phy in its powered-down state */
		gpio_direction_output(pin, active);
		return;
	}

	/* pulse the reset line, then give the phy time to come up */
	gpio_direction_output(pin, !active);
	mdelay(5);
	gpio_direction_output(pin, active);
	mdelay(5);
	gpio_direction_output(pin, !active);
	mdelay(60);
}

/**
 * lombo_gmac_phy_set_interface - program the MAC for the phy interface type.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Supports MII, RMII and RGMII; anything else is rejected.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_phy_set_interface(struct lombo_gmac *lgmac)
{
	switch (lgmac->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		PRT_DBG("%s: phy interface type is MII\n",
			lgmac->net_dev->name);
		csp_gmac_set_phy_interface(lgmac->base_addr,
					   LGMAC_PHY_TYPE_MII_GMII);
		break;
	case PHY_INTERFACE_MODE_RMII:
		PRT_DBG("%s: phy interface type is RMII\n",
			lgmac->net_dev->name);
		csp_gmac_set_phy_interface(lgmac->base_addr,
					   LGMAC_PHY_TYPE_RMII);
		if (lgmac->rmii_osc_ext) {
			/* external oscillator drives the RMII TX clock */
			csp_gmac_set_rmii_tx_clk_src(lgmac->base_addr, 1);
			csp_gmac_set_rmii_tx_clk_invert(lgmac->base_addr, 0);
		} else {
			/* internal source, inverted */
			csp_gmac_set_rmii_tx_clk_src(lgmac->base_addr, 0);
			csp_gmac_set_rmii_tx_clk_invert(lgmac->base_addr, 1);
		}
		break;
	case PHY_INTERFACE_MODE_RGMII:
		PRT_DBG("%s: phy interface type is RGMII\n",
			lgmac->net_dev->name);
		csp_gmac_set_phy_interface(lgmac->base_addr,
					   LGMAC_PHY_TYPE_RGMII);
		break;
	default:
		PRT_ERR("%s: unsupported phy interface type %d\n",
			lgmac->net_dev->name,
			lgmac->phy_interface);
		return -EPFNOSUPPORT;
	}

#ifdef CONFIG_ARCH_LOMBO_N7V3_FPGA
	csp_gmac_set_gmii_tx_clk_invert(lgmac->base_addr, 1);
#endif

	return 0;
}

/**
 * lombo_gmac_clock_init - init module clock.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Without generic PM domains every clock handle is enabled by hand;
 * with PM domains the runtime-PM core manages the clocks instead.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_clock_init(struct lombo_gmac *lgmac)
{
#ifndef CONFIG_PM_GENERIC_DOMAINS
	int i, ret = 0;
	/* names used only for error reporting; order matches clk_handle[] */
	static const char * const clk_name[] = {
		"gmac_core_clk", "gmac_sclk_0", "gmac_sclk_1",
		"gmac_ahb_gate", "gmac_mem_axi_gate", "gmac_ahb_reset"
	};
	struct clk **clk_handle[] = {
		&lgmac->clk_core, &lgmac->clk_sclk0, &lgmac->clk_sclk1,
		&lgmac->clk_gate, &lgmac->clk_axi, &lgmac->clk_reset
	};

	for (i = 0; i < ARRAY_SIZE(clk_handle); i++) {
		ret = clk_prepare_enable(*clk_handle[i]);
		if (ret) {
			PRT_ERR("%s: failed to enable %s (%d)\n",
				lgmac->net_dev->name,
				clk_name[i],
				ret);
			goto exit_disable_clock;
		}
	}
#else
	pm_runtime_get_sync(lgmac->dev);
#endif

	return 0;

#ifndef CONFIG_PM_GENERIC_DOMAINS
exit_disable_clock:
	/* unwind: disable only the clocks enabled before the failure */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(*clk_handle[i]);

	return ret;
#endif
}

/**
 * lombo_gmac_clock_deinit - deinit module clock.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Counterpart of lombo_gmac_clock_init().
 */
static void lombo_gmac_clock_deinit(struct lombo_gmac *lgmac)
{
#ifndef CONFIG_PM_GENERIC_DOMAINS
	/*
	 * NOTE(review): this sequence is not the exact reverse of the
	 * enable order in lombo_gmac_clock_init() (the AHB gate is
	 * released last here) — confirm whether the hardware requires a
	 * strict reverse order.
	 */
	clk_disable_unprepare(lgmac->clk_core);
	clk_disable_unprepare(lgmac->clk_sclk0);
	clk_disable_unprepare(lgmac->clk_sclk1);
	clk_disable_unprepare(lgmac->clk_axi);
	clk_disable_unprepare(lgmac->clk_reset);
	clk_disable_unprepare(lgmac->clk_gate);
#else
	pm_runtime_put_sync(lgmac->dev);
#endif
}

/*
 * __lombo_gmac_open - bring the interface up (module clocks already on).
 * @ndev: pointer to net device struct.
 *
 * Step order: enable internal clock gates, configure the phy interface,
 * hardware-reset the phy, soft-reset the DMA, register the mdio bus,
 * init the phy, size and allocate the DMA descriptor rings, set up the
 * hardware, start the phy, request the IRQ and start all queues.
 * On error the labels below unwind the steps in reverse order and also
 * disable the clocks on behalf of the caller (lombo_gmac_open()).
 *
 * return 0 if success; otherwise failed.
 */
static int __lombo_gmac_open(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	u32 chan;
	int ret;

	netif_carrier_off(ndev);

	csp_gmac_enable_clk_gate(lgmac->base_addr, LGMAC_CLK_GATE_ALL);

	ret = lombo_gmac_phy_set_interface(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to set phy interface (%d)\n",
			lgmac->net_dev->name, ret);
		goto exit_disable_clock_gate;
	}

	lombo_gmac_phy_reset(lgmac, 1);

	ret = lombo_gmac_dma_soft_reset(lgmac);
	if (ret) {
		PRT_ERR("%s: dma software reset timeout\n",
			lgmac->net_dev->name);
		goto exit_reset_phy;
	}

	/* the bus may survive a previous close; register only when absent */
	if (!lgmac->mii_bus) {
		ret = lombo_gmac_mdio_register(ndev);
		if (ret) {
			PRT_ERR("%s: failed to register mdio bus (%d)\n",
				lgmac->net_dev->name, ret);
			goto exit_reset_phy;
		}
	}

	ret = lombo_gmac_phy_init(ndev);
	if (ret) {
		PRT_ERR("%s: failed to init phy device (%d)\n",
			lgmac->net_dev->name, ret);
		goto exit_unregister_mdio;
	}

	/* pick the smallest rx buffer size that still fits the MTU */
	if (ndev->mtu >= LGMAC_BUF_SIZE_4K)
		lgmac->dma_buf_sz = LGMAC_BUF_SIZE_8K;
	else if (ndev->mtu >= LGMAC_BUF_SIZE_2K)
		lgmac->dma_buf_sz = LGMAC_BUF_SIZE_4K;
	else if (ndev->mtu > LGMAC_BUF_SIZE_DEF)
		lgmac->dma_buf_sz = LGMAC_BUF_SIZE_2K;
	else
		lgmac->dma_buf_sz = LGMAC_BUF_SIZE_DEF;

	ret = lombo_gmac_alloc_dma_desc_res(lgmac);
	if (ret < 0) {
		PRT_ERR("%s: failed to alloc dma resource\n",
			lgmac->net_dev->name);
		goto exit_disconnect_phy;
	}

	ret = lombo_gmac_init_dma_rx_desc_rings(lgmac, GFP_KERNEL);
	if (ret) {
		PRT_ERR("%s: failed to init rx desc rings\n",
			lgmac->net_dev->name);
		goto exit_free_dma_resource;
	}

	ret = lombo_gmac_init_dma_tx_desc_rings(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to init tx desc rings\n",
			lgmac->net_dev->name);
		goto exit_free_dma_resource;
	}

	ret = lombo_gmac_hw_setup(lgmac);
	if (ret < 0) {
		PRT_ERR("%s: hardware setup error\n",
			lgmac->net_dev->name);
		goto exit_free_dma_resource;
	}

	lombo_gmac_init_tx_coalesce(lgmac);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	/* Request IRQ */
	ret = request_irq(ndev->irq, lombo_gmac_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (unlikely(ret < 0)) {
		PRT_ERR("%s: failed to request irq (%d)\n",
			ndev->name, ret);
		goto exit_delete_tx_timer;
	}

	lombo_gmac_enable_all_queues(lgmac);
	lombo_gmac_start_all_queues(lgmac);

	return 0;

	/* error unwind: each label falls through to the ones below it */
exit_delete_tx_timer:
	for (chan = 0; chan < lgmac->tx_queues_to_use; chan++)
		del_timer_sync(&lgmac->tx_queue[chan].txtimer);

exit_free_dma_resource:
	lombo_gmac_free_dma_desc_res(lgmac);

exit_disconnect_phy:
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		lgmac->phy_dev = NULL;
	}

exit_unregister_mdio:
	if (lgmac->mii_bus)
		lombo_gmac_mdio_unregister(ndev);

exit_reset_phy:
	lombo_gmac_phy_reset(lgmac, 0);

exit_disable_clock_gate:
	csp_gmac_disable_clk_gate(lgmac->base_addr, LGMAC_CLK_GATE_ALL);

	lombo_gmac_clock_deinit(lgmac);

	return ret;
}

/**
 * lombo_gmac_open - open entry point of the driver.
 * @ndev: pointer to net device struct.
 *
 * Enables the module clocks and then performs the actual bring-up.
 * On bring-up failure __lombo_gmac_open() releases the clocks itself.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_open(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	int ret;

	PRT_DBG("%s: open...\n", lgmac->net_dev->name);

	ret = lombo_gmac_clock_init(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to init module clock (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	ret = __lombo_gmac_open(ndev);
	if (ret)
		PRT_ERR("%s: failed to open lombo_gmac (%d)\n",
			lgmac->net_dev->name, ret);

	return ret;
}

/*
 * __lombo_gmac_stop - tear the interface down (clocks stay on for caller).
 * @ndev: pointer to net device struct.
 *
 * Reverses __lombo_gmac_open(): stops the phy, the queues, the timers,
 * releases the IRQ and DMA resources, disables the MAC, unregisters the
 * mdio bus, powers the phy down and gates the internal clocks.
 *
 * return 0 if success; otherwise failed.
 */
static int __lombo_gmac_stop(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	u32 chan;

	if (lgmac->eee_enable)
		del_timer_sync(&lgmac->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		lgmac->phy_dev = NULL;
	}

	lombo_gmac_stop_all_queues(lgmac);

	lombo_gmac_disable_all_queues(lgmac);

	/* coalescing timers must be dead before the IRQ is released */
	for (chan = 0; chan < lgmac->tx_queues_to_use; chan++)
		del_timer_sync(&lgmac->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);

	/* Stop TX/RX DMA and clear the descriptors */
	lombo_gmac_stop_all_dma(lgmac);

	/* Release and free the Rx/Tx resources */
	lombo_gmac_free_dma_desc_res(lgmac);

	/* Disable the MAC Rx/Tx */
	csp_gmac_set_transmitter(lgmac->base_addr, 0);
	csp_gmac_set_receiver(lgmac->base_addr, 0);

	if (lgmac->mii_bus)
		lombo_gmac_mdio_unregister(ndev);

	/* power the phy down (0 == power down) */
	lombo_gmac_phy_reset(lgmac, 0);

	csp_gmac_disable_clk_gate(lgmac->base_addr, LGMAC_CLK_GATE_ALL);

	netif_carrier_off(ndev);

	return 0;
}

/**
 * lombo_gmac_stop - close entry point of the driver.
 * @ndev: pointer to net device struct.
 *
 * Tears the interface down and then releases the module clocks.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_stop(struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	int ret;

	PRT_DBG("%s: stop...\n", lgmac->net_dev->name);

	ret = __lombo_gmac_stop(ndev);
	if (ret) {
		PRT_ERR("%s: failed to stop lombo_gmac (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	lombo_gmac_clock_deinit(lgmac);

	return 0;
}

/*
 * lombo_gmac_tso_allocator - spread a TSO payload over extra descriptors.
 * @lgmac: pointer to lombo gmac struct.
 * @des: dma address of the payload start.
 * @total_len: number of payload bytes still to describe.
 * @last_segment: whether this payload ends the frame.
 * @queue: tx queue number.
 *
 * Consumes one descriptor per LGMAC_TSO_MAX_BUF_SIZE chunk, advancing
 * tx_q->cur_tx as it goes.
 */
static void lombo_gmac_tso_allocator(struct lombo_gmac *lgmac,
				     u32 des, int total_len,
				     bool last_segment, u32 queue)
{
	struct lgmac_tx_queue *tx_q = &lgmac->tx_queue[queue];
	int rem = total_len;

	while (rem > 0) {
		struct dma_desc *desc;
		u32 chunk;
		bool final;

		tx_q->cur_tx = LGMAC_GET_ENTRY(tx_q->cur_tx, LGMAC_TX_DESC_CNT);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
		desc = tx_q->dma_tx + tx_q->cur_tx;
		csp_gmac_desc_tx_init(desc);

		/* buffer address advances by what was consumed so far */
		desc->des0 = cpu_to_le32(des + (total_len - rem));

		chunk = rem >= LGMAC_TSO_MAX_BUF_SIZE ?
			LGMAC_TSO_MAX_BUF_SIZE : rem;
		final = last_segment && rem <= LGMAC_TSO_MAX_BUF_SIZE;

		csp_gmac_desc_tx_tso_prepare(desc, chunk, 0, 1, 0,
					     final, 0, 0);

		rem -= LGMAC_TSO_MAX_BUF_SIZE;
	}
}

/*
 * lombo_gmac_tso_start_xmit - transmit path for TSO (gso) frames.
 * @skb: socket buffer to transmit.
 * @ndev: pointer to net device struct.
 *
 * The first descriptor carries the headers (buf1) and the start of the
 * payload (buf2); extra descriptors are taken for the rest of the linear
 * payload and for each page fragment. An optional context descriptor is
 * inserted first when the MSS changed. The OWN bit of the first
 * descriptor is set last, behind a barrier, before kicking the DMA tail.
 *
 * return NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static netdev_tx_t lombo_gmac_tso_start_xmit(struct sk_buff *skb,
					     struct net_device *ndev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, des;
	struct lgmac_tx_queue *tx_q;
	int tmp_pay_len = 0;
	u32 pay_len, mss;
	u8 proto_hdr_len;
	int i;

	tx_q = &lgmac->tx_queue[queue];

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(lombo_gmac_tx_avail(lgmac, queue) <
		(((skb->len - proto_hdr_len) / LGMAC_TSO_MAX_BUF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(ndev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, queue));
			PRT_ERR("%s: tx ring is full when queue awake\n",
				lgmac->net_dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len;	/* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != tx_q->mss) {
		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
		csp_gmac_desc_tx_init(mss_desc);
		csp_gmac_desc_tx_set_mss(mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = LGMAC_GET_ENTRY(tx_q->cur_tx, LGMAC_TX_DESC_CNT);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	desc = tx_q->dma_tx + first_entry;
	first = desc;
	csp_gmac_desc_tx_init(desc);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(lgmac->dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(lgmac->dev, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);

	first->des0 = cpu_to_le32(des);

	/* Fill start of payload in buff2 of first descriptor */
	if (pay_len)
		first->des1 = cpu_to_le32(des + proto_hdr_len);

	/* If needed take extra descriptors to fill the remaining payload */
	tmp_pay_len = pay_len - LGMAC_TSO_MAX_BUF_SIZE;

	lombo_gmac_tso_allocator(lgmac, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(lgmac->dev, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(lgmac->dev, des))
			goto dma_map_err;

		lombo_gmac_tso_allocator(lgmac, des, skb_frag_size(frag),
					 (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	desc = tx_q->dma_tx + tx_q->cur_tx;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = LGMAC_GET_ENTRY(tx_q->cur_tx, LGMAC_TX_DESC_CNT);

	if (unlikely(lombo_gmac_tx_avail(lgmac, queue) <=
		     (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_get_tx_queue(ndev, queue));
		PRT_DBG("%s: stop transmitted packets\n", lgmac->net_dev->name);
	}

	ndev->stats.tx_bytes += skb->len;

	/* Manage tx mitigation */
	tx_q->tx_count_frames += nfrags + 1;
	if (likely(lgmac->tx_coal_frames > tx_q->tx_count_frames)) {
		lombo_gmac_tx_timer_arm(lgmac, queue);
	} else {
		tx_q->tx_count_frames = 0;
		csp_gmac_desc_tx_set_ic(desc);
	}

	skb_tx_timestamp(skb);

	/* Complete the first descriptor before granting the DMA */
	csp_gmac_desc_tx_tso_prepare(first, proto_hdr_len, pay_len, 1, 1,
				     tx_q->tx_skbuff_dma[first_entry].last_segment,
				     tcp_hdrlen(skb) / 4,
				     (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		csp_gmac_desc_tx_set_owner(mss_desc);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(ndev, queue), skb->len);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	csp_gmac_set_tx_desc_tail(lgmac->base_addr, queue, tx_q->tx_tail_addr);

	return NETDEV_TX_OK;

dma_map_err:
	/*
	 * NOTE(review): mappings made before the failing one are not
	 * unmapped here — presumably reclaimed by the tx-clean path;
	 * confirm this cannot leak DMA mappings.
	 */
	PRT_ERR("%s: tx dma map error\n", lgmac->net_dev->name);

	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

/*
 * lombo_gmac_start_xmit - main transmit entry point (ndo_start_xmit).
 * @skb: socket buffer to transmit.
 * @ndev: pointer to net device struct.
 *
 * GSO frames are diverted to lombo_gmac_tso_start_xmit() when TSO is
 * enabled. Fragments are mapped and prepared first; the first (header)
 * descriptor is mapped and its OWN bit set last, behind a barrier, so
 * the DMA never sees a half-built chain.
 *
 * return NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static netdev_tx_t lombo_gmac_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	u32 nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int entry;
	unsigned int first_entry;
	struct dma_desc *desc, *first;
	struct lgmac_tx_queue *tx_q;
	unsigned int des;
	bool last_segment;

	tx_q = &lgmac->tx_queue[queue];

	if (lgmac->tx_lpi_enter)
		lombo_gmac_disable_eee_mode(lgmac);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && lgmac->tso_en) {
		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return lombo_gmac_tso_start_xmit(skb, ndev);
	}

	if (unlikely(lombo_gmac_tx_avail(lgmac, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(ndev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, queue));
			PRT_ERR("%s: tx ring is full when queue awake\n",
				lgmac->net_dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	desc = tx_q->dma_tx + entry;
	csp_gmac_desc_tx_init(desc);

	first = desc;

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		/* shadows the function-scope last_segment on purpose */
		bool last_segment = (i == (nfrags - 1));

		entry = LGMAC_GET_ENTRY(entry, LGMAC_TX_DESC_CNT);
		WARN_ON(tx_q->tx_skbuff[entry]);

		desc = tx_q->dma_tx + entry;
		csp_gmac_desc_tx_init(desc);

		des = skb_frag_dma_map(lgmac->dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(lgmac->dev, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		csp_gmac_desc_set_addr(desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the OWN bit too */
		csp_gmac_desc_tx_prepare(desc, len, skb->len,
					 1, 0, last_segment, csum_insertion);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = LGMAC_GET_ENTRY(entry, LGMAC_TX_DESC_CNT);
	tx_q->cur_tx = entry;

	if (unlikely(lombo_gmac_tx_avail(lgmac, queue) <=
		     (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_get_tx_queue(ndev, queue));
		PRT_DBG("%s: stop transmitted packets\n", lgmac->net_dev->name);
	}

	ndev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_q->tx_count_frames += nfrags + 1;
	if (likely(lgmac->tx_coal_frames > tx_q->tx_count_frames)) {
		lombo_gmac_tx_timer_arm(lgmac, queue);
	} else {
		tx_q->tx_count_frames = 0;
		csp_gmac_desc_tx_set_ic(desc);
	}

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	last_segment = (nfrags == 0);

	des = dma_map_single(lgmac->dev, skb->data,
			     nopaged_len, DMA_TO_DEVICE);
	if (dma_mapping_error(lgmac->dev, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;

	csp_gmac_desc_set_addr(first, des);

	tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
	tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

	/* Prepare the first descriptor setting the OWN bit too */
	csp_gmac_desc_tx_prepare(first, nopaged_len, skb->len,
				 1, 1, last_segment, csum_insertion);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(ndev, queue), skb->len);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	csp_gmac_set_tx_desc_tail(lgmac->base_addr, queue, tx_q->tx_tail_addr);

	return NETDEV_TX_OK;

dma_map_err:
	/*
	 * NOTE(review): fragments mapped before a later failure are not
	 * unmapped on this path — verify the tx-clean path reclaims them.
	 */
	PRT_ERR("%s: tx dma map error\n", lgmac->net_dev->name);

	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static void lombo_gmac_rx_vlan(struct net_device *ndev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     ndev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	     (vlan_proto == htons(ETH_P_8021AD) &&
	      ndev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}

/**
 * lombo_gmac_rx_refill - refill used skb preallocated buffer.
 * @lgmac: pointer to lombo gmac struct.
 * @queue: queue number.
 *
 * To reallocate the skb for the reception process that is based on zero-copy.
 */
static void lombo_gmac_rx_refill(struct lombo_gmac *lgmac, u32 queue)
{
	struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];
	int dirty = lombo_gmac_rx_dirty(lgmac, queue);
	unsigned int entry = rx_q->dirty_rx;

	while (dirty-- > 0) {
		struct dma_desc *desc;

		desc = rx_q->dma_rx + entry;

		/* only slots whose skb was consumed need a fresh buffer */
		if (likely(!rx_q->rx_skbuff[entry])) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(lgmac->net_dev,
							lgmac->dma_buf_sz);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy! */
				rx_q->rx_zeroc_thresh = LGMAC_RX_DESC_THRESH;
				if (unlikely(net_ratelimit()))
					PRT_ERR("%s: fail to alloc skb %d\n",
						lgmac->net_dev->name, entry);
				break;
			}

			rx_q->rx_skbuff[entry] = skb;
			rx_q->rx_skbuff_dma[entry] =
				dma_map_single(lgmac->dev, skb->data,
					       lgmac->dma_buf_sz,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(lgmac->dev,
					      rx_q->rx_skbuff_dma[entry])) {
				PRT_ERR("%s: rx dma map error\n",
					lgmac->net_dev->name);
				dev_kfree_skb(skb);
				break;
			}

			csp_gmac_desc_set_addr(desc,
					       rx_q->rx_skbuff_dma[entry]);

			if (rx_q->rx_zeroc_thresh > 0)
				rx_q->rx_zeroc_thresh--;
		}

		/* address must be visible before ownership is handed over */
		dma_wmb();

		csp_gmac_desc_rx_set_owner(desc, lgmac->use_riwt);

		dma_wmb();

		entry = LGMAC_GET_ENTRY(entry, LGMAC_RX_DESC_CNT);
	}

	rx_q->dirty_rx = entry;

	/* kick the DMA to resume fetching rx descriptors */
	csp_gmac_set_rx_desc_tail(lgmac->base_addr, queue, rx_q->rx_tail_addr);
}

/**
 * lombo_gmac_rx - rx entry point of the driver.
 * @lgmac: pointer to lombo gmac struct.
 * @limit: napi bugget.
 * @queue: queue number.
 *
 * This function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int lombo_gmac_rx(struct lombo_gmac *lgmac, int limit, u32 queue)
{
	struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];
	struct lgmac_channel *ch = &lgmac->channel[queue];
	u32 next_entry = rx_q->cur_rx;
	u32 count = 0;
	int rx_frame_errors_count;
	int phy_software_reset = 0;

	while (count < limit) {
		int entry, status;
		struct dma_desc *desc;
		struct dma_desc *ndesc;

		entry = next_entry;
		desc = rx_q->dma_rx + entry;

		/*
		 * snapshot the frame-error counter so a bump by the status
		 * parse below can be detected for this descriptor.
		 * NOTE(review): int snapshot of an unsigned long stat —
		 * confirm the signed/unsigned comparison below is intended.
		 */
		rx_frame_errors_count = lgmac->net_dev->stats.rx_frame_errors;

		status = csp_gmac_desc_rx_status(desc, &lgmac->net_dev->stats);
		/* descriptor still owned by the DMA: ring drained */
		if (unlikely(status & LGMAC_RX_DMA_OWN))
			break;

		count++;

		rx_q->cur_rx = LGMAC_GET_ENTRY(rx_q->cur_rx, LGMAC_RX_DESC_CNT);
		next_entry = rx_q->cur_rx;

		ndesc = rx_q->dma_rx + next_entry;

		prefetch(ndesc);

		if (unlikely(status == LGMAC_RX_DISCARD)) {
			lgmac->net_dev->stats.rx_errors++;
			/* a frame error on this desc triggers a phy reset */
			if (rx_frame_errors_count
			    < lgmac->net_dev->stats.rx_frame_errors)
				phy_software_reset = 1;
		} else {
			struct sk_buff *skb;
			int frame_len;

			csp_gmac_desc_get_addr(desc);
			frame_len = csp_gmac_desc_rx_frame_len(desc);

			/*  If frame length is greater than skb buffer size
			 *  (preallocated during init) then the packet is
			 *  ignored.
			 */
			if (frame_len > lgmac->dma_buf_sz) {
				lgmac->net_dev->stats.rx_length_errors++;
				continue;
			}

			frame_len -= ETH_FCS_LEN;

			skb = rx_q->rx_skbuff[entry];
			if (unlikely(!skb)) {
				lgmac->net_dev->stats.rx_dropped++;
				continue;
			}

			prefetch(skb->data - NET_IP_ALIGN);
			/* hand the buffer to the stack; refill reallocates */
			rx_q->rx_skbuff[entry] = NULL;
			rx_q->rx_zeroc_thresh++;

			skb_put(skb, frame_len);
			dma_unmap_single(lgmac->dev,
					 rx_q->rx_skbuff_dma[entry],
					 lgmac->dma_buf_sz,
					 DMA_FROM_DEVICE);

			lombo_gmac_rx_vlan(lgmac->net_dev, skb);

			skb->protocol = eth_type_trans(skb, lgmac->net_dev);

			if (unlikely(!lgmac->rx_csum))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&ch->napi, skb);

			lgmac->net_dev->stats.rx_packets++;
			lgmac->net_dev->stats.rx_bytes += frame_len;
		}
	}

	/* phy recovery runs from a workqueue, not in napi context */
	if (phy_software_reset)
		queue_work(lgmac->workqueue_phy_reset, &lgmac->phy_reset);

	lombo_gmac_rx_refill(lgmac, queue);

	return count;
}

/**
 * lombo_gmac_napi_poll - poll method (NAPI).
 * @napi: pointer to napi struct.
 * @budget: max number of packets that the current CPU can
 *          receive from all interfaces.
 *
 * To look at the incoming frames and clear the tx resources.
 */
static int lombo_gmac_napi_poll(struct napi_struct *napi, int budget)
{
	struct lgmac_channel *ch =
		container_of(napi, struct lgmac_channel, napi);
	struct lombo_gmac *lgmac = ch->priv_data;
	u32 status, chan = ch->index;
	int work_done, rx_done = 0, tx_done = 0;

	if (ch->has_tx)
		tx_done = lombo_gmac_tx_clean(lgmac, budget, chan);
	if (ch->has_rx)
		rx_done = lombo_gmac_rx(lgmac, budget, chan);

	/* report the larger of the two, capped to the napi budget */
	work_done = max(rx_done, tx_done);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		csp_gmac_dma_ch_int_enable(lgmac->base_addr, chan);
		/* re-check: events raised while interrupts were masked */
		status = csp_gmac_get_dma_ch_status(lgmac->base_addr, chan);
		csp_gmac_clr_dma_ch_status(lgmac->base_addr, chan, status);

		if ((status & LGMAC_DMA_CH_INT_CARE) && napi_reschedule(napi))
			csp_gmac_dma_ch_int_disable(lgmac->base_addr, chan);
	}

	return work_done;
}

/**
 * lombo_gmac_tx_timeout - tx timeout entry point of the driver.
 * @ndev: pointer to net device struct.
 *
 * Called by the stack when a transmission fails to complete within a
 * reasonable time; escalates to the driver's global error recovery,
 * which resets the device so a new packet can be transmitted.
 */
static void lombo_gmac_tx_timeout(struct net_device *ndev)
{
	lombo_gmac_global_error(netdev_priv(ndev));
}

/**
 * lombo_gmac_set_frame_filter - set frame filter multicast addressing.
 * @lgmac: pointer to lombo gmac struct.
 */
static void lombo_gmac_set_frame_filter(struct lombo_gmac *lgmac)
{
	struct net_device *ndev = lgmac->net_dev;
	struct netdev_hw_addr *ha;
	int mc_count = netdev_mc_count(ndev);
	u32 bit_nr, value = 0;
	u32 mc_filter[2];

	if (ndev->flags & IFF_PROMISC) {
		/* pass all packets */
		value = LGMAC_FILTER_PR;
	} else if (ndev->flags & IFF_ALLMULTI ||
		   mc_count > LGMAC_HASH_TABLE_SIZE) {
		/* pass all multicast packets */
		value = LGMAC_FILTER_PM;
		csp_gmac_set_filter_hash_lo(lgmac->base_addr, 0xFFFFFFFF);
		csp_gmac_set_filter_hash_hi(lgmac->base_addr, 0xFFFFFFFF);
	} else if (!netdev_mc_empty(ndev)) {
		/* hash filter for multicast packets */
		value = LGMAC_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, ndev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the content of the Hash Table Registers.
			 */
			bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
			/* The most significant bit determines the register
			 * to use while the other 5 bits determines the bit
			 * within the selected register
			 */
			mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
		}

		csp_gmac_set_filter_hash_lo(lgmac->base_addr, mc_filter[0]);
		csp_gmac_set_filter_hash_hi(lgmac->base_addr, mc_filter[1]);
	}

	csp_gmac_set_frame_filter(lgmac->base_addr, value);
}

/**
 * lombo_gmac_set_rx_mode - entry point for multicast addressing.
 * @ndev: pointer to net device struct.
 *
 * Invoked by the kernel whenever multicast addressing must be enabled
 * or disabled; reprograms the hardware frame filter accordingly.
 */
static void lombo_gmac_set_rx_mode(struct net_device *ndev)
{
	lombo_gmac_set_frame_filter(netdev_priv(ndev));
}

/**
 * lombo_gmac_change_mtu - entry point to change MTU of the net device.
 * @ndev: pointer to net device struct.
 * @new_mtu: MTU value to be set.
 *
 * The Maximum Transfer Unit (MTU) drives packet transmission in the
 * network layer; ethernet defaults to ETH_DATA_LEN (1500 octets).
 * The interface must be down for the change to be accepted.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	/* the rx buffer sizing happens at open time, so refuse while up */
	if (netif_running(ndev)) {
		PRT_WARN("%s: must be stopped to change its MTU\n",
			 lgmac->net_dev->name);
		return -EBUSY;
	}

	ndev->mtu = new_mtu;
	netdev_update_features(ndev);

	return 0;
}

/**
 * lombo_gmac_fix_features - fix features entry point of the driver.
 * @ndev: pointer to net device struct.
 * @features: net device features.
 *
 * Mirrors the (possibly ethtool-toggled) TSO flag into the driver
 * state so the transmit path picks the right code path.
 *
 * return new net device features value.
 */
static netdev_features_t lombo_gmac_fix_features(struct net_device *ndev,
						 netdev_features_t features)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	lgmac->tso_en = !!(features & NETIF_F_TSO);

	return features;
}

/**
 * lombo_gmac_set_features - set features entry point of the driver.
 * @ndev: pointer to net device struct.
 * @features: net device features.
 *
 * Tracks the rx checksum offload flag and programs it into hardware.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_set_features(struct net_device *ndev,
				   netdev_features_t features)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	lgmac->rx_csum = !!(features & NETIF_F_RXCSUM);
	csp_gmac_set_checksum_offload(lgmac->base_addr, lgmac->rx_csum);

	return 0;
}

/**
 * lombo_gmac_irq - GMAC ISR.
 * @irq: interrupt number.
 * @dev_id: pointer to net device struct.
 *
 * This is the main driver interrupt service routine.
 * It calls the DMA ISR and also the Core ISR to manage other interrupts.
 *
 * return irqreturn_t value.
 */
static irqreturn_t lombo_gmac_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	u32 status, enable;
	u32 mtl_sta, qx_sta;
	u32 queue, max_queues;
	u32 lpi_sta;

	max_queues = max(lgmac->tx_queues_to_use, lgmac->rx_queues_to_use);

	/* ignore interrupts while the device is going down (shared IRQ) */
	if (test_bit(LGMAC_DOWN, &lgmac->state))
		return IRQ_HANDLED;

	/* Core Interrupt: mask status by what is actually enabled */
	status = csp_gmac_get_core_int_status(lgmac->base_addr);
	enable = csp_gmac_get_core_int_enable(lgmac->base_addr);
	status &= enable;

	/* LPI Interrupt: track tx low-power (EEE) entry/exit */
	if (status & LGMAC_INT_LPI) {
		lpi_sta = csp_gmac_get_lpi_ctrl_status(lgmac->base_addr);
		csp_gmac_clear_lpi_ctrl_status(lgmac->base_addr, lpi_sta);
		if (lpi_sta & LGMAC_LPI_TX_ENTRY)
			lgmac->tx_lpi_enter = 1;
		if (lpi_sta & LGMAC_LPI_TX_EXIT)
			lgmac->tx_lpi_enter = 0;
	}

	/* MTL Interrupt: on rx fifo overflow re-kick the rx descriptor tail */
	mtl_sta = csp_gmac_get_mtl_int_status(lgmac->base_addr);
	for (queue = 0; queue < max_queues; queue++) {
		if (mtl_sta & LGMAC_MTL_INT_QX(queue)) {
			struct lgmac_rx_queue *rx_q = &lgmac->rx_queue[queue];

			qx_sta = csp_gmac_get_qx_int_status(lgmac->base_addr,
							    queue);
			if (qx_sta & LGMAC_Q_INT_RXOVFIS) {
				csp_gmac_clr_qx_int_status(lgmac->base_addr,
							   queue, qx_sta);
				csp_gmac_set_rx_desc_tail(lgmac->base_addr,
							  queue,
							  rx_q->rx_tail_addr);
			}
		}
	}

	/* DMA Interrupt */
	lombo_gmac_dma_irq(lgmac);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * lombo_gmac_poll_controller - polling receive.
 * @ndev: pointer to net device struct.
 *
 * Used by NETCONSOLE and other diagnostic tools to allow network I/O
 * with interrupts disabled.
 */
static void lombo_gmac_poll_controller(struct net_device *ndev)
{
	/* Run the ISR by hand with the device interrupt line masked. */
	disable_irq(ndev->irq);
	lombo_gmac_irq(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif

/**
 * lombo_gmac_do_ioctl - IOCTL entry point of the driver.
 * @ndev: pointer to net device struct.
 * @ifr: an IOCTL specific structure, that can contain a pointer to
 *      a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 *
 * Only the MII ioctls are supported; they are forwarded to the PHY
 * layer. Everything else returns -EOPNOTSUPP.
 */
static int lombo_gmac_do_ioctl(struct net_device *ndev, struct ifreq *ifr,
			       int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* MII access requires an attached PHY device. */
		if (!ndev->phydev)
			return -EINVAL;
		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * lombo_gmac_select_queue - pick the TX queue for a frame.
 * @ndev: pointer to net device struct.
 * @skb: frame to transmit.
 * @sb_dev: subordinate device (unused).
 * @fallback: core fallback queue selector.
 *
 * return the selected TX queue index.
 */
static u16 lombo_gmac_select_queue(struct net_device *ndev,
				   struct sk_buff *skb,
				   struct net_device *sb_dev,
				   select_queue_fallback_t fallback)
{
	unsigned int gso = skb_shinfo(skb)->gso_type;

	/*
	 * TSO frames always use Queue 0: there is no way to determine
	 * which queues are TSO capable, but if TSO is supported at all
	 * then at least Queue 0 is.
	 */
	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
		return 0;

	return fallback(ndev, skb, NULL) % ndev->real_num_tx_queues;
}

/* net_device callbacks exported to the networking core */
static const struct net_device_ops lombo_gmac_netdev_ops = {
	.ndo_open		= lombo_gmac_open,
	.ndo_stop		= lombo_gmac_stop,
	.ndo_start_xmit		= lombo_gmac_start_xmit,
	.ndo_select_queue	= lombo_gmac_select_queue,
	.ndo_set_rx_mode	= lombo_gmac_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= lombo_gmac_do_ioctl,
	.ndo_change_mtu		= lombo_gmac_change_mtu,
	.ndo_tx_timeout		= lombo_gmac_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= lombo_gmac_poll_controller,
#endif
	.ndo_fix_features	= lombo_gmac_fix_features,
	.ndo_set_features	= lombo_gmac_set_features,
};

/**
 * lombo_gmac_ethtool_getdrvinfo - ethtool get driver info.
 * @ndev: pointer to net device struct.
 * @info: pointer to ethtool driver info struct.
 *
 * Reports the driver name and version; there is no firmware, so the
 * firmware version string is left empty.
 */
static void lombo_gmac_ethtool_getdrvinfo(struct net_device *ndev,
					  struct ethtool_drvinfo *info)
{
	info->fw_version[0] = '\0';
	strlcpy(info->version, LGMAC_DRV_VERSION, sizeof(info->version));
	strlcpy(info->driver, LGMAC_DRV_NAME, sizeof(info->driver));
}

/**
 * lombo_gmac_ethtool_get_coalesce - ethtool get interrupt coalescing parameter.
 * @ndev: pointer to net device struct.
 * @coalesce: pointer to ethtool coalesce struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_get_coalesce(struct net_device *ndev,
					   struct ethtool_coalesce *coalesce)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	unsigned long clk_rate = clk_get_rate(lgmac->clk_core);

	coalesce->tx_max_coalesced_frames = lgmac->tx_coal_frames;
	coalesce->tx_coalesce_usecs = lgmac->tx_coal_timer;
	if (lgmac->use_riwt)
		coalesce->rx_coalesce_usecs = clk_rate ?
			(256 * lgmac->rx_wdt_cnt) / (clk_rate / 1000000) : 0;

	return 0;
}

/**
 * lombo_gmac_ethtool_set_coalesce - ethtool set interrupt coalescing parameter.
 * @ndev: pointer to net device struct.
 * @coalesce: pointer to ethtool coalesce struct.
 *
 * Only the basic rx/tx usec and tx max-frames knobs are supported;
 * every other coalesce field must be zero.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_set_coalesce(struct net_device *ndev,
					   struct ethtool_coalesce *coalesce)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	unsigned long clk_rate = clk_get_rate(lgmac->clk_core);
	u32 rx_wdt_cnt, chan;

	/* Reject any parameter this hardware cannot honour. */
	if (coalesce->rx_max_coalesced_frames ||
	    coalesce->rx_coalesce_usecs_irq ||
	    coalesce->rx_max_coalesced_frames_irq ||
	    coalesce->tx_coalesce_usecs_irq ||
	    coalesce->use_adaptive_rx_coalesce ||
	    coalesce->use_adaptive_tx_coalesce ||
	    coalesce->pkt_rate_low ||
	    coalesce->rx_coalesce_usecs_low ||
	    coalesce->rx_max_coalesced_frames_low ||
	    coalesce->tx_coalesce_usecs_high ||
	    coalesce->tx_max_coalesced_frames_low ||
	    coalesce->pkt_rate_high ||
	    coalesce->tx_coalesce_usecs_low ||
	    coalesce->rx_coalesce_usecs_high ||
	    coalesce->rx_max_coalesced_frames_high ||
	    coalesce->tx_max_coalesced_frames_irq ||
	    coalesce->stats_block_coalesce_usecs ||
	    coalesce->tx_max_coalesced_frames_high ||
	    coalesce->rate_sample_interval)
		return -EOPNOTSUPP;

	if (!coalesce->rx_coalesce_usecs)
		return -EINVAL;

	/* TX coalescing needs at least one of timer or frame count. */
	if (!coalesce->tx_coalesce_usecs &&
	    !coalesce->tx_max_coalesced_frames)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs > LGMAC_TX_COAL_TIMER_MAX ||
	    coalesce->tx_max_coalesced_frames > LGMAC_TX_COAL_FRAME_MAX)
		return -EINVAL;

	/* RX watchdog counts in units of 256 core clock cycles. */
	rx_wdt_cnt = clk_rate ?
		(coalesce->rx_coalesce_usecs * (clk_rate / 1000000)) / 256 : 0;

	/*
	 * NOTE(review): the range check runs before the use_riwt check,
	 * so an out-of-range value returns -EINVAL even when the RX
	 * watchdog is unsupported — confirm this ordering is intended.
	 */
	if (rx_wdt_cnt > LGMAC_MAX_RX_WDT_CNT ||
	    rx_wdt_cnt < LGMAC_MIN_RX_WDT_CNT)
		return -EINVAL;
	else if (!lgmac->use_riwt)
		return -EOPNOTSUPP;

	/* Commit the validated values and program every RX channel. */
	lgmac->tx_coal_frames = coalesce->tx_max_coalesced_frames;
	lgmac->tx_coal_timer = coalesce->tx_coalesce_usecs;
	lgmac->rx_wdt_cnt = rx_wdt_cnt;
	for (chan = 0; chan < lgmac->rx_queues_to_use; chan++)
		csp_gmac_set_dma_ch_rxwdt(lgmac->base_addr, chan, rx_wdt_cnt);

	return 0;
}

/**
 * lombo_gmac_ethtool_get_pauseparam - ethtool get flow control parameter.
 * @ndev: pointer to net device struct.
 * @param: pointer to ethtool flow control parameter struct.
 */
static void lombo_gmac_ethtool_get_pauseparam(struct net_device *ndev,
					      struct ethtool_pauseparam *param)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;

	param->rx_pause = 0;
	param->tx_pause = 0;

	/* Report nothing unless the PHY supports pause advertisement. */
	if (!(phy_dev->supported & SUPPORTED_Pause) ||
	    !(phy_dev->supported & SUPPORTED_Asym_Pause))
		return;

	mutex_lock(&lgmac->lock);

	/*
	 * NOTE(review): reads lgmac->phy_dev here but ndev->phydev above;
	 * presumably both refer to the same PHY — confirm.
	 */
	param->autoneg = lgmac->phy_dev->autoneg;

	if (lgmac->flow_ctrl & LGMAC_FLOW_CTRL_RX)
		param->rx_pause = 1;

	if (lgmac->flow_ctrl & LGMAC_FLOW_CTRL_TX)
		param->tx_pause = 1;

	mutex_unlock(&lgmac->lock);
}

/**
 * lombo_gmac_ethtool_set_pauseparam - ethtool set flow control parameter.
 * @ndev: pointer to net device struct.
 * @pause: pointer to ethtool flow control parameter struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_set_pauseparam(struct net_device *ndev,
					     struct ethtool_pauseparam *param)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int ret = 0;

	if (!(phy_dev->supported & SUPPORTED_Pause) ||
	    !(phy_dev->supported & SUPPORTED_Asym_Pause))
		return -EOPNOTSUPP;

	mutex_lock(&lgmac->lock);

	phy_dev->autoneg = param->autoneg;

	lgmac->flow_ctrl = LGMAC_FLOW_CTRL_OFF;
	if (param->rx_pause)
		lgmac->flow_ctrl |= LGMAC_FLOW_CTRL_RX;
	if (param->tx_pause)
		lgmac->flow_ctrl |= LGMAC_FLOW_CTRL_TX;

	if (param->autoneg) {
		if (netif_running(ndev))
			ret = phy_start_aneg(phy_dev);
	} else {
		lombo_gmac_flow_control(lgmac);
	}

	mutex_unlock(&lgmac->lock);

	return ret;
}

/**
 * lombo_gmac_ethtool_begin - ethtool begin, check if net device is running.
 * @ndev: pointer to net device struct.
 *
 * return 0 if device is running; -EBUSY otherwise.
 */
static int lombo_gmac_ethtool_begin(struct net_device *ndev)
{
	return netif_running(ndev) ? 0 : -EBUSY;
}

/**
 * lombo_gmac_ethtool_get_eee - ethtool get EEE supported and status.
 * @ndev: pointer to net device struct.
 * @eee: pointer to ethtool eee struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_get_eee(struct net_device *ndev,
				      struct ethtool_eee *eee)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);

	/* Fill in the MAC-side state, then let the PHY layer add its part. */
	eee->tx_lpi_timer = lgmac->tx_lpi_time;
	eee->eee_active = lgmac->eee_active;
	eee->eee_enabled = lgmac->eee_enable;

	return phy_ethtool_get_eee(ndev->phydev, eee);
}

/**
 * lombo_gmac_ethtool_set_eee - ethtool set EEE supported and status.
 * @ndev: pointer to net device struct.
 * @eee: pointer to ethtool eee struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_set_eee(struct net_device *ndev,
				      struct ethtool_eee *eee)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	int ret = 0;

	if (!eee->eee_enabled) {
		lombo_gmac_disable_eee_mode(lgmac);
	} else {
		/**
		 * We are asking for enabling the EEE but it is safe
		 * to verify all by invoking the eee_init function.
		 * In case of failure it will return an error.
		 */
		eee->eee_enabled = !lombo_gmac_eee_init(lgmac);
		if (!eee->eee_enabled)
			return -EOPNOTSUPP;
	}

	/* Propagate the request to the PHY before committing our state. */
	ret = phy_ethtool_set_eee(ndev->phydev, eee);
	if (ret)
		return ret;

	lgmac->eee_enable = eee->eee_enabled;
	lgmac->tx_lpi_time = eee->tx_lpi_timer;

	return 0;
}

/**
 * lombo_gmac_ethtool_get_link_ksettings - ethtool get link settings.
 * @ndev: pointer to net device struct.
 * @settings: pointer to ethtool link settings struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_get_link_ksettings(struct net_device *ndev,
				struct ethtool_link_ksettings *settings)
{
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;

	/* Settings come from the PHY; it must exist and be running. */
	if (!phy) {
		PRT_ERR("%s: not any phy device exist\n", lgmac->net_dev->name);
		return -ENODEV;
	}

	if (!netif_running(ndev)) {
		PRT_ERR("%s: net interface is disabled\n",
			lgmac->net_dev->name);
		return -EBUSY;
	}

	phy_ethtool_ksettings_get(phy, settings);

	return 0;
}

/**
 * lombo_gmac_ethtool_set_link_ksettings - ethtool set link settings.
 * @ndev: pointer to net device struct.
 * @settings: pointer to ethtool link settings struct.
 *
 * Forwarded straight to the PHY layer.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_ethtool_set_link_ksettings(struct net_device *ndev,
				const struct ethtool_link_ksettings *settings)
{
	return phy_ethtool_ksettings_set(ndev->phydev, settings);
}

/* ethtool callbacks exported to the networking core */
static const struct ethtool_ops lombo_gmac_ethtool_ops = {
	.get_drvinfo		= lombo_gmac_ethtool_getdrvinfo,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= lombo_gmac_ethtool_get_coalesce,
	.set_coalesce		= lombo_gmac_ethtool_set_coalesce,
	.get_pauseparam		= lombo_gmac_ethtool_get_pauseparam,
	.set_pauseparam		= lombo_gmac_ethtool_set_pauseparam,
	.begin			= lombo_gmac_ethtool_begin,
	.get_eee		= lombo_gmac_ethtool_get_eee,
	.set_eee		= lombo_gmac_ethtool_set_eee,
	.get_link_ksettings	= lombo_gmac_ethtool_get_link_ksettings,
	.set_link_ksettings	= lombo_gmac_ethtool_set_link_ksettings,
};

/*
 * lombo_gmac_reset_subtask - recover from a requested reset by closing
 * and reopening the net device. Runs from the service workqueue.
 */
static void lombo_gmac_reset_subtask(struct lombo_gmac *lgmac)
{
	/* Only act when a reset was requested and the device is up. */
	if (!test_and_clear_bit(LGMAC_RESET_REQUESTED, &lgmac->state))
		return;
	if (test_bit(LGMAC_DOWN, &lgmac->state))
		return;

	rtnl_lock();

	netif_trans_update(lgmac->net_dev);
	/* Wait out any reset already in flight before taking ownership. */
	while (test_and_set_bit(LGMAC_RESETTING, &lgmac->state))
		usleep_range(1000, 2000);

	/* LGMAC_DOWN keeps the ISR quiet across the close/open cycle. */
	set_bit(LGMAC_DOWN, &lgmac->state);
	dev_close(lgmac->net_dev);
	/* NOTE(review): dev_open() return value is ignored — confirm. */
	dev_open(lgmac->net_dev);
	clear_bit(LGMAC_DOWN, &lgmac->state);
	clear_bit(LGMAC_RESETTING, &lgmac->state);

	rtnl_unlock();
}

/* Service workqueue entry: run pending reset work, then allow rescheduling. */
static void lombo_gmac_service_task(struct work_struct *work)
{
	struct lombo_gmac *lgmac = container_of(work, struct lombo_gmac,
						service_task);

	lombo_gmac_reset_subtask(lgmac);
	/* Clear the scheduled flag so the task can be queued again. */
	clear_bit(LGMAC_SERVICE_SCHED, &lgmac->state);
}

/* PHY reset workqueue entry: soft-reset the PHY and restart autoneg. */
static void lombo_gmac_service_phy_reset(struct work_struct *work)
{
	struct lombo_gmac *lgmac = container_of(work, struct lombo_gmac,
						phy_reset);

	genphy_soft_reset(lgmac->phy_dev);
	genphy_restart_aneg(lgmac->phy_dev);
}

/**
 * lombo_gmac_clock_get - get module clock.
 * @lgmac: pointer to lombo gmac struct.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_clock_get(struct lombo_gmac *lgmac)
{
	int ret = 0;
	u32 i;
	static const char * const clk_name[] = {
		"gmac_parent_clk", "gmac_core_clk", "gmac_sclk_0",
		"gmac_sclk_1", "gmac_ahb_gate", "gmac_mem_axi_gate",
		"gmac_ahb_reset",
	};
	struct clk **clk_handle[] = {
		&lgmac->clk_parent, &lgmac->clk_core, &lgmac->clk_sclk0,
		&lgmac->clk_sclk1, &lgmac->clk_gate, &lgmac->clk_axi,
		&lgmac->clk_reset,
	};

	for (i = 0; i < ARRAY_SIZE(clk_handle); i++) {
		*clk_handle[i] = devm_clk_get(lgmac->dev, clk_name[i]);
		if (IS_ERR_OR_NULL(*clk_handle[i])) {
			PRT_ERR("%s: failed to get clock %s (%d)\n",
				dev_name(lgmac->dev),
				clk_name[i],
				(int)PTR_ERR(*clk_handle[i]));
			return PTR_ERR(*clk_handle[i]);
		}
	}

	ret = clk_set_parent(lgmac->clk_core, lgmac->clk_parent);
	if (ret) {
		PRT_ERR("%s: failed to set parent for core clock (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	ret = clk_set_parent(lgmac->clk_sclk0, lgmac->clk_parent);
	if (ret) {
		PRT_ERR("%s: failed to set parent for sclk0 clock (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	ret = clk_set_parent(lgmac->clk_sclk1, lgmac->clk_parent);
	if (ret) {
		PRT_ERR("%s: failed to set parent for sclk1 clock (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	ret = clk_set_rate(lgmac->clk_core,
			   lgmac->phy_interface == PHY_INTERFACE_MODE_RGMII ?
			   250000000 : 100000000);
	if (ret) {
		PRT_ERR("%s: failed to set rate for core clock (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	return 0;
}

#ifdef CONFIG_PM_GENERIC_DOMAINS
/*
 * lombo_gmac_pm_operation - register all module clocks with the PM clock
 * framework, resume them once, and enable runtime PM for the device.
 *
 * return 0 if success; otherwise a negative error code.
 */
static int lombo_gmac_pm_operation(struct lombo_gmac *lgmac)
{
	int ret = 0;

	ret = pm_clk_add_clk(lgmac->dev, lgmac->clk_gate);
	if (ret) {
		PRT_ERR("failed to add ahb gate clock to pm clock (%d)\n", ret);
		return ret;
	}

	ret = pm_clk_add_clk(lgmac->dev, lgmac->clk_axi);
	if (ret) {
		PRT_ERR("failed to add axi gate clock to pm clock (%d)\n", ret);
		return ret;
	}

	ret = pm_clk_add_clk(lgmac->dev, lgmac->clk_reset);
	if (ret) {
		PRT_ERR("failed to add reset clock to pm clock (%d)\n", ret);
		return ret;
	}

	ret = pm_clk_add_clk(lgmac->dev, lgmac->clk_core);
	if (ret) {
		PRT_ERR("failed to add core clock to pm clock (%d)\n", ret);
		return ret;
	}

	ret = pm_clk_add_clk(lgmac->dev, lgmac->clk_sclk0);
	if (ret) {
		PRT_ERR("failed to add sclk0 clock to pm clock (%d)\n", ret);
		return ret;
	}

	ret = pm_clk_add_clk(lgmac->dev, lgmac->clk_sclk1);
	if (ret) {
		PRT_ERR("failed to add sclk1 clock to pm clock (%d)\n", ret);
		return ret;
	}

	/* Turn the registered clocks on so the block is usable at probe. */
	ret = pm_clk_resume(lgmac->dev);
	if (ret) {
		PRT_ERR("failed to resume pm clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_active(lgmac->dev);
	pm_runtime_enable(lgmac->dev);

	return 0;
}
#endif

/**
 * lombo_gmac_parse_dt - parse config from device tree.
 * @lgmac: pointer to lombo gmac struct.
 *
 * Reads the PHY interface type, RMII clock source, PHY reset GPIO,
 * TX/RX delay tuples, queue counts and TSO enable flag. Missing
 * optional properties fall back to defaults with a warning.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_parse_dt(struct lombo_gmac *lgmac)
{
	int ret;
	enum of_gpio_flags flags;

	/* Defaults used when the DT omits the optional properties. */
	lgmac->rx_csum = 1;
	lgmac->use_riwt = 1;
	lgmac->phy_addr = -1;
	lgmac->flow_ctrl = LGMAC_FLOW_CTRL_OFF;

	/* get phy interface */
	/* NOTE(review): the < 0 test assumes phy_interface is signed. */
	lgmac->phy_interface = of_get_phy_mode(lgmac->dev->of_node);
	if (lgmac->phy_interface < 0) {
		PRT_ERR("%s: failed to get phy interface type (%d)\n",
			dev_name(lgmac->dev),
			lgmac->phy_interface);
		return lgmac->phy_interface;
	}

	/* Supported phy interface: MII, RMII, RGMII. */
	if (lgmac->phy_interface != PHY_INTERFACE_MODE_MII &&
	    lgmac->phy_interface != PHY_INTERFACE_MODE_RMII &&
	    lgmac->phy_interface != PHY_INTERFACE_MODE_RGMII) {
		PRT_ERR("%s: unsupported phy interface %d\n",
			dev_name(lgmac->dev),
			lgmac->phy_interface);
		return -EPFNOSUPPORT;
	}

	/* get rmii clock source (mandatory for RMII only) */
	if (lgmac->phy_interface == PHY_INTERFACE_MODE_RMII) {
		ret = of_property_read_u32(lgmac->dev->of_node,
					   "rmii_osc_ext",
					   &lgmac->rmii_osc_ext);
		if (ret) {
			PRT_ERR("%s: failed to get rmii tx clk src (%d)\n",
				dev_name(lgmac->dev), ret);
			return ret;
		}
	}

	/* get phy reset pin and reset level (optional) */
	lgmac->phy_rst_pin = of_get_named_gpio_flags(lgmac->dev->of_node,
						     "phy_rst_pin", 0,
						     &flags);
	if (gpio_is_valid(lgmac->phy_rst_pin))
		lgmac->phy_rst_level = flags;
	else
		PRT_WARN("%s: failed to get phy reset pin (%d)\n",
			 dev_name(lgmac->dev), lgmac->phy_rst_pin);

	/* get tx delay (optional, 3-element tuple) */
	ret = of_property_read_u32_array(lgmac->dev->of_node,
					 "tx_delay",
					 lgmac->tx_delay, 3);
	if (ret)
		PRT_WARN("%s: failed to get tx delay parameters (%d)\n",
			 dev_name(lgmac->dev), ret);

	/* get rx delay (optional, 3-element tuple) */
	ret = of_property_read_u32_array(lgmac->dev->of_node,
					 "rx_delay",
					 lgmac->rx_delay, 3);
	if (ret)
		PRT_WARN("%s: failed to get rx delay parameters (%d)\n",
			 dev_name(lgmac->dev), ret);

	/* get rx queues to use (optional, default 1) */
	ret = of_property_read_u32(lgmac->dev->of_node,
				   "rx_queues_to_use",
				   &lgmac->rx_queues_to_use);
	if (ret) {
		PRT_WARN("%s: failed to get rx queues to use (%d)\n",
			 dev_name(lgmac->dev), ret);
		lgmac->rx_queues_to_use = 1;
	}

	/* get tx queues to use (optional, default 1) */
	ret = of_property_read_u32(lgmac->dev->of_node,
				   "tx_queues_to_use",
				   &lgmac->tx_queues_to_use);
	if (ret) {
		PRT_WARN("%s: failed to get tx queues to use (%d)\n",
			 dev_name(lgmac->dev), ret);
		lgmac->tx_queues_to_use = 1;
	}

	/* get tso enable (optional, default enabled) */
	ret = of_property_read_u32(lgmac->dev->of_node,
				   "tso_enable",
				   &lgmac->tso_en);
	if (ret) {
		PRT_WARN("%s: failed to get tso enable config (%d)\n",
			 dev_name(lgmac->dev), ret);
		lgmac->tso_en = 1;
	}

	return 0;
}

/**
 * lombo_gmac_probe - probe entry point of the driver.
 * @pdev: pointer to platform device struct.
 *
 * Allocates the multi-queue net device, maps registers, parses the
 * device tree, sets up clocks, workqueues and NAPI contexts, then
 * registers the net device. Resources are unwound in reverse order
 * on failure.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *ndev = NULL;
	struct lombo_gmac *lgmac = NULL;
	struct device *dev = &pdev->dev;
	u32 queue, max_queues;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		PRT_ERR("%s: failed to get memory resource\n", dev_name(dev));
		return -ENODEV;
	}

	/* Allocate with the compile-time maximum queue counts; the real
	 * counts are applied below via netif_set_real_num_*_queues().
	 */
	ndev = alloc_etherdev_mqs(sizeof(struct lombo_gmac),
				  LGMAC_TX_QUEUES_MAX,
				  LGMAC_RX_QUEUES_MAX);
	if (!ndev) {
		PRT_ERR("%s: failed to alloc net device\n", dev_name(dev));
		return -ENOMEM;
	}

	/* ether_setup(ndev); */
	SET_NETDEV_DEV(ndev, dev);

	lgmac = netdev_priv(ndev);
	lgmac->net_dev = ndev;
	lgmac->dev = dev;

	lgmac->base_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(lgmac->base_addr)) {
		ret = PTR_ERR(lgmac->base_addr);
		PRT_ERR("%s: failed to ioremap (%d)\n", dev_name(dev), ret);
		goto exit_free_net_device;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		ret = ndev->irq;
		PRT_ERR("%s: failed to get irq number (%d)\n",
			dev_name(dev), ret);
		goto exit_free_net_device;
	}

	ret = lombo_gmac_parse_dt(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to parse device tree (%d)\n",
			dev_name(dev), ret);
		goto exit_free_net_device;
	}

	/* PHY reset GPIO is optional; only claim it when the DT had one. */
	if (gpio_is_valid(lgmac->phy_rst_pin)) {
		ret = devm_gpio_request(lgmac->dev, lgmac->phy_rst_pin,
					"phy_rst_pin");
		if (ret) {
			PRT_ERR("%s: failed to request phy reset pin (%d)\n",
				dev_name(dev), ret);
			goto exit_free_net_device;
		}
	}

	ret = lombo_gmac_clock_get(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to get module clock (%d)\n",
			dev_name(dev), ret);
		goto exit_free_net_device;
	}

#ifdef CONFIG_PM_GENERIC_DOMAINS
	ret = lombo_gmac_pm_operation(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to add clock to pm clock (%d)\n",
			dev_name(dev), ret);
		goto exit_free_net_device;
	}
#endif

	mutex_init(&lgmac->lock);

	/* Read the MAC address the hardware currently holds. */
	csp_gmac_default_mac_addr(lgmac->base_addr, ndev->dev_addr);

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	if (lgmac->tso_en)
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->base_addr = (unsigned long)lgmac->base_addr;
	ndev->watchdog_timeo = msecs_to_jiffies(LGMAC_TX_TIMEOUT_MS);
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = 9000;

	ndev->netdev_ops = &lombo_gmac_netdev_ops;
	ndev->ethtool_ops = &lombo_gmac_ethtool_ops;

	/* Ordered workqueue for reset/service work. */
	lgmac->workqueue = create_singlethread_workqueue("lgmac_wq");
	if (!lgmac->workqueue) {
		PRT_ERR("%s: failed to create workqueue\n", dev_name(dev));
		ret = -ENOMEM;
		goto exit_destroy_mutex;
	}

	INIT_WORK(&lgmac->service_task, lombo_gmac_service_task);

	/* Separate workqueue dedicated to PHY soft-reset work. */
	lgmac->workqueue_phy_reset = create_singlethread_workqueue("lgmac_phy");
	if (!lgmac->workqueue_phy_reset) {
		PRT_ERR("%s: failed to create workqueue\n", dev_name(dev));
		ret = -ENOMEM;
		goto exit_destroy_workqueue;
	}

	INIT_WORK(&lgmac->phy_reset, lombo_gmac_service_phy_reset);

	ret = netif_set_real_num_rx_queues(ndev, lgmac->rx_queues_to_use);
	if (ret) {
		PRT_ERR("%s: failed to set real num rx queues (%d)\n",
			dev_name(dev), ret);
		goto exit_destroy_phy_reset_workqueue;
	}

	ret = netif_set_real_num_tx_queues(ndev, lgmac->tx_queues_to_use);
	if (ret) {
		PRT_ERR("%s: failed to set real num tx queues (%d)\n",
			dev_name(dev), ret);
		goto exit_destroy_phy_reset_workqueue;
	}

	/* One NAPI context per channel; a channel may serve RX, TX or both. */
	max_queues = max(lgmac->rx_queues_to_use, lgmac->tx_queues_to_use);
	for (queue = 0; queue < max_queues; queue++) {
		struct lgmac_channel *ch = &lgmac->channel[queue];

		ch->priv_data = lgmac;
		ch->index = queue;

		if (queue < lgmac->rx_queues_to_use)
			ch->has_rx = 1;
		if (queue < lgmac->tx_queues_to_use)
			ch->has_tx = 1;

		netif_napi_add(ndev, &ch->napi, lombo_gmac_napi_poll,
			       NAPI_POLL_WEIGHT);
	}

	ret = register_netdev(ndev);
	if (ret) {
		PRT_ERR("%s: failed to register net device (%d)\n",
			dev_name(dev), ret);
		goto exit_delete_napi;
	}

	netif_carrier_off(ndev);
	if (lgmac->tso_en)
		netif_set_gso_max_size(ndev, 14480);

	platform_set_drvdata(pdev, lgmac->net_dev);

	PRT_INFO("%s: driver probe success\n", lgmac->net_dev->name);

	return 0;

exit_delete_napi:
	for (queue = 0; queue < max_queues; queue++) {
		struct lgmac_channel *ch = &lgmac->channel[queue];

		netif_napi_del(&ch->napi);
	}

exit_destroy_phy_reset_workqueue:
	destroy_workqueue(lgmac->workqueue_phy_reset);

exit_destroy_workqueue:
	destroy_workqueue(lgmac->workqueue);

exit_destroy_mutex:
	mutex_destroy(&lgmac->lock);

exit_free_net_device:
	free_netdev(ndev);

	return ret;
}

/**
 * lombo_gmac_remove - remove entry point of the driver.
 * @pdev: pointer to platform device struct.
 *
 * Unregisters the net device and releases everything set up in probe:
 * NAPI contexts, both workqueues, the mutex and the net device itself.
 *
 * return 0 if success; otherwise failed.
 */
static int lombo_gmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	u32 queue, max_queues;

	PRT_DBG("%s: driver remove\n", lgmac->net_dev->name);

	/* unregister and free net device */
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	max_queues = max(lgmac->rx_queues_to_use, lgmac->tx_queues_to_use);
	for (queue = 0; queue < max_queues; queue++) {
		struct lgmac_channel *ch = &lgmac->channel[queue];

		netif_napi_del(&ch->napi);
	}
	/*
	 * Destroy both workqueues created in probe; the PHY reset
	 * workqueue was previously leaked here.
	 */
	destroy_workqueue(lgmac->workqueue_phy_reset);
	destroy_workqueue(lgmac->workqueue);
	mutex_destroy(&lgmac->lock);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * lombo_gmac_suspend - system sleep entry: stop the interface and,
 * when PM clock domains are not in use, gate the module clocks.
 */
static int lombo_gmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	int ret = 0;

	/* Nothing to do if the interface was never brought up. */
	if (!ndev || !netif_running(ndev))
		return 0;

	ret = __lombo_gmac_stop(ndev);
	if (ret) {
		PRT_ERR("%s: failed to stop lombo_gmac (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

#ifndef CONFIG_PM_GENERIC_DOMAINS
	/* Without PM domains the driver gates its clocks itself. */
	lombo_gmac_clock_deinit(lgmac);
#endif

	return 0;
}

/*
 * lombo_gmac_resume - system resume entry: restore clocks (directly or
 * via the PM clock framework) and reopen the interface.
 */
static int lombo_gmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct lombo_gmac *lgmac = netdev_priv(ndev);
	int ret = 0;

	/* Nothing to do if the interface was never brought up. */
	if (!ndev || !netif_running(ndev))
		return 0;

#ifndef CONFIG_PM_GENERIC_DOMAINS
	ret = lombo_gmac_clock_init(lgmac);
	if (ret) {
		PRT_ERR("%s: failed to init gmac clock (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	ret = __lombo_gmac_open(ndev);
	if (ret) {
		PRT_ERR("%s: failed to open lombo_gmac (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}
#else
	/*
	 * With PM domains, temporarily ungate the pm clocks around the
	 * open if the device is runtime-suspended, then re-gate them.
	 */
	if (pm_runtime_status_suspended(dev))
		pm_clk_resume(dev);

	ret = __lombo_gmac_open(ndev);
	if (ret) {
		PRT_ERR("%s: failed to open lombo_gmac (%d)\n",
			lgmac->net_dev->name, ret);
		return ret;
	}

	if (pm_runtime_status_suspended(dev))
		pm_clk_suspend(dev);
#endif

	return 0;
}

#endif

/*
 * Defined unconditionally so any reference to &lombo_gmac_pm_ops links
 * regardless of config; it was previously only defined under
 * CONFIG_PM_SLEEP, which broke CONFIG_PM && !CONFIG_PM_SLEEP builds.
 * SET_SYSTEM_SLEEP_PM_OPS expands to nothing when PM_SLEEP is off.
 */
static const struct dev_pm_ops lombo_gmac_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	SET_SYSTEM_SLEEP_PM_OPS(lombo_gmac_suspend, lombo_gmac_resume)
#endif
};

static const struct of_device_id lombo_gmac_match[] = {
	{ .compatible = "lombo,n7v3-gmac", },
	{},
};

MODULE_DEVICE_TABLE(of, lombo_gmac_match);

static struct platform_driver lombo_gmac_driver = {
	.probe  = lombo_gmac_probe,
	.remove = lombo_gmac_remove,
	.driver = {
		.owner		= THIS_MODULE,
		.name		= "lombo-gmac",
#ifdef CONFIG_PM_SLEEP
		/*
		 * lombo_gmac_pm_ops is defined under CONFIG_PM_SLEEP;
		 * guarding this reference with CONFIG_PM alone broke
		 * CONFIG_PM && !CONFIG_PM_SLEEP builds.
		 */
		.pm		= &lombo_gmac_pm_ops,
#endif
		.of_match_table	= lombo_gmac_match,
	},
};

/* Standard module registration boilerplate. */
module_platform_driver(lombo_gmac_driver);

MODULE_DESCRIPTION("Driver for LomboTech GMAC Controller");
MODULE_AUTHOR("lomboswer <lomboswer@lombotech.com>");
MODULE_LICENSE("GPL v2");
