// SPDX-License-Identifier: GPL-2.0
/*
 * base from kernel 4.x.
 * Description: stmmac legacy rx and phy driver mode
 */

#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/phylink.h>
#include "stmmac.h"
#include "dwmac1000.h"
#include "hwif.h"
#include "stmmac_ext.h"

/**
 * stmmac_init_rx_buffers_legacy - init the RX descriptor buffer.
 * from kernel 4.4
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
int stmmac_init_rx_buffers_legacy(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb)
		return -ENOMEM;

	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		pr_err("%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		/* Clear the stale pointer: the skb was just freed, and the
		 * free/unwind path must not unmap or free it a second time.
		 */
		rx_q->rx_skbuff[i] = NULL;
		return -EINVAL;
	}

	/* Hand the mapped buffer address to the descriptor */
	p->des2 = rx_q->rx_skbuff_dma[i];

	/* 16KiB buffers need DES3 set up as well (ring mode split buffer) */
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer_legacy - free RX dma buffers
 * from kernel 4.4
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
void stmmac_free_rx_buffer_legacy(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	/* Unmap and release the skb only if the slot is populated; the
	 * slot is always cleared so a later free cannot touch it again.
	 */
	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings_legacy - init the RX/TX descriptor rings.
 * from kernel 4.4
 * @dev: net device structure
 * @flags: gfp flag.
 * @stmmac_clear_descriptors: callback that clears all descriptors
 * @stmmac_display_rings: callback that dumps the rings for debugging
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
int init_dma_desc_rings_legacy(struct net_device *dev, gfp_t flags,
				void (*stmmac_clear_descriptors)(struct stmmac_priv *priv),
				void (*stmmac_display_rings)(struct stmmac_priv *priv))
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize = 0;
	struct dma_desc *p = NULL;
	int ret = -ENOMEM;
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	struct stmmac_rx_queue *rx_q = NULL;
	struct stmmac_tx_queue *tx_q = NULL;
	u32 queue;

	/* Let the mode-specific hook size 16KiB buffers from the MTU */
	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		trace_stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz, &bfsize);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION: allocate and DMA-map one skb per descriptor */
	for (queue = 0; queue < rx_count; queue++) {
		rx_q = &priv->rx_queue[queue];
		netif_dbg(priv, probe, priv->dev, "(%s) dma_rx_phy=0x%08x\n",
			  __func__, (u32)rx_q->dma_rx_phy);
		for (i = 0; i < rxsize; i++) {
			p = rx_q->dma_rx + i;
			ret = stmmac_init_rx_buffers_legacy(priv, p, i, flags,
							    queue);
			if (ret)
				goto err_init_rx_buffers;
		}
		rx_q->cur_rx = 0;
		/* i == rxsize on loop exit, so dirty_rx starts at 0 */
		rx_q->dirty_rx = (unsigned int)(i - rxsize);
	}

	/* TX INITIALIZATION: clear descriptors and bookkeeping entries */
	for (queue = 0; queue < tx_count; queue++) {
		tx_q = &priv->tx_queue[queue];
		for (i = 0; i < txsize; i++) {
			p = tx_q->dma_tx + i;
			p->des2 = 0;
			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
	}
	netdev_reset_queue(priv->dev);
	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	/* Unwind: free the buffers allocated so far in the current queue,
	 * then every buffer of each fully initialized previous queue.
	 * Note: "queue" is unsigned, so the old "while (queue >= 0)" was
	 * always true and only terminated via the inner break; use an
	 * explicit infinite loop instead.
	 */
	for (;;) {
		while (--i >= 0)
			stmmac_free_rx_buffer_legacy(priv, queue, i);

		if (queue == 0)
			break;

		i = priv->dma_rx_size;
		queue--;
	}
	return ret;
}

/* Release every pre-allocated RX skb of the given queue. */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int idx;

	for (idx = 0; idx < rxsize; idx++)
		stmmac_free_rx_buffer_legacy(priv, queue, idx);
}

/**
 * free_dma_rx_desc_resources_legacy - free RX dma desc resources
 * from kernel 4.4
 * @priv: private structure
 */
void free_dma_rx_desc_resources_legacy(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the pre-allocated RX socket buffers first */
		dma_free_rx_skbufs(priv, queue);

		/* Then the coherent descriptor ring itself */
		dma_free_coherent(priv->device,
				  priv->dma_rx_size * sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);

		/* Finally the bookkeeping arrays */
		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources_legacy - alloc RX resources.
 * from kernel 4.4
 * @priv: private structure
 * Description: this function allocates the resources for the RX path.
 * It pre-allocates the RX socket buffer descriptors and bookkeeping
 * arrays in order to allow zero-copy mechanism.
 */
int alloc_dma_rx_desc_resources_legacy(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int rx_count = priv->plat->rx_queues_to_use;
	unsigned int queue;
	struct stmmac_rx_queue *rx_q = NULL;
	int ret = -ENOMEM;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		rx_q = &priv->rx_queue[queue];

		rx_q->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_rx_skbuff_dma;

		rx_q->rx_skbuff = kmalloc_array(rxsize,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_rx_skbuff;

		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  rxsize * sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			goto err_dma;

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;
	}

	return 0;

	/* Error unwinding. The labels fall through so the current queue's
	 * partial allocations are released first (the member that failed
	 * is NULL, and kfree(NULL) is a no-op), then the loop releases
	 * every fully allocated previous queue. The previous version
	 * indexed the err_dma loop with "queue" instead of "i" (freeing
	 * the wrong ring repeatedly) and leaked the failing queue's
	 * partial allocations.
	 */
err_dma:
	kfree(rx_q->rx_skbuff);
err_rx_skbuff:
	kfree(rx_q->rx_skbuff_dma);
err_rx_skbuff_dma:
	while (queue-- > 0) {
		rx_q = &priv->rx_queue[queue];
		dma_free_coherent(priv->device,
				  rxsize * sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);
		kfree(rx_q->rx_skbuff);
		kfree(rx_q->rx_skbuff_dma);
	}
	return ret;
}

/**
 * stmmac_rx_refill_legacy - refill used skb preallocated buffers
 * from kernel 4.4
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill_legacy(struct stmmac_priv *priv, u32 queue)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;

	/* Walk every entry consumed by stmmac_rx_legacy() (cur_rx ahead of
	 * dirty_rx) and give it back to the hardware, re-arming with a
	 * fresh skb where the old one was handed up the stack.
	 */
	for (; priv->rx_queue[queue].cur_rx - priv->rx_queue[queue].dirty_rx > 0;
		priv->rx_queue[queue].dirty_rx++) {
		unsigned int entry = priv->rx_queue[queue].dirty_rx % rxsize;
		struct dma_desc *p = priv->rx_queue[queue].dma_rx + entry;

		/* NULL slot means the skb was delivered to the stack and a
		 * replacement must be allocated and DMA-mapped.
		 */
		if (likely(priv->rx_queue[queue].rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = __netdev_alloc_skb(priv->dev, bfsize, GFP_ATOMIC);
			if (unlikely(skb == NULL))
				break;	/* retry on the next refill pass */

			priv->rx_queue[queue].rx_skbuff[entry] = skb;
			priv->rx_queue[queue].rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize, DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device, priv->rx_queue[queue].rx_skbuff_dma[entry])) {
				dev_err(priv->device, "Rx dma map failed\n");
				dev_kfree_skb(skb);
				break;
			}
			/* NOTE(review): des2 is written raw while des3 goes
			 * through cpu_to_le32/le32_to_cpu — endianness handling
			 * looks inconsistent; presumably only LE targets are
			 * supported. TODO confirm.
			 */
			p->des2 = priv->rx_queue[queue].rx_skbuff_dma[entry];
			/* Fill DES3 in case of RING mode */
			if (priv->dma_buf_sz > BUF_SIZE_8KiB)
				p->des3 = cpu_to_le32(le32_to_cpu(p->des2) +
					BUF_SIZE_8KiB - ARCH_DMA_MINALIGN);

			if (netif_msg_rx_status(priv))
				pr_info("\trefill entry #%d\n", entry);
		}
		/* Barriers ensure the buffer address is visible before the
		 * OWN bit hands the descriptor back to the DMA engine.
		 */
		wmb();
		stmmac_set_rx_owner(priv, p, 1); //TODO
		wmb();
	}
}

/* Debug helper: log the buffer address/length, then hex-dump the bytes. */
static void print_pkt(unsigned char *data, int size)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", size, data);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size);
}

/**
 * stmmac_rx_legacy - manage the receive process
 * from kernel 4.4
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description: this is the function called by the NAPI poll method.
 * It gets all the frames inside the ring.
 */
int stmmac_rx_legacy(struct stmmac_priv *priv, int limit, u32 queue)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int next_entry = priv->rx_queue[queue].cur_rx % rxsize;
	unsigned int count = 0;
	int coe = priv->hw->rx_csum;	/* HW RX checksum offload state */
	struct sk_buff *skb = NULL;
	int frame_len = 0;
	struct stmmac_channel *ch = &priv->channel[queue];

	ch->priv_data = priv;
	ch->index = queue;

	if (netif_msg_rx_status(priv)) {
		pr_info("%s: descriptor ring:\n", __func__);
		stmmac_display_ring(priv, &priv->rx_queue[queue].dma_rx,
			priv->dma_rx_size, 0, priv->rx_queue[queue].dma_rx_phy,
			sizeof(struct dma_desc));
	}
	/* NOTE(review): count is unsigned, limit is int — a negative limit
	 * would compare as a huge unsigned value; callers presumably pass
	 * a positive NAPI budget. TODO confirm.
	 */
	while (count < limit) {
		int status, entry;
		struct dma_desc *p;

		entry = next_entry;
		p = priv->rx_queue[queue].dma_rx + entry;
		/* Stop as soon as a descriptor is still owned by the DMA */
		if (trace_enh_desc_get_rx_owner(p))
			break;

		count++;
		/* Advance cur_rx and prefetch the next descriptor */
		next_entry = (++priv->rx_queue[queue].cur_rx) % rxsize;
		prefetch(priv->rx_queue[queue].dma_rx + next_entry);

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats, &priv->xstats, p);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwitten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				priv->rx_queue[queue].rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_queue[queue].rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
			/*  check if frame_len fits the preallocated memory */
			if (frame_len > priv->dma_buf_sz) {
				priv->dev->stats.rx_length_errors++;
				/* skb stays in the ring; refill re-arms it */
				continue;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
				if (frame_len > ETH_FRAME_LEN)
					pr_info("\tframe size %d, COE: %d\n",
						 frame_len, status);
			}
			skb = priv->rx_queue[queue].rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
					   priv->dev->name);
				priv->dev->stats.rx_dropped++;
				continue;
			}
			prefetch(skb->data);
			/* Detach the skb from the ring; the refill pass will
			 * allocate and map a replacement for this slot.
			 */
			priv->rx_queue[queue].rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_queue[queue].rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);

			if (netif_msg_pktdata(priv)) {
				pr_info("frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}

			/* not support STMMAC_VLAN_TAG_USED */

			skb->protocol = eth_type_trans(skb, priv->dev);

			/* Propagate the HW checksum verdict to the stack */
			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			trace_flowctrl_pump(skb);

			trace_napi_receive(ch, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
	}

	/* Re-arm all descriptors consumed above */
	stmmac_rx_refill_legacy(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}
