/*
 * Copyright (C) 2014 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * netmap support for: Emulex be2net (LINUX version)
 *
 * For details on netmap support please see ixgbe_netmap.h
 */


#include <bsd_glue.h>
#include <net/netmap.h>
#include <netmap/netmap_kern.h>

#define SOFTC_T	be_adapter
#define BE_COMPL_BATCH	64
#define BE_SEND_BATCH	64


/* prototypes used here, can be probably avoided including
 * the file at some later place
 */
static int be_close(struct net_device *netdev);
static int be_open(struct net_device *netdev);
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted);
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted);
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq);
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped);
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len);
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index);


/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
/*
 * Enable/disable netmap mode for the device.
 * Called under the netmap lock, only on the first register or the
 * last unregister. The interface is restarted (close + open) so the
 * rings are rebuilt with or without netmap ownership.
 */
static int
be2net_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct SOFTC_T *adapter = netdev_priv(ifp);
	struct net_device *dev = adapter->netdev;

	rtnl_lock();
	/* quiesce the interface before flipping modes */
	if (netif_running(dev))
		be_close(dev);

	/* switch the na/ifp flags and callbacks to the requested mode */
	if (onoff)
		nm_set_native_flags(na);
	else
		nm_clear_native_flags(na);

	/* restart if it was up */
	if (netif_running(dev))
		be_open(dev);
	rtnl_unlock();

	return 0;
}


/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
be2net_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* device-specific */
	struct SOFTC_T *adapter = netdev_priv(ifp);
	struct be_tx_obj *txo = &adapter->tx_obj[ring_nr];
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 num_tx_compl = 0; // , wrb_cnt = 0, num_wrbs = 0,
	u16 num_wrb_compl = 0;
        u16 end_idx, frag_index = 0;
	// int reclaim_tx;

	/*
	 * First part: process new packets to send.
	 */

	if (!netif_carrier_ok(ifp)) {
		goto out;
	}

	nm_i = kring->nr_hwcur;
	RD(5, "%s start nm_i %d head %d txq->head %d tail %d used %d",
		kring->name, nm_i, head, txq->head, txq->tail, atomic_read(&txq->used));
	if (nm_i != head) {	/* we have new packets to send */

		RD(5, "sending %u pkts txq->head = %d txcq->tail = %d txq->tail = %d",
		  (unsigned)(head - nm_i) & lim, txq->head, txo->cq.tail, txq->tail);

		nic_i = netmap_idx_k2n(kring, nm_i);

		for (n = 0; nm_i != head; ) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			// u16 start = txq->head;
			struct be_eth_hdr_wrb *hdr;
			struct be_eth_wrb *wrb;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* HDR WRB/WQE */
			hdr = queue_head_node(txq);
			queue_head_inc(txq);
			/* DATA WRB/WQE */
			wrb = queue_head_node(txq);
			queue_head_inc(txq);

			wrb_fill(wrb, paddr, len);

			/* NIC HDR WQE AMAP settings */
			memset(hdr, 0, sizeof(*hdr));
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, 2);
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
			/* set the report flags every so often */
			if ((slot->flags & NS_REPORT) ||
			    ((nm_i % BE_COMPL_BATCH) == BE_COMPL_BATCH - 1)) {
				RD(5, "Set evt/compl bit for nm_i = %d", nm_i);
				AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
				AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
			}
			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				// netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			be_dws_cpu_to_le(hdr, sizeof(*hdr));

			n++;
			if ( n >= BE_SEND_BATCH) {
				atomic_add(n*2, &txq->used);
				RD(5, "%s notify %d nm_i %d head %d txq->head %d tail %d used %d",
					kring->name, n*2, nm_i, head, txq->head, txq->tail, atomic_read(&txq->used));
				be_txq_notify(adapter, txo, n * 2);
				n = 0;
			}

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
#if 0
		/* compose a dummy wrb if there are odd set of wrbs to notify */
		if (!lancer_chip(adapter) && (wrb_cnt & 1)) {
			wrb_fill(queue_head_node(txq), 0, 0);
			queue_head_inc(txq);
			atomic_inc(&txq->used);
			wrb_cnt++;
			hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					TX_HDR_WRB_NUM_SHIFT);
			hdr->dw[2] |= cpu_to_le32((txo->last_wrb_cnt + 1) <<
					TX_HDR_WRB_NUM_SHIFT);

		}
#endif
		if (n) {	/* leftover packets */
			atomic_add(n*2, &txq->used);
			RD(5, "%s notify %d nm_i %d head %d txq->head %d tail %d used %d",
				kring->name, n*2, nm_i, head, txq->head, txq->tail, atomic_read(&txq->used));
			be_txq_notify(adapter, txo, n * 2);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (1 ||  nm_i == report_frequency ||
		flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		/* Get Tx completions for the Tx Completion Q (CQ) */
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			/* end_idx is the index of the last WRB in the completion returned */
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			RD(10, "tail %d end_idx %d", txq->tail, end_idx);

			do {
				frag_index = txq->tail;
				queue_tail_inc(txq);
				num_wrb_compl++;
			} while (frag_index != end_idx);
			RD(5, "end_idx = %d txq->tail = %d", end_idx, txq->tail);
//			num_wrb_compl = MODULO(end_idx + 1 - txq->tail, txq->len);
//			txq->tail = MODULO(end_idx + 1, txq->len);
			ND("num_wrb_compl = %d", num_wrb_compl);
			num_tx_compl++;
		}

		if (num_tx_compl) {
			tx_stats(txo)->tx_compl += num_tx_compl;
			ND("advance tail by %d num_tx_compl = %d",
			   num_nm_compl, num_tx_compl);
//			atomic_sub(2* num_tx_compl , &txo->q.used);

			be_cq_notify(adapter, txo->cq.id, true, num_tx_compl);
			kring->nr_hwtail = (kring->nr_hwtail + 1 * num_wrb_compl/2) % kring->nkr_num_slots;
//			kring->nr_hwtail = nm_prev(kring->nr_hwcur, lim);
		}
	}

out:
	 nm_txsync_finalize(kring);
	 return 0;
}

/*
 * Reconcile kernel and user view of the receive ring.
 * RX support is not implemented yet: this stub just reports success.
 */
static int
be2net_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	(void)kring;	/* unused until RX support is written */
	(void)flags;
	return 0;
}

static int
be2net_netmap_configure_tx_ring(struct SOFTC_T *adapter, int ring_nr)
{
	struct netmap_adapter *na = NA(adapter->netdev);
	struct netmap_slot *slot;
	if (!na || !(na->na_flags & NAF_NATIVE_ON)) {
		return 0;
	}
	slot = netmap_reset(na, NR_TX, ring_nr, 0);
	if (!slot)
		return 0;
	return 1;
}

/*
 * Prepare all RX rings for netmap mode: for each ring, unmap the
 * driver-owned RX buffers and point every NIC descriptor at the
 * corresponding netmap buffer, then notify the NIC of the posted
 * descriptors (in chunks of at most 256, the doorbell limit).
 * Returns 1 on success, 0 if netmap is not active on the device.
 */
static int
be2net_netmap_configure_rx_rings(struct SOFTC_T *adapter)
{
	struct netmap_adapter *na = NA(adapter->netdev);
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq;
	struct netmap_slot *slot;
	struct be_eth_rx_d *rxd;
	struct be_rx_obj *rxo;
	u32 notify = 0;
	int lim, i, ring_nr;

	D("adapter = %p adapter->netdev = %p na = %p",
		adapter, adapter->netdev, na);
	if (!na || !(na->na_flags & NAF_NATIVE_ON)) {
		return 0;
	}
   for_all_rx_queues(adapter, rxo, ring_nr) {
	slot = netmap_reset(na, NR_RX, ring_nr, 0);
	if (!slot) {
		D("!slot");	/* ring not exported to netmap; skip it */
		continue;
	}
	/* NOTE(review): lim is computed but never used below; the fill
	 * loop runs over all num_rx_desc descriptors — confirm intent. */
	lim = na->num_rx_desc - 1 - nm_kr_rxspace(&na->rx_rings[ring_nr]);

	/* NOTE(review): rxo is already the loop cursor from
	 * for_all_rx_queues(); this reassignment is presumably redundant
	 * (or papers over a cursor/index mismatch) — verify. */
	rxo = &adapter->rx_obj[ring_nr];
	rxq = &rxo->q;
	D("lim = %d num_rx_desc = %d", lim, na->num_rx_desc);
	for (i = 0; i < na->num_rx_desc; i++) {
		/*
		 * Fill the map and set the buffer address in the NIC ring,
		 * considering the offset between the netmap and NIC rings
		 * (see comment in ixgbe_setup_transmit_ring() ).
		 */
		int si = netmap_idx_n2k(&na->rx_rings[ring_nr], i);
		uint64_t paddr;

		page_info = &rxo->page_info_tbl[i];
		PNMB(na, slot + si, &paddr);
		/* NOTE(review): this unconditional unmap followed by the
		 * conditional unmap below releases the same mapping twice
		 * when last_page_user is set — looks like a double unmap;
		 * confirm against the driver's page-sharing scheme. */
		pci_unmap_page(adapter->pdev, pci_unmap_addr(page_info, bus),
			       adapter->big_page_size/2, PCI_DMA_FROMDEVICE);
		if (page_info->last_page_user) {
			pci_unmap_page(adapter->pdev, pci_unmap_addr(page_info, bus),
					adapter->big_page_size/2, PCI_DMA_FROMDEVICE);
			page_info->last_page_user = false;
		}

		/* record the netmap buffer's DMA address in the page table
		 * and in the NIC RX descriptor (split into lo/hi words) */
		pci_unmap_addr_set(page_info, bus, paddr);
		rxd = queue_head_node(rxq);
		RD(5, "paddr = %p i = %d head = %d", (void *)paddr, i, rxq->head);
		rxd->fragpa_lo = cpu_to_le32(paddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(paddr));

		queue_head_inc(rxq);
	}
	/* ring the RX doorbell, at most 256 descriptors per notify */
	do {
		notify = min(256, i);
		be_rxq_notify(adapter, rxq->id, notify);
	} while (i -= notify, i);
    }

	return 1;	// success
}


/*
 * Attach this device to the netmap subsystem, describing ring counts
 * and sizes and installing the netmap callbacks.
 */
static void
be2net_netmap_attach(struct SOFTC_T *adapter)
{
	struct netmap_adapter na;

	memset(&na, 0, sizeof(na));

	na.ifp = adapter->netdev;
	/* one slot for command, one slot for data */
	na.num_tx_desc = TX_Q_LEN/2;
	/* NOTE(review): RX descriptor count is sized from TX_Q_LEN —
	 * presumably a placeholder while RX support is a stub; verify. */
	na.num_rx_desc = TX_Q_LEN;
	na.nm_register = be2net_netmap_reg;
	na.nm_txsync = be2net_netmap_txsync;
	na.nm_rxsync = be2net_netmap_rxsync;
	/* TX ring count is hard-coded for now */
	na.num_tx_rings = 8; /*adapter->num_tx_qs; */
	na.num_rx_rings = adapter->num_rx_qs;
	D("using %d TX and %d RX queues",
					 na.num_tx_rings, na.num_rx_rings);
	netmap_attach(&na);
}

/* end of file */
