/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <netinet/in.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_bus_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_kni.h>
#include <cmdline_parse.h>
#include <cmdline_parse_ipaddr.h>
#include <rte_mbuf.h>
#include <rte_arp.h>




/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1

/* Max size of a single packet */
#define MAX_PACKET_SZ           2048

/* Size of the data buffer in each mbuf */
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)

/* Number of mbufs in mempool that is created */
#define NB_MBUF                 (8192 * 16)

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ            32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ        PKT_BURST_SZ

/* Number of RX ring descriptors */
#define NB_RXD                  1024

/* Number of TX ring descriptors */
#define NB_TXD                  1024

/* Total octets in ethernet header */
#define KNI_ENET_HEADER_SIZE    14

/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE       4

#define KNI_US_PER_SECOND       1000000
#define KNI_SECOND_PER_DAY      86400

#define KNI_MAX_KTHREAD 32

#define RING_SIZE 1024

static volatile int force_quit=0;

typedef enum sendType{ SEND_ARP, SEND_UDP }SendType;

/*
 * Structure of port parameters
 */
struct kni_port_params {
	uint16_t port_id;/* Port ID */
	unsigned lcore_rx; /* lcore ID for RX */
	unsigned lcore_tx; /* lcore ID for TX */
	uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
	uint32_t nb_kni; /* Number of KNI devices to be created */
	unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */
	struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */
} __rte_cache_aligned;

static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];


/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

/* Mempool for mbufs */
static struct rte_mempool * pktmbuf_pool = NULL;

/* Mask of enabled ports */
static uint32_t ports_mask = 0;
/* Ports set in promiscuous mode off by default. */
static int promiscuous_on = 0;
/* Monitor link status continually. off by default. */
static int monitor_links;

/* Structure type for recording kni interface specific stats */
struct kni_interface_stats {
	/* number of pkts received from NIC, and sent to KNI */
	uint64_t rx_packets;

	/* number of pkts received from NIC, but failed to send to KNI */
	uint64_t rx_dropped;

	/* number of pkts received from KNI, and sent to NIC */
	uint64_t tx_packets;

	/* number of pkts received from KNI, but failed to send to NIC */
	uint64_t tx_dropped;
};

/* kni device statistics array */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];

static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
static int kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[]);

static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t kni_pause = RTE_ATOMIC32_INIT(0);

/* Print out statistics on packets handled */
static void
print_stats(void)
{
	uint16_t i;

	printf("\n**KNI example application statistics**\n"
	       "======  ==============  ============  ============  ============  ============\n"
	       " Port    Lcore(RX/TX)    rx_packets    rx_dropped    tx_packets    tx_dropped\n"
	       "------  --------------  ------------  ------------  ------------  ------------\n");
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!kni_port_params_array[i])
			continue;

		printf("%7d %10u/%2u %13"PRIu64" %13"PRIu64" %13"PRIu64" "
							"%13"PRIu64"\n", i,
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->lcore_tx,
						kni_stats[i].rx_packets,
						kni_stats[i].rx_dropped,
						kni_stats[i].tx_packets,
						kni_stats[i].tx_dropped);
	}
	printf("======  ==============  ============  ============  ============  ============\n");
}

/* Custom handling of signals to handle stats and kni processing */
static void
signal_handler(int signum)
{
	switch (signum) {
	case SIGUSR1:
		/* SIGUSR1 dumps the current statistics table. */
		print_stats();
		break;
	case SIGUSR2:
		/* SIGUSR2 clears every per-port counter. */
		memset(&kni_stats, 0, sizeof(kni_stats));
		printf("\n** Statistics have been reset **\n");
		break;
	default:
		/* SIGRTMIN is not a compile-time constant, so it cannot
		 * appear as a case label; test it here instead. */
		if (signum == SIGRTMIN || signum == SIGINT) {
			printf("\nSIGRTMIN/SIGINT received. KNI processing stopping.\n");
			rte_atomic32_inc(&kni_stop);
		}
		break;
	}
}

/* Free 'num' mbufs from 'pkts' and null the slots so a later pass
 * cannot double-free them.  A NULL array is a no-op. */
static void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
{
	unsigned idx;

	if (!pkts)
		return;

	for (idx = 0; idx < num; idx++) {
		rte_pktmbuf_free(pkts[idx]);
		pkts[idx] = NULL;
	}
}


#define CONTENTLEN 4
#define PUBKEYLEN 4
/* CoLoR GET packet header, wire format (packed, no padding).
 * The trailing number in each comment is the field size in bytes. */
struct CoLoR_get
{
    uint8_t version_type;   //version (4 bits) + type (4 bits)  1
    uint8_t ttl;        //time to live  1
    uint16_t total_len; //total length  2
    uint16_t port_src;  //source port  2
    uint16_t port_dst;  //destination port  2
    uint16_t minmal_PID_CP; //period at which the PID changes  2
    uint8_t PIDs;       //number of PIDs  1
    uint8_t Offest_RES;     //bit-field holding Offset flags / reserved bits  1
    uint32_t offset;    //offset  4
    uint32_t length;    //length of the offset region  4
    uint16_t content_len;   //original comment said "public key length" - presumably content length; verify against protocol spec  2
    uint16_t mtu;       //maximum transmission unit  2
    uint16_t publickey_len; //public key length  2
    uint16_t checksum;  //checksum  2
    uint8_t n_sid[16];  //NID part of an SID, 16 bytes (16*8 = 128 bits)  16
    uint8_t l_sid[20];  //SID payload, 20 bytes (20*8 = 160 bits)  20
    uint8_t nid[16];    //NID, 16 bytes  16
    uint8_t content[CONTENTLEN];    // Content characteristics 4
    uint8_t publickey[PUBKEYLEN];   //public key  4
}__attribute__((__packed__));
typedef struct CoLoR_get CoLoR_get_t;


/* RARP and ARP opcodes */
enum {  ARP_REQUEST = 1, ARP_REPLY = 2, RARP_REQUEST = 3, RARP_REPLY = 4,
	GRATUITOUS_ARP = 5 };

typedef union {
	uint16_t _16[3];
	uint8_t _8[6];
} mac_e;

typedef union {
	uint16_t _16[2];
	uint32_t _32;
} ip4_e;



/* ARP packet format */
typedef struct arpPkt_s {
	uint16_t hrd;	/**< ARP Hardware type */
	uint16_t pro;	/**< ARP Protocol type */
	uint8_t hln;	/**< Hardware length */
	uint8_t pln;	/**< Protocol length */
	uint16_t op;	/**< opcode */
	mac_e sha;	/**< Sender hardware address */
	ip4_e spa;	/**< Sender IP address */
	mac_e tha;	/**< Target hardware address */
	ip4_e tpa;	/**< Target protocol address */
} __attribute__((__packed__)) arpPkt_t;

typedef struct ipHdr_s {
	uint8_t vl;	/* Version and header length */
	uint8_t tos;	/* Type of Service */
	uint16_t tlen;	/* total length */
	uint16_t ident;	/* identification */
	uint16_t ffrag;	/* Flags and Fragment offset */
	uint8_t ttl;	/* Time to Live */
	uint8_t proto;	/* Protocol */
	uint16_t cksum;	/* Header checksum */
	uint32_t src;	/* source IP address */
	uint32_t dst;	/* destination IP address */
} __attribute__((__packed__)) ipHdr_t;


typedef struct pkt_hdr_s {
	struct ether_hdr eth;	/**< Ethernet header */
	union {
		ipHdr_t ipv4;		/**< IPv4 Header */

	} u;
} __attribute__((__packed__)) pkt_hdr_t;


typedef struct pkt_seq_s {
	/* Packet type and information */
	struct ether_addr eth_dst_addr;	/**< Destination Ethernet address */
	struct ether_addr eth_src_addr;	/**< Source Ethernet address */

	struct cmdline_ipaddr ip_src_addr;	/**< Source IPv4 address also used for IPv6 */
	struct cmdline_ipaddr ip_dst_addr;	/**< Destination IPv4 address */
	uint32_t ip_mask;			/**< IPv4 Netmask value */

	uint16_t sport;		/**< Source port value */
	uint16_t dport;		/**< Destination port value */
	uint16_t ethType;	/**< IPv4 or IPv6 */
	uint16_t ipProto;	/**< TCP or UDP or ICMP */
	uint16_t vlanid;	/**< VLAN ID value if used */
	uint8_t cos;		/**< 802.1p cos value if used */
	uint8_t tos;		/**< tos value if used */
	uint16_t ether_hdr_size;/**< Size of Ethernet header in packet for VLAN ID */

	uint32_t mpls_entry;	/**< MPLS entry if used */
	uint16_t qinq_outerid;	/**< Outer VLAN ID if Q-in-Q */
	uint16_t qinq_innerid;	/**< Inner VLAN ID if Q-in-Q */
	uint32_t gre_key;	/**< GRE key if used */

	uint16_t pktSize;	/**< Size of packet in bytes not counting FCS */
	uint8_t seq_enabled;	/**< Enable or disable this sequence through GUI */
	uint8_t pad0;
	uint32_t gtpu_teid;	/**< GTP-U TEID, if UDP dport=2152 */

        RTE_STD_C11
        union {
                uint64_t vxlan;         	/**< VxLAN 64 bit word */
                struct {
                        uint16_t vni_flags;     /**< VxLAN Flags */
                        uint16_t group_id;      /**< VxLAN Group Policy ID */
                        uint32_t vxlan_id;	/**< VxLAN VNI */
                };
        };

	pkt_hdr_t hdr __rte_cache_aligned;	/**< Packet header data */
	uint8_t pad[RTE_MBUF_DEFAULT_BUF_SIZE  - sizeof(pkt_hdr_t)];
} pkt_seq_t __rte_cache_aligned;


/* UDP Header */
typedef struct udpHdr_s {
	uint16_t sport;	/* Source port value */
	uint16_t dport;	/* Destination port value */
	uint16_t len;	/* Length of datagram + header */
	uint16_t cksum;	/* Checksum of data and header */
} __attribute__((__packed__)) udpHdr_t;

/* IP overlay header for the pseudo header */
typedef struct ipOverlay_s {
	uint32_t node[2];
	uint8_t pad0;	/* overlays ttl */
	uint8_t proto;	/* Protocol type */
	uint16_t len;	/* Protocol length, overlays cksum */
	uint32_t src;	/* Source address */
	uint32_t dst;	/* Destination address */
} __attribute__((__packed__)) ipOverlay_t;

/* The UDP/IP Pseudo header */
typedef struct udpip_s {
	ipOverlay_t ip;	/* IPv4 overlay header */
	udpHdr_t udp;	/* UDP header for protocol */
} __attribute__((__packed__)) udpip_t;



/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 * Author: Lianpeng
 */
#include <stdint.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_ether.h>

#define RX_RING_SIZE 1024  
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 1

#define TXONLY_DEF_PACKET_LEN 64
//IP_SRC_ADDR is the IP address of the VM's NIC (192.168.73.3)
//IP_DST_ADDR is the IP address of the host machine (192.168.73.1)
//In VirtualBox the NIC enp0s8 uses Host-Only networking,
//in which mode the host and the VM can ping each other.

#define IP_SRC_ADDR ((192U << 24) | (168 << 16) | (73 << 8) | 3)
#define IP_DST_ADDR ((192U << 24) | (168 << 16) | (73 << 8) | 1)

//Mandatory IPv4 header fields
#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

//Port settings for the CoLoR GET packet
#define COLOR_SRC_PORT 9191
#define COLOR_DEST_PORT 9191

#define PKG_GEN_COUNT 1

static struct ipv4_hdr pkt_ip_hdr;
static struct CoLoR_get pkt_colorget_hdr;
static struct ether_hdr pkt_eth_hdr;

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
	},
};

/* Forward declaration: copy_buf_to_pkt() is defined later in this file.
 * Without it the calls below are implicit declarations, which C99+
 * forbids. */
void copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt,
		     unsigned offset);

/* Function: prepare a CoLoR GET packet (Ethernet + IPv4 + CoLoR_get)
 * inside an already-allocated mbuf, using the pre-built static headers
 * pkt_eth_hdr / pkt_ip_hdr / pkt_colorget_hdr.
 * Input:  struct rte_mbuf *pkt - mbuf to fill (must be large enough)
 * Output: void
 */
static void pkt_prepare(struct rte_mbuf *pkt){
	/* One unsegmented frame: L2 + L3 + CoLoR GET header. */
	pkt->data_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
			sizeof(struct CoLoR_get);
	pkt->nb_segs = 1;
	pkt->pkt_len = pkt->data_len;
	pkt->ol_flags = PKT_TX_IPV4;
	pkt->vlan_tci = 0;
	pkt->vlan_tci_outer = 0;
	pkt->l2_len = sizeof(struct ether_hdr);
	pkt->l3_len = sizeof(struct ipv4_hdr);

	/* Hard-coded source MAC 08:00:27:ea:05:4f (this host's NIC). */
	pkt_eth_hdr.s_addr.addr_bytes[0] = 0x08;
	pkt_eth_hdr.s_addr.addr_bytes[1] = 0x00;
	pkt_eth_hdr.s_addr.addr_bytes[2] = 0x27;
	pkt_eth_hdr.s_addr.addr_bytes[3] = 0xea;
	pkt_eth_hdr.s_addr.addr_bytes[4] = 0x05;
	pkt_eth_hdr.s_addr.addr_bytes[5] = 0x4f;
	/* Hard-coded destination MAC 0a:00:27:00:00:14 (the peer host). */
	pkt_eth_hdr.d_addr.addr_bytes[0] = 0x0a;
	pkt_eth_hdr.d_addr.addr_bytes[1] = 0x00;
	pkt_eth_hdr.d_addr.addr_bytes[2] = 0x27;
	pkt_eth_hdr.d_addr.addr_bytes[3] = 0x00;
	pkt_eth_hdr.d_addr.addr_bytes[4] = 0x00;
	pkt_eth_hdr.d_addr.addr_bytes[5] = 0x14;
	/* MAC bytes are already laid out in wire order, so no byte-order
	 * conversion is applied to them (the original author observed
	 * problems when swapping them). */
	pkt_eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

	/* Copy the pre-built headers into the mbuf back to back. */
	copy_buf_to_pkt(&pkt_eth_hdr, sizeof(pkt_eth_hdr), pkt, 0);
	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
			sizeof(struct ether_hdr));
	copy_buf_to_pkt(&pkt_colorget_hdr, sizeof(pkt_colorget_hdr), pkt,
			sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr));
}



/**************************************************************************//**
 *
 * pktgen_send_arp - Send an ARP request packet.
 *
 * DESCRIPTION
 * Create and send an ARP request packet.
 *
 * RETURNS: N/A
 *
 * SEE ALSO:
 */

pkt_seq_t         sourPkt;

#define ETH_HW_TYPE                 1		/* Ethernet hardware type */


/* inetAddrCopy(void *t, void *f) - copy a 32-bit IPv4 address from f to t.
 * BUG FIX: the original dereferenced both pointers as uint32_t*, but the
 * destinations here are fields of __attribute__((packed)) packet headers
 * (e.g. arpPkt_t.spa) and may be misaligned; an unaligned uint32_t store
 * is undefined behavior on strict-alignment targets.  memcpy is safe for
 * any alignment and compiles to the same single store where legal. */
void inetAddrCopy(void *t, void *f) {
	memcpy(t, f, sizeof(uint32_t));
}


/*
 * pktgen_send_udp - placeholder for building a UDP packet in 'pktSend'.
 *
 * NOTE(review): not implemented.  The original body only declared a set
 * of never-used locals (pkt, m, eth, udp_hdr, arp, addr, qid, type) and
 * returned, so this is kept as an explicit documented no-op to preserve
 * the external interface for existing callers.
 */
void
pktgen_send_udp(struct rte_mbuf  * pktSend)
{
	(void)pktSend;	/* silence unused-parameter warnings until implemented */
}


#if 0
void
pktgen_send_arp(struct rte_mbuf  * pktSend)
{

	pkt_seq_t         *pkt;
	struct rte_mbuf   *m;
	struct ether_hdr  *eth;
	arpPkt_t          *arp;
	uint32_t addr;
	uint8_t qid = 0;
	int type = 1;
    
	pkt=&sourPkt;

	pkt->ip_dst_addr.addr.ipv4.s_addr = IP_DST_ADDR;

	eth = rte_pktmbuf_mtod(pktSend, struct ether_hdr *);
	arp = (arpPkt_t *)&eth[1];

	/* src and dest addr */
	memset(&eth->d_addr, 0xFF, 6);
	ether_addr_copy(&pkt->eth_src_addr, &eth->s_addr);
	eth->ether_type = htons(ETHER_TYPE_ARP);

	memset(arp, 0, sizeof(arpPkt_t));

	rte_memcpy(&arp->sha, &pkt->eth_src_addr, 6);
	addr = htonl(pkt->ip_src_addr.addr.ipv4.s_addr);
	inetAddrCopy(&arp->spa, &addr);

	if (likely(type == GRATUITOUS_ARP) ) {
		rte_memcpy(&arp->tha, &pkt->eth_src_addr, 6);
		addr = htonl(pkt->ip_src_addr.addr.ipv4.s_addr);
		inetAddrCopy(&arp->tpa, &addr);
	} else {
		memset(&arp->tha, 0, 6);
		addr = htonl(pkt->ip_dst_addr.addr.ipv4.s_addr);
		inetAddrCopy(&arp->tpa, &addr);
	}

	/* Fill in the rest of the ARP packet header */
	arp->hrd    = htons(ETH_HW_TYPE);
	arp->pro    = htons(ETHER_TYPE_IPv4);
	arp->hln    = 6;
	arp->pln    = 4;
	arp->op     = htons(ARP_REQUEST);

	m->pkt_len  = 60;
	m->data_len = 60;

}

#endif


/*
 * pktgen_send_arp - build an ARP request for IP_DST_ADDR inside 'pktSend'.
 *
 * Fills the mbuf with an Ethernet broadcast frame carrying an ARP request
 * (sender IP = IP_SRC_ADDR, target IP = IP_DST_ADDR) and pads the frame to
 * the 60-byte Ethernet minimum.  The mbuf is NOT transmitted here.
 *
 * NOTE(review): the source MAC (eth->s_addr) is left untouched, exactly as
 * in the original code - confirm the mbuf already carries a valid source
 * MAC or that the NIC inserts one.
 */
void
pktgen_send_arp(struct rte_mbuf  * pktSend)
{
	struct ether_hdr  *eth;
	arpPkt_t          *arp;
	uint32_t addr;

	eth = rte_pktmbuf_mtod(pktSend, struct ether_hdr *);
	arp = (arpPkt_t *)&eth[1];

	/* Broadcast destination MAC. */
	memset(&eth->d_addr, 0xFF, 6);
	eth->ether_type = htons(ETHER_TYPE_ARP);

	/* Zero the whole ARP payload; this also clears the target hardware
	 * address, so the separate memset in the original's (dead)
	 * GRATUITOUS_ARP branch was redundant - 'type' was fixed at 1, so
	 * that branch could never run and has been removed. */
	memset(arp, 0, sizeof(arpPkt_t));

	/* Sender protocol address. */
	addr = htonl(IP_SRC_ADDR);
	inetAddrCopy(&arp->spa, &addr);

	/* Target protocol address. */
	addr = htonl(IP_DST_ADDR);
	inetAddrCopy(&arp->tpa, &addr);

	/* Fill in the rest of the ARP packet header */
	arp->hrd    = htons(ETH_HW_TYPE);
	arp->pro    = htons(ETHER_TYPE_IPv4);
	arp->hln    = 6;	/* MAC address length */
	arp->pln    = 4;	/* IPv4 address length */
	arp->op     = htons(ARP_REQUEST);

	/* Pad to the minimum Ethernet frame size (60 bytes + FCS). */
	pktSend->data_len = 60;
	pktSend->pkt_len = pktSend->data_len;
}



/**************************************************************************//**
 *
 * pktgen_process_arp - Handle a ARP request input packet and send a response.
 *
 * DESCRIPTION
 * Handle a ARP request input packet and send a response if required.
 *
 * RETURNS: N/A
 *
 * SEE ALSO:
 */

//uint32_t ipForMac = IP4(192,168,1,100);

struct ether_addr hostMac;

/* uint16Swap(void *t, void *f) - exchange two 16-bit values in place */
static __inline__ void
uint16Swap(void *t, void *f) {
	uint16_t *lhs = (uint16_t *)t;
	uint16_t *rhs = (uint16_t *)f;
	uint16_t tmp = *lhs;

	*lhs = *rhs;
	*rhs = tmp;
}


/* inetAddrSwap( void * t, void * f ) - Swap two IPv4 addresses */
static __inline__ void
inetAddrSwap(void *t, void *f) {
	uint32_t *lhs = (uint32_t *)t;
	uint32_t *rhs = (uint32_t *)f;
	uint32_t tmp = *lhs;

	*lhs = *rhs;
	*rhs = tmp;
}

/* ethAddrSwap( u16_t * to, u16_t * from ) - Swap two ethernet addresses.
 * A MAC address is treated as three 16-bit words, swapped word by word
 * (the helper calls of the original are inlined here). */
static __inline__ void
ethAddrSwap(void *t, void *f) {
	uint16_t *lhs = (uint16_t *)t;
	uint16_t *rhs = (uint16_t *)f;
	int w;

	for (w = 0; w < 3; w++) {
		uint16_t tmp = lhs[w];

		lhs[w] = rhs[w];
		rhs[w] = tmp;
	}
}




/*
 * pktgen_process_arp - answer an incoming ARP request on the same port.
 *
 * Rewrites the request mbuf in place into an ARP reply (swapping MAC and
 * IP address pairs) and transmits it on queue 0 of 'portId'.  Non-request
 * ARP opcodes are ignored and the mbuf is left to the caller.
 */
void
pktgen_process_arp(struct rte_mbuf *m, uint32_t portId, uint32_t vlan)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	arpPkt_t      *arp = (arpPkt_t *)&eth[1];
	struct rte_mbuf *pkt = m;
	int nb_tx;

	/* Advertise a freshly generated random MAC.
	 * NOTE(review): regenerating hostMac on every packet means each
	 * reply carries a different MAC - confirm this is intended. */
	eth_random_addr(hostMac.addr_bytes);

	/* Adjust for a vlan header if present */
	if (vlan)
		arp = (arpPkt_t *)((char *)arp + sizeof(struct vlan_hdr));

	/* Process all ARP requests if they are for us. */
	if (arp->op == htons(ARP_REQUEST) ) {

		/* Swap the two MAC addresses */
		ethAddrSwap(&arp->sha, &arp->tha);

		/* Swap the two IP addresses */
		inetAddrSwap(&arp->tpa._32, &arp->spa._32);

		/* Set the packet to ARP reply */
		arp->op = htons(ARP_REPLY);

		/* Swap the MAC addresses */
		ethAddrSwap(&eth->d_addr, &eth->s_addr);

		/* Copy in the MAC address for the reply. */
		rte_memcpy(&arp->sha, hostMac.addr_bytes, 6);
		rte_memcpy(&eth->s_addr, hostMac.addr_bytes, 6);

		/* The request mbuf is reused as the reply, so ownership
		 * passes to the NIC on successful transmit. */
		nb_tx = rte_eth_tx_burst(portId, 0, &pkt, 1);
		/* BUG FIX: the original leaked the mbuf when the TX queue
		 * was full; free it if it was not transmitted. */
		if (unlikely(nb_tx == 0))
			rte_pktmbuf_free(pkt);
	}
}




/*
 * Build the static IPv4 and CoLoR GET headers that pkt_prepare() later
 * copies into every outgoing mbuf.  The IPv4 checksum is computed by
 * hand; the CoLoR checksum is deliberately left unset (as in the
 * original code).
 */
static void
setup_pkt_color_ip_headers(struct ipv4_hdr *ip_hdr,	 struct CoLoR_get *color_hdr)
{
	uint16_t *ptr16;
	uint32_t ip_cksum;
	uint16_t pkt_len;

	/*
	 * Initialize COLOR header: length, version/type, ttl and ports.
	 */
	pkt_len = sizeof(struct CoLoR_get);
	/* BUG FIX: 'length' is a 32-bit field; the original stored a
	 * rte_cpu_to_be_16() value in it, yielding a wrong on-wire value.
	 * Use the matching 32-bit byte swap. */
	color_hdr->length = rte_cpu_to_be_32(sizeof(struct CoLoR_get));
	/* BUG FIX: version_type and ttl are single bytes; the original fed
	 * them through rte_cpu_to_be_16(), which truncates the swapped
	 * value to 0 on little-endian hosts.  Single bytes need no swap. */
	color_hdr->version_type = 1;
	color_hdr->ttl = 88;
	color_hdr->port_src = rte_cpu_to_be_16(COLOR_SRC_PORT);
	color_hdr->port_dst = rte_cpu_to_be_16(COLOR_DEST_PORT);
	/* CoLoR checksum intentionally not computed (original behavior). */

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
	ip_hdr->version_ihl   = IP_VHL_DEF;
	ip_hdr->type_of_service   = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live   = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_IP;	/* protocol number 0 */
	ip_hdr->packet_id = 0;
	ip_hdr->total_length   = rte_cpu_to_be_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
	ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);

	/*
	 * Compute IP header checksum: sum the ten 16-bit header words,
	 * skipping the checksum field itself (ptr16[5]).
	 */
	ptr16 = (unaligned_uint16_t*) ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	if (ip_cksum > 65535)
		ip_cksum -= 65535;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
}

/* Function: print 'info' followed by the six bytes of a MAC address.
 * Input:  info - label prefix, addr - address to display
 * Output: void
 */
static inline void
show_mac(const char *info,struct ether_addr *addr ){
	const uint8_t *b = addr->addr_bytes;

	printf("%s %02" PRIx8 " %02" PRIx8 " %02" PRIx8
	       " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
	       info, b[0], b[1], b[2], b[3], b[4], b[5]);
}


/* Function: print source and destination MAC of each received packet.
 * Input:  bufs - array of received mbufs, nb_mbufs - how many
 * Output: void
 */
static inline void
display_rx_mac(struct rte_mbuf **bufs, uint16_t nb_mbufs){
	uint16_t idx;

	for (idx = 0; idx < nb_mbufs; idx++) {
		struct ether_hdr *hdr =
			rte_pktmbuf_mtod(bufs[idx], struct ether_hdr *);

		show_mac("src_MAC :", &(hdr->s_addr));
		show_mac("dst_MAC :", &(hdr->d_addr));
	}
}


/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * Returns 0 on success, -1 for an invalid port, or the negative error
 * code of the first failing rte_eth_* call.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	const uint16_t rx_rings = 1, tx_rings = 1;
	uint16_t nb_rxd = RX_RING_SIZE;
	uint16_t nb_txd = TX_RING_SIZE;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rte_eth_dev_info_get(port, &dev_info);

	/* Enable fast mbuf release on TX when the device supports it. */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Let the driver clamp the descriptor counts to its limits. */
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, nb_txd,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Fetch the port MAC address.
	 * NOTE(review): the address is retrieved but never printed or used. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}


struct rte_ring *rx_ring, *tx_ring, *kni_ring;


/* Create the RX, TX and KNI single-producer/single-consumer rings used to
 * hand mbufs between the I/O threads; aborts the application if any ring
 * cannot be created. */
static void
init_ring() {
      const int instance = 0;	/* only one ring set; used in the names */
      char ring_name[32] = {0};

      snprintf(ring_name, sizeof(ring_name), "ring_rx_%u", instance);
      rx_ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (rx_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create RX ring, %s, %s():%d\n",
                 rte_strerror(rte_errno), __func__, __LINE__);

      snprintf(ring_name, sizeof(ring_name), "ring_tx_%u", instance);
      tx_ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (tx_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create TX ring, %s, %s():%d\n",
                 rte_strerror(rte_errno), __func__, __LINE__);

      snprintf(ring_name, sizeof(ring_name), "ring_kni_%u", instance);
      kni_ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
                                 RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (kni_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create KNI ring, %s, %s():%d\n",
                 rte_strerror(rte_errno), __func__, __LINE__);
}


static uint64_t total_send_out;


/* RX thread: poll port 0 / queue 0 and push received mbufs into rx_ring.
 * Packets the ring cannot absorb are freed (dropped).  Runs until the
 * global force_quit flag is raised. */
static void
rx_thread() {
  struct rte_mbuf *burst[PKT_BURST_SZ];
  struct timespec idle_wait;
  unsigned received, queued;

  idle_wait.tv_sec = 0;
  idle_wait.tv_nsec = 1000;	/* 1 us back-off when the wire is idle */

  while (!force_quit) {
    /* Burst rx from eth */
    received = rte_eth_rx_burst(0, 0, burst, PKT_BURST_SZ);
    if (unlikely(received == 0)) {
      nanosleep(&idle_wait, NULL);
      continue;
    }

    queued = rte_ring_sp_enqueue_burst(rx_ring, (void **) burst, received, NULL);
    /* Ring full: free everything that could not be enqueued. */
    while (unlikely(queued < received))
      rte_pktmbuf_free(burst[queued++]);
  }
}

/*
static void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num) {
  unsigned i;

  if (pkts == NULL)
    return;

  for (i = 0; i < num; i++) {
    rte_pktmbuf_free(pkts[i]);
    pkts[i] = NULL;
  }
}
*/


/* TX thread: drain packets coming back from the port's KNI device and
 * packets queued on tx_ring, transmitting both on TX queue 0 of the port
 * described by 'arg' (a struct kni_port_params *).  Runs until the global
 * force_quit flag is raised. */
static void
tx_thread(void *arg) {
  uint8_t port_id;

  struct timespec nano;
  nano.tv_sec = 0;
  nano.tv_nsec = 1000;	/* 1 us back-off when nothing is queued */

  uint16_t nb_tx, num;
  struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

  struct kni_port_params *p = (struct kni_port_params *) arg;
  port_id = p->port_id;

  while (!force_quit) {
    /* Burst rx from kni.
     * BUG FIX: p->kni is an ARRAY of KNI contexts, so the original call
     * passed 'struct rte_kni **' where rte_kni_rx_burst() expects a
     * single 'struct rte_kni *'.  Use the first (and, with one TX lcore,
     * only) context. */
    num = rte_kni_rx_burst(p->kni[0], pkts_burst, PKT_BURST_SZ);
    if (likely(num)) {
      nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, num);
      if (unlikely(nb_tx < num))
        kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
    }

    num = rte_ring_sc_dequeue_burst(tx_ring, (void **) pkts_burst, PKT_BURST_SZ,NULL);
    if (unlikely(num == 0)) {
      nanosleep(&nano, NULL);
      continue;
    }

    nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, num);
    total_send_out += num;	/* NOTE(review): counts attempts, not successful sends */
    if (unlikely(nb_tx < num)) {
      /* Free mbufs the NIC did not accept. */
      kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
    }
  }
}






/* Copy 'len' bytes from 'buf' into a chained (multi-segment) mbuf,
 * starting 'offset' bytes into the packet data.  The chain is assumed to
 * be long enough for offset+len bytes (no bounds checking here). */
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
		     unsigned offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned copy_len;

	/* Walk the chain to the segment containing 'offset'. */
	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	/* Bytes available in the current segment from 'offset' on. */
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char*) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, char *);
		/* BUG FIX: refresh the copyable size for the new segment.
		 * The original kept the stale value, corrupting any copy
		 * spanning three or more segments (upstream DPDK testpmd's
		 * copy_buf_to_pkt_segs performs this update). */
		copy_len = seg->data_len;
	}
	/* Final (partial) copy into the last segment. */
	rte_memcpy(seg_buf, buf, (size_t) len);
}

/* Function: copy 'len' bytes from 'buf' into mbuf 'pkt' at byte 'offset'.
 * Input:  void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset
 * Output: void
 */
void
copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	/* Fast path: the whole range fits inside the first segment. */
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
			buf, (size_t) len);
		return;
	}

	/* Slow path: spread the copy across chained segments.
	 * With single-segment mbufs (the usual case here) this never runs. */
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}


/*
 * The lcore main. This is the main thread that does the work: it polls the
 * single DPDK port for incoming packets (printing their MACs), then builds
 * and transmits a batch of PKG_GEN_COUNT packets every 10 seconds.
 */
static __attribute__((noreturn)) void
lcore_main(struct rte_mempool *mbuf_pool)
{
	uint16_t port;
	uint16_t i;
	struct rte_mbuf *tx_bufs_pt[PKG_GEN_COUNT];

	SendType inType = SEND_ARP;	/* which packet type to generate */
	/*
	 * Check that the port is on the same NUMA node as the polling thread
	 * for best performance.
	 */
	RTE_ETH_FOREACH_DEV(port)
		if (rte_eth_dev_socket_id(port) > 0 &&
				rte_eth_dev_socket_id(port) !=
						(int)rte_socket_id())
			printf("WARNING, port %u is on remote NUMA node to "
					"polling thread.\n\tPerformance will "
					"not be optimal.\n", port);

	printf("\nCore %u producing get packets. [Ctrl+C to quit]\n",
			rte_lcore_id());

	/* Run until the application is quit or killed. */
	for (;;) {
		/*
		 * Only ONE Ethernet device is expected to be bound to DPDK
		 * (main() enforces this), so this loop visits a single port
		 * and sends one batch per iteration.
		 */
		RTE_ETH_FOREACH_DEV(port) {

			/* Drain queue 0: up to BURST_SIZE packets land in
			 * rx_buf; the return value is how many arrived. */
			struct rte_mbuf *rx_buf[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0, rx_buf, BURST_SIZE);

			if(nb_rx!=0)
			{
				printf("get packet from port \n");
				display_rx_mac(rx_buf,nb_rx);
				/* Return every received mbuf to the pool. */
				for(i=0; i<nb_rx; i++)
					rte_pktmbuf_free(rx_buf[i]);
			}

			/* Build the outgoing batch:
			 * Ethernet | IPv4 | CoLoR GET (14 + 20 + 88 bytes),
			 * or a 60-byte ARP request, depending on inType. */
			for( i=0;i<PKG_GEN_COUNT;i++){
				struct rte_mbuf *pkt = rte_pktmbuf_alloc(mbuf_pool);
				if(pkt==NULL){
						rte_exit(EXIT_FAILURE, "Cannot alloc storage memory in  port %"PRIu16 "\n",port);
				}
				if(inType==SEND_ARP)
				{
				    pktgen_send_arp(pkt);
				}else{
				   pkt_prepare(pkt);
				}
				tx_bufs_pt[i] = pkt;
			}

			int nb_tx=rte_eth_tx_burst(port , 0, tx_bufs_pt, PKG_GEN_COUNT);
			printf("Successfully sent %d packets to MAC:0A:00:27:00:00:14\n",nb_tx);
			sleep(10);
			/* BUG FIX: rte_eth_tx_burst() takes ownership of the
			 * first nb_tx mbufs; freeing the whole array again
			 * (as the original did) double-freed the sent ones.
			 * Release only the mbufs that were NOT transmitted. */
			for(i=nb_tx;i<PKG_GEN_COUNT;i++)
				rte_pktmbuf_free(tx_bufs_pt[i]);
		}
	}
}

/*
 * The main function, which does initialization and calls the per-lcore
 * functions.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned nb_ports;
	uint16_t portid;

	/* Initialize the Environment Abstraction Layer (EAL). */
	int ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	/* Skip the arguments consumed by the EAL. */
	argc -= ret;
	argv += ret;

	/* This application requires exactly ONE available Ethernet port. */
	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports != 1)
		rte_exit(EXIT_FAILURE, "Error: It only need ONE Ethernet Device!\n");

	/* Creates a new mempool in memory to hold the mbufs. */
	mbuf_pool = rte_pktmbuf_pool_create("TX_POOL", NUM_MBUFS * nb_ports,
		MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Initialize all ports. */
	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	/* Pre-build the static IPv4 and CoLoR GET headers once. */
	setup_pkt_color_ip_headers(&pkt_ip_hdr,&pkt_colorget_hdr);
	/* Call lcore_main on the master core only (it never returns). */
	lcore_main(mbuf_pool);

	return 0;
}





/**
 * Interface to burst rx and enqueue mbufs into rx_q
 *
 * Reads up to PKT_BURST_SZ packets from the port's RX queue 0 and passes
 * them to each of the port's KNI devices; mbufs the KNI queue cannot
 * absorb are freed and counted as rx_dropped.
 */
static void
kni_ingress(struct kni_port_params *p)
{
	uint8_t i;
	uint16_t port_id;
	unsigned nb_rx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from eth.
		 * NOTE(review): every KNI instance polls the same queue 0 of
		 * the same port - confirm that is intended for nb_kni > 1. */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
		if (unlikely(nb_rx > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from eth\n");
			return;
		}
		/* Burst tx to kni */
		num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
		if (num)
			kni_stats[port_id].rx_packets += num;

		/* Service pending requests from the kernel side (these are
		 * dispatched to the kni_change_mtu/kni_config_* callbacks
		 * declared above). */
		rte_kni_handle_request(p->kni[i]);
		if (unlikely(num < nb_rx)) {
			/* Free mbufs not tx to kni interface */
			kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
			kni_stats[port_id].rx_dropped += nb_rx - num;
		}
	}
}

/**
 * Interface to dequeue mbufs from tx_q and burst tx
 *
 * Pulls up to PKT_BURST_SZ packets from each of the port's KNI devices
 * and transmits them on the port's TX queue 0; packets the NIC does not
 * accept are freed and counted as tx_dropped.
 */
static void
kni_egress(struct kni_port_params *p)
{
	uint8_t i;
	uint16_t port_id;
	unsigned nb_tx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from kni */
		num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
		if (unlikely(num > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from KNI\n");
			return;
		}
		/* Burst tx to eth */
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
		if (nb_tx)
			kni_stats[port_id].tx_packets += nb_tx;
		if (unlikely(nb_tx < num)) {
			/* Free mbufs not tx to NIC */
			kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
			kni_stats[port_id].tx_dropped += num - nb_tx;
		}
	}
}

/*
 * Worker launched on every lcore by rte_eal_mp_remote_launch().
 *
 * Each lcore scans the per-port configuration: if it is listed as a
 * port's Rx lcore it loops on kni_ingress(), if listed as a Tx lcore it
 * loops on kni_egress(); otherwise it logs and returns immediately.
 * The loops run until the kni_stop flag is raised (signal handler) and
 * busy-spin while kni_pause is non-zero during port reconfiguration.
 *
 * Always returns 0 (collected by rte_eal_wait_lcore()).
 */
static int
main_loop(__rte_unused void *arg)
{
	uint16_t i;
	int32_t f_stop;
	int32_t f_pause;
	const unsigned lcore_id = rte_lcore_id();
	/* Role assigned to this lcore by the --config mapping */
	enum lcore_rxtx {
		LCORE_NONE,
		LCORE_RX,
		LCORE_TX,
		LCORE_MAX
	};
	enum lcore_rxtx flag = LCORE_NONE;

	/* Find the first port served by this lcore.  Note: 'i' keeps the
	 * matched port index after the break and is used below. */
	RTE_ETH_FOREACH_DEV(i) {
		if (!kni_port_params_array[i])
			continue;
		if (kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) {
			flag = LCORE_RX;
			break;
		} else if (kni_port_params_array[i]->lcore_tx ==
						(uint8_t)lcore_id) {
			flag = LCORE_TX;
			break;
		}
	}

	if (flag == LCORE_RX) {
		RTE_LOG(INFO, APP, "Lcore %u is reading from port %d\n",
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->port_id);
		while (1) {
			f_stop = rte_atomic32_read(&kni_stop);
			f_pause = rte_atomic32_read(&kni_pause);
			if (f_stop)
				break;
			if (f_pause)	/* busy-wait while port is paused */
				continue;
			kni_ingress(kni_port_params_array[i]);
		}
	} else if (flag == LCORE_TX) {
		RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
					kni_port_params_array[i]->lcore_tx,
					kni_port_params_array[i]->port_id);
		while (1) {
			f_stop = rte_atomic32_read(&kni_stop);
			f_pause = rte_atomic32_read(&kni_pause);
			if (f_stop)
				break;
			if (f_pause)	/* busy-wait while port is paused */
				continue;
			kni_egress(kni_port_params_array[i]);
		}
	} else
		RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);

	return 0;
}

/* Display usage instructions for the application's non-EAL options. */
static void
print_usage(const char *prgname)
{
	RTE_LOG(INFO, APP,
		"\nUsage: %s [EAL options] -- -p PORTMASK -P -m "
		"[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
		"[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
		"    -p PORTMASK: hex bitmask of ports to use\n"
		"    -P : enable promiscuous mode\n"
		"    -m : enable monitoring of port carrier state\n"
		"    --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
		"port and lcore configurations\n",
		prgname);
}

/*
 * Convert a hex string to an unsigned 32-bit number.
 * 0 is returned if any error occurs: empty input, trailing garbage,
 * or a value that does not fit in 32 bits (the original silently
 * truncated overflowing values instead of rejecting them).
 */
static uint32_t
parse_unsigned(const char *portmask)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;	/* strtoul reports overflow only via errno */
	num = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno == ERANGE) || (num > UINT32_MAX))
		return 0;

	return (uint32_t)num;
}

static void
print_config(void)
{
	uint32_t i, j;
	struct kni_port_params **p = kni_port_params_array;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!p[i])
			continue;
		RTE_LOG(DEBUG, APP, "Port ID: %d\n", p[i]->port_id);
		RTE_LOG(DEBUG, APP, "Rx lcore ID: %u, Tx lcore ID: %u\n",
					p[i]->lcore_rx, p[i]->lcore_tx);
		for (j = 0; j < p[i]->nb_lcore_k; j++)
			RTE_LOG(DEBUG, APP, "Kernel thread lcore ID: %u\n",
							p[i]->lcore_k[j]);
	}
}

/*
 * Parse the --config option: a sequence of
 * "(port,lcore_rx,lcore_tx[,lcore_kthread...])" tuples mapping each port
 * to its Rx/Tx lcores and optional kernel-thread lcores.
 *
 * Returns 0 on success, -1 on any parse or validation error; on failure
 * every partially-built entry in kni_port_params_array is freed.
 */
static int
parse_config(const char *arg)
{
	const char *p, *p0 = arg;
	char s[256], *end;
	unsigned size;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_LCORE_RX,
		FLD_LCORE_TX,
		_NUM_FLD = KNI_MAX_KTHREAD + 3,
	};
	int i, j, nb_token;
	char *str_fld[_NUM_FLD];
	unsigned long int_fld[_NUM_FLD];
	uint16_t port_id, nb_kni_port_params = 0;

	memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
	while (((p = strchr(p0, '(')) != NULL) &&
		nb_kni_port_params < RTE_MAX_ETHPORTS) {
		p++;
		if ((p0 = strchr(p, ')')) == NULL)
			goto fail;
		size = p0 - p;
		if (size >= sizeof(s)) {
			printf("Invalid config parameters\n");
			goto fail;
		}
		snprintf(s, sizeof(s), "%.*s", size, p);
		nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
		/* A tuple needs at least port, lcore_rx and lcore_tx */
		if (nb_token <= FLD_LCORE_TX) {
			printf("Invalid config parameters\n");
			goto fail;
		}
		for (i = 0; i < nb_token; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i]) {
				printf("Invalid config parameters\n");
				goto fail;
			}
		}

		i = 0;
		port_id = int_fld[i++];
		if (port_id >= RTE_MAX_ETHPORTS) {
			printf("Port ID %d could not exceed the maximum %d\n",
						port_id, RTE_MAX_ETHPORTS);
			goto fail;
		}
		if (kni_port_params_array[port_id]) {
			printf("Port %d has been configured\n", port_id);
			goto fail;
		}
		kni_port_params_array[port_id] =
			rte_zmalloc("KNI_port_params",
				    sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
		/* Fix: rte_zmalloc() may return NULL; the original
		 * dereferenced the result unconditionally. */
		if (kni_port_params_array[port_id] == NULL) {
			printf("Failed to allocate port parameters\n");
			goto fail;
		}
		kni_port_params_array[port_id]->port_id = port_id;
		kni_port_params_array[port_id]->lcore_rx =
					(uint8_t)int_fld[i++];
		kni_port_params_array[port_id]->lcore_tx =
					(uint8_t)int_fld[i++];
		if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE ||
		kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
			printf("lcore_rx %u or lcore_tx %u ID could not "
						"exceed the maximum %u\n",
				kni_port_params_array[port_id]->lcore_rx,
				kni_port_params_array[port_id]->lcore_tx,
						(unsigned)RTE_MAX_LCORE);
			goto fail;
		}
		/* Remaining fields are optional kernel-thread lcore ids */
		for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
			kni_port_params_array[port_id]->lcore_k[j] =
						(uint8_t)int_fld[i];
		kni_port_params_array[port_id]->nb_lcore_k = j;
	}
	print_config();

	return 0;

fail:
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
		}
	}

	return -1;
}

static int
validate_parameters(uint32_t portmask)
{
	uint32_t i;

	if (!portmask) {
		printf("No port configured in port mask\n");
		return -1;
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
			(!(portmask & (1 << i)) && kni_port_params_array[i]))
			rte_exit(EXIT_FAILURE, "portmask is not consistent "
				"to port ids specified in --config\n");

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
			(unsigned)(kni_port_params_array[i]->lcore_rx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
					"port %d receiving not enabled\n",
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->port_id);

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
			(unsigned)(kni_port_params_array[i]->lcore_tx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
					"port %d transmitting not enabled\n",
					kni_port_params_array[i]->lcore_tx,
					kni_port_params_array[i]->port_id);

	}

	return 0;
}

#define CMDLINE_OPT_CONFIG  "config"

/*
 * Parse the arguments given in the command line of the application
 * (everything after the EAL "--" separator).
 *
 * Recognised options: -p PORTMASK (hex), -P (promiscuous), -m (link
 * monitoring), --config "(port,lcore_rx,lcore_tx[,lcore_kthread...])".
 * Exits the application on an unknown option or inconsistent parameters;
 * returns the result of the last --config parse otherwise (0 on success).
 */
static int
parse_args(int argc, char **argv)
{
	int opt, longindex, ret = 0;
	const char *prgname = argv[0];
	static struct option longopts[] = {
		{CMDLINE_OPT_CONFIG, required_argument, NULL, 0},
		{NULL, 0, NULL, 0}
	};

	/* Disable printing messages within getopt() */
	opterr = 0;

	/* Parse command line.  Fix: getopt_long() is documented to return
	 * -1 when the options are exhausted; comparing against EOF only
	 * worked because EOF happens to be -1. */
	while ((opt = getopt_long(argc, argv, "p:Pm", longopts,
						&longindex)) != -1) {
		switch (opt) {
		case 'p':	/* hex bitmask of ports to use */
			ports_mask = parse_unsigned(optarg);
			break;
		case 'P':	/* enable promiscuous mode on all ports */
			promiscuous_on = 1;
			break;
		case 'm':	/* enable carrier-state monitoring thread */
			monitor_links = 1;
			break;
		case 0:		/* long option: only --config exists */
			if (!strncmp(longopts[longindex].name,
				     CMDLINE_OPT_CONFIG,
				     sizeof(CMDLINE_OPT_CONFIG))) {
				ret = parse_config(optarg);
				if (ret) {
					printf("Invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			break;
		default:
			print_usage(prgname);
			rte_exit(EXIT_FAILURE, "Invalid option specified\n");
		}
	}

	/* Check that options were parsed ok */
	if (validate_parameters(ports_mask) < 0) {
		print_usage(prgname);
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
	}

	return ret;
}

/* Initialize KNI subsystem */
static void
init_kni(void)
{
	unsigned int num_of_kni_ports = 0, i;
	struct kni_port_params **params = kni_port_params_array;

	/* Calculate the maximum number of KNI interfaces that will be used */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			num_of_kni_ports += (params[i]->nb_lcore_k ?
				params[i]->nb_lcore_k : 1);
		}
	}

	/* Invoke rte KNI init to preallocate the ports */
	rte_kni_init(num_of_kni_ports);
}

/* Initialise a single port on an Ethernet device */
static void
init_port(uint16_t port)
{
	int ret;
	uint16_t nb_rxd = NB_RXD;
	uint16_t nb_txd = NB_TXD;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

	/* Initialise device and RX/TX queues */
	RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
	fflush(stdout);
	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
		            (unsigned)port, ret);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
				"for port%u (%d)\n", (unsigned)port, ret);

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
		rte_eth_dev_socket_id(port), &rxq_conf, pktmbuf_pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
				"port%u (%d)\n", (unsigned)port, ret);

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
		rte_eth_dev_socket_id(port), &txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
				"port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_dev_start(port);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not start port%u (%d)\n",
						(unsigned)port, ret);

	if (promiscuous_on)
		rte_eth_promiscuous_enable(port);
}

/* Check the link status of all ports in up to 9s, and print them finally.
 *
 * Polls every masked port each CHECK_INTERVAL ms; once all ports report
 * link-up (or the timeout expires) a final per-port status line is
 * printed and the function returns.
 */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				/* Fix: the half-duplex string used to embed a
				 * stray "\n", printing a spurious blank line
				 * (the format string already ends in \n). */
				if (link.link_status)
					printf(
					"Port%d Link Up - speed %uMbps - %s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

/* Log the up/down transition of the NIC link backing a KNI interface.
 * 'prev' is the link state before the update; only transitions are
 * logged, steady states are silent.
 */
static void
log_link_state(struct rte_kni *kni, int prev, struct rte_eth_link *link)
{
	int now;

	if (kni == NULL || link == NULL)
		return;

	now = link->link_status;
	if (prev == ETH_LINK_DOWN && now == ETH_LINK_UP) {
		RTE_LOG(INFO, APP, "%s NIC Link is Up %d Mbps %s %s.\n",
			rte_kni_get_name(kni),
			link->link_speed,
			link->link_autoneg ?  "(AutoNeg)" : "(Fixed)",
			link->link_duplex ?  "Full Duplex" : "Half Duplex");
	} else if (prev == ETH_LINK_UP && now == ETH_LINK_DOWN) {
		RTE_LOG(INFO, APP, "%s NIC Link is Down.\n",
			rte_kni_get_name(kni));
	}
}

/*
 * Monitor the link status of all ports and update the
 * corresponding KNI interface(s)
 *
 * Runs as a control thread; every 500 ms it reads each masked port's
 * link state and propagates it to the port's KNI devices until the
 * monitor_links flag is cleared.
 */
static void *
monitor_all_ports_link_status(void *arg)
{
	uint16_t port;
	struct rte_eth_link link;
	unsigned int k;
	int prev;
	(void) arg;

	while (monitor_links) {
		rte_delay_ms(500);
		RTE_ETH_FOREACH_DEV(port) {
			struct kni_port_params *cfg;

			if (!(ports_mask & (1 << port)))
				continue;
			cfg = kni_port_params_array[port];
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(port, &link);
			for (k = 0; k < cfg->nb_kni; k++) {
				prev = rte_kni_update_link(cfg->kni[k],
						link.link_status);
				log_link_state(cfg->kni[k], prev, &link);
			}
		}
	}
	return NULL;
}

/* Callback for request of changing MTU.
 *
 * Invoked via rte_kni_handle_request() when the kernel asks to change
 * the interface MTU: stops the port, reconfigures the max Rx packet
 * length (toggling the jumbo-frame offload as needed), rebuilds the Rx
 * queue and restarts the port.
 *
 * Returns 0 on success or a negative errno-style value on failure.
 */
static int
kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
	int ret;
	uint16_t nb_rxd = NB_RXD;
	struct rte_eth_conf conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);

	/* Stop specific port */
	rte_eth_dev_stop(port_id);

	memcpy(&conf, &port_conf, sizeof(conf));
	/* Set new MTU */
	if (new_mtu > ETHER_MAX_LEN)
		conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* mtu + length of header + length of FCS = max pkt length */
	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
							KNI_ENET_FCS_SIZE;
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
		return ret;
	}

	/* Fix: this is a runtime callback from the kernel; report the
	 * failure to the requester like every other error path here
	 * instead of killing the whole application with rte_exit(). */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, NULL);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Could not adjust number of descriptors "
				"for port%u (%d)\n", (unsigned int)port_id,
				ret);
		return ret;
	}

	rte_eth_dev_info_get(port_id, &dev_info);
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
		rte_eth_dev_socket_id(port_id), &rxq_conf, pktmbuf_pool);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to setup Rx queue of port %d\n",
				port_id);
		return ret;
	}

	/* Restart specific port */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id);
		return ret;
	}

	return 0;
}

/* Callback for request of configuring network interface up/down.
 * The datapath lcores are paused (kni_pause) while the port is stopped
 * and, for "up", restarted.  Returns 0 on success, negative on failure.
 */
static int
kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
	int ret = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Configure network interface of %d %s\n",
					port_id, if_up ? "up" : "down");

	/* Pause Rx/Tx lcores while the port is being restarted */
	rte_atomic32_inc(&kni_pause);

	/* Both directions stop the port; "up" additionally restarts it */
	rte_eth_dev_stop(port_id);
	if (if_up != 0)
		ret = rte_eth_dev_start(port_id);

	rte_atomic32_dec(&kni_pause);

	if (ret < 0)
		RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);

	return ret;
}

/* Log a MAC address, prefixed by 'name', via RTE_LOG. */
static void
print_ethaddr(const char *name, struct ether_addr *mac_addr)
{
	char addr_str[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(addr_str, sizeof(addr_str), mac_addr);
	RTE_LOG(INFO, APP, "\t%s%s\n", name, addr_str);
}

/* Callback for request of configuring mac address.
 * Sets the port's default MAC to the address supplied by the kernel.
 * Returns 0 on success, negative errno-style value on failure.
 */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
	int ret;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Configure mac address of %d\n", port_id);
	print_ethaddr("Address:", (struct ether_addr *)mac_addr);

	ret = rte_eth_dev_default_mac_addr_set(port_id,
					       (struct ether_addr *)mac_addr);
	if (ret < 0)
		RTE_LOG(ERR, APP, "Failed to config mac_addr for port %d\n",
			port_id);

	return ret;
}

/*
 * Create the KNI device(s) for one port.
 *
 * One KNI interface is allocated per configured kernel-thread lcore
 * ("vEth<port>_<idx>", force-bound to that lcore); if no kernel lcores
 * were configured a single unbound "vEth<port>" is created.  The first
 * KNI device of a port is the master and registers the request
 * callbacks (MTU change, interface up/down, MAC set).
 *
 * Returns 0 on success, -1 for an invalid or unconfigured port id;
 * exits the application if rte_kni_alloc() fails.
 */
static int
kni_alloc(uint16_t port_id)
{
	uint8_t i;
	struct rte_kni *kni;
	struct rte_kni_conf conf;
	struct kni_port_params **params = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
		return -1;

	params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
				params[port_id]->nb_lcore_k : 1;

	for (i = 0; i < params[port_id]->nb_kni; i++) {
		/* Clear conf at first */
		memset(&conf, 0, sizeof(conf));
		if (params[port_id]->nb_lcore_k) {
			snprintf(conf.name, RTE_KNI_NAMESIZE,
					"vEth%u_%u", port_id, i);
			conf.core_id = params[port_id]->lcore_k[i];
			conf.force_bind = 1;
		} else
			snprintf(conf.name, RTE_KNI_NAMESIZE,
						"vEth%u", port_id);
		conf.group_id = port_id;
		conf.mbuf_size = MAX_PACKET_SZ;
		/*
		 * The first KNI device associated to a port
		 * is the master, for multiple kernel thread
		 * environment.
		 */
		if (i == 0) {
			struct rte_kni_ops ops;
			struct rte_eth_dev_info dev_info;
			const struct rte_pci_device *pci_dev;
			const struct rte_bus *bus = NULL;

			memset(&dev_info, 0, sizeof(dev_info));
			rte_eth_dev_info_get(port_id, &dev_info);

			/* Propagate PCI address/id only when the device is
			 * actually on the PCI bus; for other buses
			 * conf.addr/conf.id stay zeroed. */
			if (dev_info.device)
				bus = rte_bus_find_by_device(dev_info.device);
			if (bus && !strcmp(bus->name, "pci")) {
				pci_dev = RTE_DEV_TO_PCI(dev_info.device);
				conf.addr = pci_dev->addr;
				conf.id = pci_dev->id;
			}
			/* Get the interface default mac address */
			rte_eth_macaddr_get(port_id,
					(struct ether_addr *)&conf.mac_addr);

			rte_eth_dev_get_mtu(port_id, &conf.mtu);

			/* Callbacks serviced via rte_kni_handle_request() */
			memset(&ops, 0, sizeof(ops));
			ops.port_id = port_id;
			ops.change_mtu = kni_change_mtu;
			ops.config_network_if = kni_config_network_interface;
			ops.config_mac_address = kni_config_mac_address;

			kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
		} else
			kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);

		if (!kni)
			rte_exit(EXIT_FAILURE, "Fail to create kni for "
						"port: %d\n", port_id);
		params[port_id]->kni[i] = kni;
	}

	return 0;
}

/* Release every KNI device of a port and stop the underlying NIC.
 * Returns 0 on success, -1 for an invalid or unconfigured port id.
 */
static int
kni_free_kni(uint16_t port_id)
{
	uint8_t k;
	struct kni_port_params *cfg;

	if (port_id >= RTE_MAX_ETHPORTS)
		return -1;
	cfg = kni_port_params_array[port_id];
	if (cfg == NULL)
		return -1;

	for (k = 0; k < cfg->nb_kni; k++) {
		if (rte_kni_release(cfg->kni[k]))
			printf("Fail to release kni\n");
		cfg->kni[k] = NULL;
	}
	rte_eth_dev_stop(port_id);

	return 0;
}

/* Initialise ports/queues etc. and start main loop on each core.
 *
 * Sets up signal handlers, EAL, the shared mbuf pool, the KNI subsystem
 * and every masked port, then launches main_loop() on all lcores plus a
 * control thread mirroring link state into the KNI interfaces.  On
 * shutdown (kni_stop) all KNI devices, ports and config entries are
 * released.  Returns 0 on success, -1 if an lcore exits with an error.
 */
int
main1(int argc, char** argv)
{
	int ret;
	uint16_t nb_sys_ports, port;
	unsigned i;
	void *retval;
	pthread_t kni_link_tid;
	int pid;

	/* Associate signal_hanlder function with USR signals */
	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);
	signal(SIGRTMIN, signal_handler);
	signal(SIGINT, signal_handler);

	/* Initialise EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not initialise EAL (%d)\n", ret);
	argc -= ret;
	argv += ret;

	/* Parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");

	/* Create the mbuf pool shared by all ports and KNI devices.
	 * (rte_exit() never returns, so the unreachable "return -1" that
	 * used to follow it has been dropped.) */
	pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
		MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
	if (pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");

	/* Get number of ports found in scan */
	nb_sys_ports = rte_eth_dev_count_avail();
	if (nb_sys_ports == 0)
		rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");

	/* Check if the configured port ID is valid */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i] && !rte_eth_dev_is_valid_port(i))
			rte_exit(EXIT_FAILURE, "Configured invalid "
						"port ID %u\n", i);

	/* Initialize KNI subsystem */
	init_kni();

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(port) {
		/* Skip ports that are not enabled */
		if (!(ports_mask & (1 << port)))
			continue;

		/* Fix: validate the port index BEFORE configuring it;
		 * the range check used to run only after init_port(). */
		if (port >= RTE_MAX_ETHPORTS)
			rte_exit(EXIT_FAILURE, "Can not use more than "
				"%d ports for kni\n", RTE_MAX_ETHPORTS);

		init_port(port);
		kni_alloc(port);
	}
	check_all_ports_link_status(ports_mask);

	/* Tell the operator how to interact with the running application */
	pid = getpid();
	RTE_LOG(INFO, APP, "========================\n");
	RTE_LOG(INFO, APP, "KNI Running\n");
	RTE_LOG(INFO, APP, "kill -SIGUSR1 %d\n", pid);
	RTE_LOG(INFO, APP, "    Show KNI Statistics.\n");
	RTE_LOG(INFO, APP, "kill -SIGUSR2 %d\n", pid);
	RTE_LOG(INFO, APP, "    Zero KNI Statistics.\n");
	RTE_LOG(INFO, APP, "========================\n");
	fflush(stdout);

	/* Control thread mirroring NIC link state into the KNI ifaces */
	ret = rte_ctrl_thread_create(&kni_link_tid,
				     "KNI link status check", NULL,
				     monitor_all_ports_link_status, NULL);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Could not create link status thread!\n");

	/* Launch per-lcore function on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(i) {
		if (rte_eal_wait_lcore(i) < 0)
			return -1;
	}
	monitor_links = 0;
	pthread_join(kni_link_tid, &retval);

	/* Release resources */
	RTE_ETH_FOREACH_DEV(port) {
		if (!(ports_mask & (1 << port)))
			continue;
		kni_free_kni(port);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
		}

	return 0;
}
