/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <arpa/inet.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <time.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_malloc.h>

#include "main.h"

// static const uint64_t E2PCC_RL_T = 5; /* us */
/* Rate perturbation table: when the feedback rate equals 5000, successive
 * updates add these offsets (plus small random jitter) so the probing
 * traffic oscillates around the base rate instead of staying flat. */
static int RATE_WAVE[7] = {100, -78, 69, -32, 30, -15, 12};
/* Next index into RATE_WAVE; once it passes 6 only random jitter is used. */
static int RATE_IDX = 0;

static void print_ack_info(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	eth_hdr = rte_pktmbuf_mtod(app.mbuf_rx.array[0], struct rte_ether_hdr *); // 解析第一个数据包
	ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

	struct in_addr src_ip;
	src_ip.s_addr = ipv4_hdr->src_addr;

	// struct in_addr dst_ip;
	// dst_ip.s_addr = ipv4_hdr->dst_addr;
	if (ipv4_hdr->next_proto_id == IPPROTO_ICMP)
	{
		printf("[ICMP] ");
	}

	printf("src ip:%s, ", inet_ntoa(src_ip));
	printf("dst ip:%s, tos:%d, pkt_len:%d, ip proto:%d\n", inet_ntoa(*(struct in_addr *)(&ipv4_hdr->dst_addr)), ipv4_hdr->type_of_service, app.mbuf_rx.array[0]->pkt_len, ipv4_hdr->next_proto_id);
}

/* Update app.e2pcc_param.send_rate from an in-band ICMP feedback packet.
 *
 * Only ICMP packets whose type is IP_ICMP_PEE carry feedback; the advertised
 * rate is a uint64 immediately following the ICMP header.  The first 100
 * feedback ACKs are ignored (warm-up), and updates are then gated by
 * tigger_num so at most one update happens per batch of sent packets.
 *
 * Fixes vs. the original:
 *  - parses the mbuf actually passed in ('m'); the original always read
 *    app.mbuf_rx.array[0], so every packet of a burst decoded the first one;
 *  - copies the rate with memcpy instead of dereferencing a possibly
 *    unaligned uint64_t pointer into the packet payload (UB);
 *  - seeds the PRNG once instead of reseeding with time(NULL) on every
 *    call, which made rand() return repeating values within one second. */
static void update_send_rate_if_nessary(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_icmp_hdr *icmp_hdr;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
	if (ipv4_hdr->next_proto_id != IPPROTO_ICMP)
		return; /* feedback is carried over ICMP only */

	icmp_hdr = (struct rte_icmp_hdr *)(ipv4_hdr + 1);
	if (icmp_hdr->icmp_type != IP_ICMP_PEE)
		return;

	app.e2pcc_param.ack_recv_num++;
	if (app.e2pcc_param.ack_recv_num <= 100 ||
	    app.e2pcc_param.ack_recv_num <= app.e2pcc_param.tigger_num)
		return;

	/* The rate follows the ICMP header; the payload is not guaranteed to
	 * be 8-byte aligned, so copy instead of dereferencing. */
	uint64_t rate;
	memcpy(&rate, (const void *)(icmp_hdr + 1), sizeof(rate));

	if (rate > SWITCH_BANDWIDTH)
	{
		rate = SWITCH_BANDWIDTH;
	}
	if (rate == 5000)
	{
		static int seeded; /* seed rand() exactly once */
		if (!seeded)
		{
			srand((unsigned)time(NULL));
			seeded = 1;
		}
		if (RATE_IDX > 6)
		{
			rate = rate + (rand() % 20 - 10);
		}
		else
		{
			rate = rate + RATE_WAVE[RATE_IDX++] + (rand() % 20 - 10);
		}
	}
	app.e2pcc_param.send_rate = rate;

	/* Remember the send counter so the next update waits for new traffic. */
	app.e2pcc_param.tigger_num = app.e2pcc_param.pkt_send_num;
}

// cur_tsc: current timestamp (TSC ticks); t_size: amount of data just transmitted (bytes)
static void
update_sending_status(const uint64_t cur_tsc, const uint64_t sec_tsc, const uint32_t t_size)
{
	/* Pace the sender: given that t_size bytes were just sent, compute how
	 * long that amount *should* take at app.e2pcc_param.send_rate and set
	 * sender_state->next_avail to the TSC value at which the next
	 * transmission is allowed. */
	struct sender_state *state = &app.sender_state;

	/* Bits sent divided by rate gives microseconds; scale to TSC ticks. */
	double usecs = (t_size * 8.0 / app.e2pcc_param.send_rate) * 1.0;
	uint64_t ticks = (uint64_t)(usecs * sec_tsc * (1e-6));

	state->next_avail = cur_tsc + ticks;
}

/* Decode the switch queue length. */
// TODO: decode queue length -> the sender's rate must then be decreased.
// Parameter ack_bufs: the queue of ACK mbufs received so far.
// Result: the current queue length at the switch.
static void decoding_qlen(struct rte_mbuf **ack_bufs)
{
	// TODO: walk ack_bufs and read each packet's ECN mark; decode once per
	// group of 7 ACKs, then advance the ack_bufs pointer.  Note: traffic
	// must be demultiplexed per flow first.  (Unimplemented stub.)
}

/* Decode the link utilization. */
// TODO: decode link utilization -> the sender's rate may then be increased.
// Parameter ack_bufs: the queue of ACK mbufs received so far.
// Result: the switch egress-port utilization, between 0% and 100%.
static void decoding_lutil(struct rte_mbuf **ack_bufs)
{
	// TODO: walk ack_bufs and read each packet's ECN mark; decode once per
	// group of 7 ACKs, then advance the ack_bufs pointer.  Note: traffic
	// must be demultiplexed per flow first.  (Unimplemented stub.)
}

static void
fill_ethernet_header(struct rte_ether_hdr *hdr)
{
	struct rte_ether_addr s_addr = {{0xa4, 0xbf, 0x01, 0x6e, 0xa6, 0x7a}};
	struct rte_ether_addr d_addr = {{0xe4, 0x3f, 0x11, 0xac, 0xc8, 0x15}};
	// dstaddr=192.168.2.217的mac了A4:BF:01:6E:D3:0F
	// srcaddr=192.168.0.218的mac了 A4:BF:01:6E:A6:7A
	// 192.168.63.221 的eno2np0 e4:3d:1a:ac:c8:04
	// 192.168.63.221 的eno2np1  e4:3d:1a:ac:c8:05
	hdr->src_addr = s_addr;
	hdr->dst_addr = d_addr;
	hdr->ether_type = rte_cpu_to_be_16(0x0800);
}

/* Fill a fixed IPv4 header for the generated test traffic.  All fields,
 * including the header checksum, are hard-coded. */
static void
fill_ipv4_header(struct rte_ipv4_hdr *hdr)
{
	hdr->version_ihl = (4 << 4) + 5;			// IPv4, IHL = 5 words (20-byte header)
	hdr->type_of_service = 0x03;				// NOTE(review): 0x03 sets both ECN bits (CE); old "No Diffserv" note was wrong — confirm intent
	hdr->total_length = rte_cpu_to_be_16(1440); // NOTE(review): create_raw_pkt(946) builds a 986-byte IP datagram, not 1440 — confirm
	hdr->packet_id = rte_cpu_to_be_16(5462);	// arbitrary fixed IP identification
	hdr->fragment_offset = rte_cpu_to_be_16(0);
	hdr->time_to_live = 64;
	hdr->next_proto_id = 6; // TCP
	hdr->hdr_checksum = rte_cpu_to_be_16(25295); // pre-computed for exactly these field values; stale if any field changes
	hdr->src_addr = rte_cpu_to_be_32(0xC0A800DA); // 192.168.0.218
	hdr->dst_addr = rte_cpu_to_be_32(0xC0A802D9); // 192.168.2.217
}

static void
fill_tcp_header(struct rte_tcp_hdr *hdr)
{
	hdr->src_port = rte_cpu_to_be_16(0x162E);
	hdr->dst_port = rte_cpu_to_be_16(0x04d2);
	hdr->sent_seq = rte_cpu_to_be_32(app.seq_num++);
	hdr->recv_ack = rte_cpu_to_be_32(0);
	hdr->data_off = 0x50;
	hdr->tcp_flags = 0;
	hdr->rx_win = rte_cpu_to_be_16(16);
	hdr->cksum = rte_cpu_to_be_16(0);
	hdr->tcp_urp = rte_cpu_to_be_16(0);
}

/* Allocate and build one raw Ethernet/IPv4/TCP packet from app.pool.
 *
 * Layout: 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP + payload_size
 * bytes of (uninitialized) payload.  The caller owns the returned mbuf.
 *
 * Fix: the original never checked rte_pktmbuf_alloc() or any
 * rte_pktmbuf_append() result, so pool exhaustion or insufficient tailroom
 * caused a NULL-pointer dereference.  Both conditions now panic with a
 * diagnostic, matching the rte_panic() style used elsewhere in this file. */
static struct rte_mbuf *create_raw_pkt(uint32_t payload_size)
{
	struct rte_mbuf *m;
	struct rte_ether_hdr *ether_h;
	struct rte_ipv4_hdr *ipv4_h;
	struct rte_tcp_hdr *tcp_h;
	char *payload;

	m = rte_pktmbuf_alloc(app.pool);
	if (m == NULL)
		rte_panic("create_raw_pkt: mbuf pool exhausted\n");

	/* Ethernet header */
	ether_h = (struct rte_ether_hdr *)rte_pktmbuf_append(m, sizeof(struct rte_ether_hdr));
	if (ether_h == NULL)
		goto fail;
	fill_ethernet_header(ether_h);

	/* IPv4 header */
	ipv4_h = (struct rte_ipv4_hdr *)rte_pktmbuf_append(m, sizeof(struct rte_ipv4_hdr));
	if (ipv4_h == NULL)
		goto fail;
	fill_ipv4_header(ipv4_h);

	/* TCP header */
	tcp_h = (struct rte_tcp_hdr *)rte_pktmbuf_append(m, sizeof(struct rte_tcp_hdr));
	if (tcp_h == NULL)
		goto fail;
	fill_tcp_header(tcp_h);

	/* TCP payload: e.g. 946 bytes here + 14 (eth) + 20 (ip) + 20 (tcp)
	 * gives a 1000-byte frame.  Contents are left uninitialized. */
	payload = rte_pktmbuf_append(m, payload_size);
	if (payload == NULL)
		goto fail;

	return m;

fail:
	rte_pktmbuf_free(m);
	rte_panic("create_raw_pkt: not enough tailroom in mbuf\n");
	return NULL; /* unreachable; keeps non-void return explicit */
}

/* Accumulate transmitted bits into the throughput meter and, once per 1000
 * packets or once the interval exceeds one second, emit a sample: stdout is
 * throttled to one line per second, while every sample is appended to the
 * stat_vector file as "<time> <Mbps>".
 * NOTE(review): this uses app.e2pcc_param.tp_in, the same meter as the
 * ACK-receive path — confirm a dedicated tp_out meter was not intended. */
static void cal_out_throughput(const uint64_t sec_tsc, uint32_t bits)
{
	struct tp_meter *meter = &app.e2pcc_param.tp_in;
	const uint64_t now = rte_rdtsc();
	const uint64_t elapsed = now - meter->intvl_start_time;
	const double interval_s = elapsed / (double)sec_tsc;

	if (meter->intvl_num_pkts >= 1000 || elapsed >= sec_tsc)
	{
		const double t = (now - app.e2pcc_param.start_time) / (double)sec_tsc;
		const double mbps = (meter->intvl_num_bits / interval_s) / 1e6;

		/* Console output at most once per second. */
		if (now - app.e2pcc_param.last_print_time >= sec_tsc)
		{
			printf("[sender] thoughtput out: %.2fs,%.2fMbps\n", t, mbps);
			app.e2pcc_param.last_print_time = now;
		}

		fprintf(app.e2pcc_param.stat_vector, "%f %f\n", t, mbps);

		/* Start a fresh measurement interval. */
		meter->intvl_start_time = now;
		meter->intvl_num_bits = 0;
		meter->intvl_num_pkts = 0;
	}

	meter->intvl_num_bits += bits;
	meter->intvl_num_pkts++;
}

/* Fold a burst of received ACK mbufs into the RX throughput meter, printing
 * one sample whenever at least one second has elapsed since the interval
 * started.  NOTE(review): 'port' is accepted but never used; 'interval_s'
 * is deliberately computed once before the loop, matching the original. */
static void cal_ack_recv_throughput(uint32_t port, const uint64_t sec_tsc, struct rte_mbuf **array, uint16_t n_mbufs)
{
	struct tp_meter *meter = &app.e2pcc_param.tp_in;
	const uint64_t now = rte_rdtsc();
	const double interval_s = (now - meter->intvl_start_time) / (double)sec_tsc;

	for (int idx = 0; idx < n_mbufs; idx++)
	{
		/* Report once the interval has lasted a full second. */
		if (now - meter->intvl_start_time >= sec_tsc)
		{
			const double t = (now - app.e2pcc_param.start_time) / (double)sec_tsc;
			printf("ack receive thoughtput: %f %f %lu %d\n", t,
			       (meter->intvl_num_bits / interval_s) / 1e6,
			       meter->intvl_num_pkts, array[idx]->pkt_len);
			meter->intvl_start_time = now;
			meter->intvl_num_bits = 0;
			meter->intvl_num_pkts = 0;
		}
		meter->intvl_num_bits += array[idx]->pkt_len * 8;
		meter->intvl_num_pkts++;
	}
}

/* RX loop: poll port 0 queue 0 for packets, feed each one to
 * update_send_rate_if_nessary(), then free the whole burst.
 *
 * Fixes vs. the original:
 *  - n_read was read uninitialized when n_batch == 0 (undefined behavior);
 *    it is now reset to 0 every iteration;
 *  - the `n_mbufs < 0` check was dead code (n_mbufs is uint16_t);
 *  - the burst size passed to rte_eth_rx_burst() was hard-coded to 8,
 *    ignoring the n_batch clamp computed just above (which bounds writes
 *    to app.mbuf_rx.array); n_batch is now used;
 *  - unused locals (i, ret, totalPkts, n_write) removed. */
void app_main_loop_rx(void)
{
	RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id());

	for (;;)
	{
		uint16_t n_mbufs, n_batch, n_read;

		n_mbufs = app.mbuf_rx.n_mbufs;

		/* Clamp the burst to the free room in the array and to the
		 * configured read burst size. */
		n_batch = APP_MBUF_ARRAY_SIZE - n_mbufs;
		if (app.burst_size_rx_read < n_batch)
			n_batch = app.burst_size_rx_read;

		n_read = 0; /* must be defined even when no burst is issued */
		if (n_batch != 0)
		{
			/* Receive from the first NIC. */
			n_read = rte_eth_rx_burst(0, 0, &app.mbuf_rx.array[n_mbufs], n_batch);
			n_mbufs += n_read;
		}

		if (n_read == 0)
			continue;

		for (int j = 0; j < n_read; j++)
		{
			update_send_rate_if_nessary(app.mbuf_rx.array[j]);
		}

		/* Release the burst; the array is reused from index 0. */
		for (uint16_t buf = 0; buf < n_mbufs; buf++)
		{
			rte_pktmbuf_free(app.mbuf_rx.array[buf]);
		}
		app.mbuf_rx.n_mbufs = 0;
	}
}

/* Worker loop: for each port in round-robin, move bursts of mbufs from that
 * port's RX ring to the paired port's (i XOR 1) TX ring.  Runs forever. */
void app_main_loop_worker(void)
{
	struct app_mbuf_array *worker_mbuf;
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
			rte_lcore_id());

	/* Per-worker scratch buffer, allocated on this core's NUMA socket. */
	worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
									RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (worker_mbuf == NULL)
		rte_panic("Worker thread: cannot allocate buffer space\n");

	/* NOTE(review): the (i + 1) & (n_ports - 1) wrap is only correct when
	 * app.n_ports is a power of two — confirm against the app config. */
	for (i = 0;; i = ((i + 1) & (app.n_ports - 1)))
	{
		int ret;

		/* Dequeue one burst from app.rings_rx[i] into worker_mbuf->array;
		 * bulk semantics: ret is 0 unless the full burst was dequeued. */
		ret = rte_ring_sc_dequeue_bulk(
			app.rings_rx[i],
			(void **)worker_mbuf->array,
			app.burst_size_worker_read,
			NULL);

		if (ret == 0)
			continue;

		/* Forward the burst to the paired port's TX ring, busy-retrying
		 * until the whole bulk enqueue succeeds (ret != 0). */
		do
		{
			ret = rte_ring_sp_enqueue_bulk(
				app.rings_tx[i ^ 1],
				(void **)worker_mbuf->array,
				app.burst_size_worker_write,
				NULL);
		} while (ret == 0);
	}
}

/* TX loop: repeatedly transmit one pre-built template packet on port 0,
 * paced by sender_state->next_avail (set by update_sending_status() from
 * the current send rate), and feed the TX throughput meter.
 *
 * Fixes vs. the original:
 *  - use-after-free: rte_eth_tx_burst() transfers mbuf ownership to the
 *    PMD, which frees the mbuf once transmitted; the original re-sent the
 *    same pointer every iteration.  We now take an extra reference before
 *    each send so the template mbuf survives the driver's free;
 *  - the 7-element rxtx_bufs array was allocated and never used (leaked
 *    mbufs from the pool) — removed along with other unused locals;
 *  - the port argument was the loop variable 'i', which never changed
 *    from 0; port 0 is now explicit. */
void app_main_loop_tx(void)
{
	struct rte_mbuf *mbuf;
	struct sender_state *sender_state = &app.sender_state;
	uint16_t n_write;
	uint64_t cur_tsc;
	const uint64_t sec_tsc = rte_get_tsc_hz();

	RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id());

	/* Template packet: 946-byte payload + 14 (eth) + 20 (ip) + 20 (tcp)
	 * = 1000-byte frame, reused for every transmission. */
	mbuf = create_raw_pkt(946);

	for (;;)
	{
		n_write = 0;
		cur_tsc = rte_rdtsc();

		/* Rate limiting: only send once the pacing deadline has passed. */
		if (sender_state->next_avail < cur_tsc)
		{
			/* Keep one reference for ourselves; the PMD drops the
			 * other after the packet leaves the wire. */
			rte_mbuf_refcnt_update(mbuf, 1);
			n_write = rte_eth_tx_burst(0, 0, &mbuf, 1);
			if (n_write == 0)
				rte_mbuf_refcnt_update(mbuf, -1); /* not queued; undo */
		}

		if (likely(n_write != 0))
		{
			app.e2pcc_param.pkt_send_num++;
			update_sending_status(cur_tsc, sec_tsc, mbuf->pkt_len * n_write);
			cal_out_throughput(sec_tsc, 8 * mbuf->pkt_len * n_write);
		}
	}
}
