/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <arpa/inet.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_malloc.h>

#include "main.h"


/**
 * Decide whether a received packet is an ACK.
 *
 * Parses the Ethernet + IPv4 headers of @m and compares the IPv4 source
 * address against 192.168.2.217 (r1): packets originating from r1 are
 * treated as ACKs.
 *
 * @param m  received mbuf (assumed Ethernet/IPv4 — NOTE(review): ethertype
 *           and IP protocol are not validated before the header is trusted)
 * @return 1 if the packet came from r1 (is an ACK), 0 otherwise
 */
static uint8_t check_isack(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	uint32_t src_addr; /* host byte order after rte_be_to_cpu_32() */

	/* BUG FIX: the original parsed `data_m`, an out-of-scope identifier;
	 * the packet under inspection is the parameter `m`. */
	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

	src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
	if (src_addr == 0xC0A802D9) { /* 192.168.2.217: the packet is from r1 */
		return 1;
	}
	return 0;
}

/**
 * Re-pace the sender at @new_rate: compute the time needed to transmit
 * @t_size bytes at that rate and arm sender_state->next_avail with the
 * earliest TSC value at which the next packet may be sent.
 *
 * @param new_rate  target sending rate in bits per second (must be > 0)
 * @param sec_tsc   TSC ticks per second (rte_get_tsc_hz())
 * @param t_size    size of the data just sent, in bytes
 */
static void
change_rate(uint64_t new_rate, const uint64_t sec_tsc, const uint32_t t_size){
	struct sender_state *sender_state;
	sender_state = &app.sender_state;
	/* BUG FIX: the original read an undeclared `cur_tsc`; take the
	 * timestamp here. */
	uint64_t cur_tsc = rte_rdtsc();
	/* BUG FIX: `t_size * 8 / new_rate` in integer arithmetic is 0 for any
	 * rate above the per-packet bit count, collapsing the pacing delay to
	 * zero. Multiply by sec_tsc before dividing (no overflow: packet bits
	 * x ticks/sec stays far below 2^64). */
	uint64_t t_consume = (uint64_t)t_size * 8 * sec_tsc / new_rate;
	sender_state->next_avail = cur_tsc + t_consume;
}

/**
 * PEE decoding and rate-adjustment logic, run for every received ACK.
 *
 * Reads the 2-bit ECN field of the ACK's IPv4 TOS byte and accumulates the
 * bits into a 7-sample codeword (bset/bcnt): state 0b11 encodes switch
 * queue length, state 0b10 encodes link utilization, and 0b01 means
 * "repeat the previous state". The decoded value updates the smoothed
 * congestion estimate `u`, from which the next sending rate is derived and
 * the pacing deadline re-armed via change_rate().
 *
 * @param m  the received ACK mbuf (assumed Ethernet + IPv4)
 */
static void handle_pee_ack(struct rte_mbuf *m){
	struct sender_state *sender_state;
	sender_state = &app.sender_state;

	sender_state->ack_flag = 1;

	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	/* BUG FIX: the original parsed the out-of-scope identifier `data_m`;
	 * the ACK being handled is the parameter `m`. */
	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
	uint8_t ecn = (ipv4_hdr->type_of_service)&0x03;
	/* ECN 0b01 carries no new state: reuse the previous one. */
	uint8_t true_ecn_state = ecn==0x01?sender_state->last_state:ecn;
	if(true_ecn_state == sender_state->last_state){ /* same state: keep accumulating */
		/* NOTE(review): bcnt is never incremented in this function —
		 * confirm it is advanced elsewhere, otherwise a 7-bit codeword
		 * can never complete. */
		if(sender_state->bcnt>6){
			if(true_ecn_state == 0x03){
				sender_state->qlen = 10 * sender_state->bset; /* decoded queue length */
			}else if(true_ecn_state == 0x02){
				sender_state->link_util = sender_state->bset; /* decoded link utilization */
			}
			sender_state->bset=0; /* reset codeword */
			sender_state->bcnt=0; /* reset bit counter */
			if(ecn != 0x01){
				sender_state->bset = 1<<6; /* this ACK starts the next word */
			}
		}else if(ecn != 0x01){ /* a lone coded bit: set position 6-bcnt */
			sender_state->bset = (sender_state->bset|(1<<(6-sender_state->bcnt)));
		}
	}else{ /* state changed: start a fresh codeword */
		sender_state->bset=0; /* reset codeword */
		sender_state->bcnt=0; /* reset bit counter */
	}

	double U = 0;
	double max_c = 0;
	/* BUG FIX: rte_rdtsc() returns 64 bits; the original truncated it into
	 * a uint32_t, which wraps within seconds on GHz-class TSCs. */
	uint64_t cur_tsc = rte_rdtsc();
	uint64_t tau = cur_tsc - sender_state->last_ack_arrive_tsc;
	tau = tau>BASE_RTT?BASE_RTT:tau; /* clamp the ACK gap to one base RTT */
	uint64_t dt = tau;
	if(true_ecn_state == 0x03){ /* queue-length encoding */
		/* 1456 bytes is one packet's wire size. */
		U = 1 + sender_state->qlen*1456*8/((1.0*BW_RATE/rte_get_tsc_hz())*BASE_RTT);
	}else{ /* link-utilization encoding */
		U = sender_state->link_util*1.0/100;
		if(U< 0.25){
			U = 0.25; /* floor the signal so we do not speed up too fast */
		}
	}
	/* EWMA of the congestion signal over one base RTT. */
	sender_state->u = (sender_state->u*(BASE_RTT-dt)+U*dt)/BASE_RTT;
	max_c = sender_state->u/1.0;

	/* BUG FIX: `new_rate` was declared inside an `if (1) { ... }` block but
	 * used after it, i.e. out of scope. Declare it at function scope. */
	double new_rate = sender_state->rate/max_c + 25*1e6; /* additive fairness term
		(25e6 bps = 25 Mbps; original comment claimed 25 Gbps — verify intent) */
	new_rate = new_rate > BW_RATE? BW_RATE:new_rate; /* cap at link bandwidth */
	new_rate = new_rate < 1*1e9? 1e9:new_rate; /* floor at 1 Gbps */
	change_rate(new_rate,rte_get_tsc_hz(),1456); /* arm the next send time */

	sender_state->last_state = true_ecn_state;
	sender_state->last_ack_arrive_tsc = cur_tsc;
}

// static const uint64_t E2PCC_RL_T = 5; /* us */
/**
 * Re-arm the pacing deadline after a send at the configured rate app.rate.
 *
 * @param cur_tsc  current timestamp in TSC ticks
 * @param sec_tsc  TSC ticks per second
 * @param t_size   number of bytes just transmitted
 */
static void
update_sending_status(const uint64_t cur_tsc, const uint64_t sec_tsc, const uint32_t t_size)
{
	struct sender_state *sender_state;
	sender_state = &app.sender_state;
	/* BUG FIX: force floating-point division — if app.rate is an integer
	 * type, `t_size * 8 / app.rate` truncates to 0 for any realistic rate
	 * and the pacing delay collapses to zero. (If app.rate is already a
	 * double this is behavior-identical.) */
	uint64_t t_consume = (uint64_t)((t_size * 8.0 / app.rate) * sec_tsc);
	sender_state->next_avail = cur_tsc + t_consume; /* earliest time the next send is allowed */
}

/* Decode the switch queue length. */
// TODO: decode the queue length -> the sender must then reduce its rate.
// Param ack_bufs: the batch of ACK mbufs received so far.
// Result: the current switch queue length.
static void decoding_qlen(struct rte_mbuf **ack_bufs)
{
	//TODO: walk the ECN bits of ack_bufs, decoding once per 7 ACKs, then
	//advance the ack_bufs pointer. Note: flows must be demultiplexed first.
}

/* Decode the link utilization. */
// TODO: decode the link utilization -> the sender may then increase its rate.
// Param ack_bufs: the batch of ACK mbufs received so far.
// Result: the switch egress-port utilization, between 0% and 100%.
static void decoding_lutil(struct rte_mbuf **ack_bufs)
{
	//TODO: walk the ECN bits of ack_bufs, decoding once per 7 ACKs, then
	//advance the ack_bufs pointer. Note: flows must be demultiplexed first.
}

static void
fill_ethernet_header(struct rte_ether_hdr *hdr)
{
	struct rte_ether_addr s_addr = {{0xa4, 0xbf, 0x01, 0x6e, 0xa6, 0x7a}};
	struct rte_ether_addr d_addr = {{0xe4, 0x3d, 0x1a, 0xac, 0xc8, 0x05}};
    //dstaddr=192.168.2.217的mac了A4:BF:01:6E:D3:0F
    //srcaddr=192.168.0.218的mac了 A4:BF:01:6E:A6:7A
    //192.168.63.221 的eno2np0 e4:3d:1a:ac:c8:04
    //192.168.63.221 的eno2np1  e4:3d:1a:ac:c8:05
	hdr->src_addr = s_addr;
	hdr->dst_addr = d_addr;
	hdr->ether_type = rte_cpu_to_be_16(0x0800);
}

/**
 * Fill a fixed IPv4 header for the generated flow:
 * 20-byte header, TOS low bits 0b11 (the ECN field used by PEE),
 * total length 1440 (20 IP + 20 TCP + 1400 payload), TTL 64, TCP,
 * src 192.168.0.218 -> dst 192.168.2.217.
 */
static void
fill_ipv4_header(struct rte_ipv4_hdr *hdr)
{
	hdr->version_ihl = (4 << 4) + 5;            /* IPv4, IHL = 5 words */
	hdr->time_to_live = 64;
	hdr->next_proto_id = 6;                     /* TCP */
	hdr->type_of_service = 0x03;                /* ECN bits set to 0b11 */
	hdr->total_length = rte_cpu_to_be_16(1440); /* 20 + 20 + 1400 */
	hdr->packet_id = rte_cpu_to_be_16(5462);    /* arbitrary fixed id */
	hdr->fragment_offset = rte_cpu_to_be_16(0);
	/* NOTE(review): checksum is precomputed for these exact field values —
	 * it must be recomputed if any field above changes. */
	hdr->hdr_checksum = rte_cpu_to_be_16(25295);
	hdr->src_addr = rte_cpu_to_be_32(0xC0A800DA); /* 192.168.0.218 */
	hdr->dst_addr = rte_cpu_to_be_32(0xC0A802D9); /* 192.168.2.217 */
}

/**
 * Fill a fixed TCP header: ports 0x162E -> 0x04d2, 20-byte header, no
 * flags. Side effect: consumes one sequence number from the global
 * app.seq_num counter.
 */
static void
fill_tcp_header(struct rte_tcp_hdr *hdr)
{
	hdr->sent_seq = rte_cpu_to_be_32(app.seq_num++); /* monotonically increasing */
	hdr->src_port = rte_cpu_to_be_16(0x162E);
	hdr->dst_port = rte_cpu_to_be_16(0x04d2);
	hdr->recv_ack = rte_cpu_to_be_32(0);
	hdr->data_off = 0x50;  /* 5 words = 20-byte header, no options */
	hdr->tcp_flags = 0;
	hdr->rx_win = rte_cpu_to_be_16(16);
	/* NOTE(review): checksum left zero — confirm it is offloaded to the
	 * NIC or ignored by the receiver. */
	hdr->cksum = rte_cpu_to_be_16(0);
	hdr->tcp_urp = rte_cpu_to_be_16(0);
}

/**
 * Build one raw Ethernet/IPv4/TCP packet with @payload_size payload bytes
 * from the application mbuf pool.
 *
 * The payload bytes are appended but left uninitialized (whatever the
 * mbuf buffer contains).
 *
 * @param payload_size  number of TCP payload bytes to append
 * @return the new mbuf, or NULL on allocation/append failure (caller owns
 *         the returned mbuf)
 */
static struct rte_mbuf *create_raw_pkt(uint32_t payload_size)
{
	struct rte_mbuf *m;
	struct rte_ether_hdr *ether_h;
	struct rte_ipv4_hdr *ipv4_h;
	struct rte_tcp_hdr *tcp_h;
	char *payload;

	/* BUG FIX: allocation failure was not checked before use. */
	m = rte_pktmbuf_alloc(app.pool);
	if (m == NULL)
		return NULL;

	/* Reserve headers + payload; any append fails with NULL when the
	 * mbuf runs out of tailroom. */
	ether_h = (struct rte_ether_hdr *)rte_pktmbuf_append(m, sizeof(struct rte_ether_hdr));
	ipv4_h = (struct rte_ipv4_hdr *)rte_pktmbuf_append(m, sizeof(struct rte_ipv4_hdr));
	tcp_h = (struct rte_tcp_hdr *)rte_pktmbuf_append(m, sizeof(struct rte_tcp_hdr));
	payload = rte_pktmbuf_append(m, payload_size);
	/* BUG FIX: rte_pktmbuf_append() results were not checked. */
	if (ether_h == NULL || ipv4_h == NULL || tcp_h == NULL || payload == NULL) {
		rte_pktmbuf_free(m);
		return NULL;
	}

	/* Fill the headers only after all space is secured, so a failed
	 * append does not consume a sequence number in fill_tcp_header(). */
	fill_ethernet_header(ether_h);
	fill_ipv4_header(ipv4_h);
	fill_tcp_header(tcp_h);

	return m;
}

/**
 * Account @bits of transmitted data and print the outgoing throughput
 * (Mbit/s) roughly once per second.
 *
 * NOTE(review): this updates app.e2pcc_param.tp_in — the same meter used
 * by cal_ack_recv_throughput(); confirm a dedicated "out" meter was not
 * intended.
 *
 * @param sec_tsc  TSC ticks per second
 * @param bits     number of bits just sent
 */
static void cal_out_throughput(const uint64_t sec_tsc, uint32_t bits)
{
	struct tp_meter *meter = &app.e2pcc_param.tp_in;
	const uint64_t now = rte_rdtsc();
	const uint64_t elapsed = now - meter->intvl_start_time;
	const double interval = elapsed / (double)sec_tsc;

	if (elapsed >= sec_tsc) {
		/* At least one second has passed: emit a sample and reset. */
		const double cur_time = (now - app.e2pcc_param.start_time) / (double)sec_tsc;
		printf("thoughtput out: %f %f\n", cur_time, (meter->intvl_num_bits / interval) / 1e6);
		meter->intvl_start_time = now;
		meter->intvl_num_bits = 0;
		meter->intvl_num_pkts = 0;
	}
	meter->intvl_num_bits += bits;
	meter->intvl_num_pkts++;
}

/**
 * Account a batch of received ACK mbufs in the inbound throughput meter
 * and print the receive throughput (Mbit/s) roughly once per second.
 *
 * @param port     receiving port id (currently unused)
 * @param sec_tsc  TSC ticks per second
 * @param array    the received mbufs
 * @param n_mbufs  number of valid entries in @array
 */
static void cal_ack_recv_throughput(uint32_t port, const uint64_t sec_tsc, struct rte_mbuf **array, uint16_t n_mbufs)
{
	struct tp_meter *meter = &app.e2pcc_param.tp_in;
	const uint64_t now = rte_rdtsc();
	/* Interval length is sampled once for the whole batch. */
	const double interval = (now - meter->intvl_start_time) / (double)sec_tsc;

	(void)port; /* not used yet */

	for (uint16_t k = 0; k < n_mbufs; k++) {
		/* Emit one sample once at least a second has elapsed. */
		if (now - meter->intvl_start_time >= sec_tsc) {
			const double cur_time = (now - app.e2pcc_param.start_time) / (double)sec_tsc;
			printf("ack receive thoughtput: %f %f %lu %d\n", cur_time,
			       (meter->intvl_num_bits / interval) / 1e6,
			       meter->intvl_num_pkts, array[k]->pkt_len);
			meter->intvl_start_time = now;
			meter->intvl_num_bits = 0;
			meter->intvl_num_pkts = 0;
		}
		meter->intvl_num_bits += array[k]->pkt_len * 8;
		meter->intvl_num_pkts++;
	}
}

/**
 * RX lcore main loop: receive bursts on port 0 / queue 0, run the PEE ACK
 * decoder on every packet coming from r1, then free all received mbufs.
 * Never returns.
 */
void app_main_loop_rx(void)
{
	struct rte_mbuf *ack_m;

	RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id());

	for (;;)
	{
		uint16_t n_mbufs, n_batch, n_read;

		/* NOTE(review): assumes app.mbuf_rx.n_mbufs is 0 on entry; the
		 * loop below maintains that invariant, so new packets land at
		 * the front of the array. */
		n_mbufs = app.mbuf_rx.n_mbufs;
		/* BUG FIX: the original checked `n_mbufs < 0`, which is always
		 * false for an unsigned type — the dead check was removed. */
		n_batch = APP_MBUF_ARRAY_SIZE - n_mbufs;
		if (app.burst_size_rx_read < n_batch)
			n_batch = app.burst_size_rx_read;

		/* BUG FIX: n_read was read uninitialized when n_batch == 0. */
		n_read = 0;
		if (n_batch != 0)
		{
			/* Receive from the first NIC. */
			n_read = rte_eth_rx_burst(0, 0, &app.mbuf_rx.array[n_mbufs], n_batch);
			n_mbufs += n_read;
		}

		if (n_read == 0)
			continue;

		/* Decode ACKs, then release every packet we just received. */
		for (uint16_t idx = 0; idx < n_read; idx++)
		{
			ack_m = app.mbuf_rx.array[idx];
			if (check_isack(ack_m)) /* ACK packet: run the decoder */
				handle_pee_ack(ack_m);
			rte_pktmbuf_free(ack_m);
		}

		/* Free any leftover entries beyond the ones just processed. */
		for (uint16_t idx = n_read; idx < n_mbufs; idx++)
			rte_pktmbuf_free(app.mbuf_rx.array[idx]);
		/* BUG FIX: the reset was inside the free loop; hoist it so it
		 * also runs when that loop body executes zero times. */
		app.mbuf_rx.n_mbufs = 0;
	}
}

/**
 * Worker lcore main loop (no pipeline): moves mbuf bursts from each port's
 * RX ring to the paired port's TX ring (port i forwards to port i^1).
 * Never returns; worker_mbuf is intentionally never freed.
 */
void app_main_loop_worker(void)
{
	struct app_mbuf_array *worker_mbuf;
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
			rte_lcore_id());

	/* Scratch burst buffer, allocated on this lcore's NUMA socket. */
	worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
									RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (worker_mbuf == NULL)
		rte_panic("Worker thread: cannot allocate buffer space\n");

	/* Round-robin over ports; n_ports is assumed to be a power of two
	 * for the mask to work. */
	for (i = 0;; i = ((i + 1) & (app.n_ports - 1)))
	{
		int ret;

		/* Dequeue a burst from app.rings_rx[i] into worker_mbuf->array. */
		ret = rte_ring_sc_dequeue_bulk(
			app.rings_rx[i],
			(void **)worker_mbuf->array,
			app.burst_size_worker_read,
			NULL);

		if (ret == 0)
			continue;

		/* Enqueue the same burst onto the paired port's TX ring,
		 * retrying until it fits (bulk enqueue is all-or-nothing). */
		do
		{
			ret = rte_ring_sp_enqueue_bulk(
				app.rings_tx[i ^ 1],
				(void **)worker_mbuf->array,
				app.burst_size_worker_write,
				NULL);
		} while (ret == 0);
	}
}

/**
 * TX lcore main loop: paced packet generation. Whenever the pacing
 * deadline (sender_state->next_avail) has passed, build one 1400-byte
 * payload packet and transmit it on port 0 / queue 0. Until the first ACK
 * arrives (ack_flag unset), pacing is driven by update_sending_status();
 * afterwards the RX path re-arms the deadline via handle_pee_ack().
 * Never returns.
 */
void app_main_loop_tx(void)
{
	struct rte_mbuf *mbuf;
	struct sender_state *sender_state;
	uint16_t n_write;
	uint16_t pkt_len;
	uint64_t cur_tsc;
	const uint64_t sec_tsc = rte_get_tsc_hz();

	RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id());

	sender_state = &app.sender_state;

	for (;;)
	{
		n_write = 0;
		pkt_len = 0;
		cur_tsc = rte_rdtsc(); /* current timestamp */

		if (sender_state->next_avail < cur_tsc) /* pacing deadline passed: rate limit */
		{
			mbuf = create_raw_pkt(1400); /* one packet, 1400-byte payload */
			if (mbuf == NULL) /* pool exhausted: retry on next iteration */
				continue;
			/* BUG FIX: capture pkt_len before the burst — on success the
			 * NIC owns the mbuf and reading it afterwards is a
			 * use-after-free. */
			pkt_len = mbuf->pkt_len;
			/* BUG FIX: the original indexed the port with a loop counter
			 * that was never advanced; use port 0 explicitly. */
			n_write = rte_eth_tx_burst(0, 0, &mbuf, 1);
			/* BUG FIX: the mbuf leaked whenever the burst sent nothing;
			 * the driver only frees mbufs it actually accepted. */
			if (n_write == 0)
				rte_pktmbuf_free(mbuf);
		}

		if (likely(n_write != 0))
		{
			/* Before the first ACK, self-pace at the configured rate. */
			if (unlikely(!app.sender_state.ack_flag))
				update_sending_status(cur_tsc, sec_tsc, pkt_len * n_write);
			// cal_out_throughput(sec_tsc, 8 * pkt_len * n_write);
		}
	}
}
