#include "tcp.h"
#include "tcp_hash.h"
#include "tcp_sock.h"
#include "tcp_timer.h"
#include "ip.h"
#include "rtable.h"
#include "log.h"

// TCP socks should be hashed into table for later lookup: Those which
// occupy a port (either by *bind* or *connect*) should be hashed into
// bind_table, those which listen for incoming connection request should be
// hashed into listen_table, and those of established connections should
// be hashed into established_table.

struct tcp_hash_table tcp_sock_table;
#define tcp_established_sock_table	tcp_sock_table.established_table
#define tcp_listen_sock_table		tcp_sock_table.listen_table
#define tcp_bind_sock_table			tcp_sock_table.bind_table




// Switch the TCP state of `tsk` to `state`, logging the transition
// (names taken from the global tcp_state_str table).
inline void tcp_set_state(struct tcp_sock *tsk, int state)
{
	int old_state = tsk->state;
	tsk->state = state;
	log(DEBUG, IP_FMT":%hu switch state, from %s to %s.", \
			HOST_IP_FMT_STR(tsk->sk_sip), tsk->sk_sport, \
			tcp_state_str[old_state], tcp_state_str[state]);
}
// Switch the congestion-control state of `tsk` to `state`, logging the
// transition with human-readable state names.
inline void tcp_set_c_state(struct tcp_sock *tsk, int state)
{
	// Indexed by c_state value; must stay in sync with the c_state enum.
	static const char *cc_names[] = { "OPEN", "DISORDER", "RECOVERY", "LOSS" };
	int prev = tsk->c_state;
	tsk->c_state = state;
	log(INFO, IP_FMT":%hu switch congestion state, from %s to %s.", \
			HOST_IP_FMT_STR(tsk->sk_sip), tsk->sk_sport, \
			cc_names[prev], cc_names[state]);
}

// Debug helper: dump every entry of tsk's out-of-order receive buffer
// (running index, sequence number, and the first 4 payload bytes).
// Uses the _safe iterator even though nothing is deleted; harmless.
#define TCP_RECV_OFO_BUFFER_PRINT(tsk)                                         \
    do {                                                                       \
        struct recv_ofo_buf_entry *entry, *tmp;                                \
        int cnt = 0;                                                           \
        list_for_each_entry_safe(entry, tmp, &((tsk)->rcv_ofo_buf), list) {    \
            log(DEBUG, "tcp_recv_ofo_buffer_print: %d th, seq: %u, data: %.4s",\
                cnt++, entry->seq, entry->payload);                            \
        }                                                                      \
    } while (0)


// init tcp hash table and tcp timer
void init_tcp_stack()
{
	for (int i = 0; i < TCP_HASH_SIZE; i++)
		init_list_head(&tcp_established_sock_table[i]);

	for (int i = 0; i < TCP_HASH_SIZE; i++)
		init_list_head(&tcp_listen_sock_table[i]);

	for (int i = 0; i < TCP_HASH_SIZE; i++)
		init_list_head(&tcp_bind_sock_table[i]);

	pthread_t timer;
	pthread_create(&timer, NULL, tcp_timer_thread, NULL);
}

// allocate tcp sock, and initialize all the variables that can be determined
// now
struct tcp_sock *alloc_tcp_sock()
{
	struct tcp_sock *tsk = malloc(sizeof(struct tcp_sock));

	memset(tsk, 0, sizeof(struct tcp_sock));

	tsk->state = TCP_CLOSED;
	tsk->rcv_wnd = TCP_DEFAULT_WINDOW;
	tsk->cwnd=TCP_MSS;


	//初始化链表头
	init_list_head(&tsk->list);
	init_list_head(&tsk->listen_queue);
	init_list_head(&tsk->accept_queue);
	init_list_head(&tsk->rcv_ofo_buf);
	init_list_head(&tsk->send_buf);

	init_list_head(&tsk->rcv_ofo_buf);
	init_list_head(&tsk->send_buf);

	tsk->rcv_buf = alloc_ring_buffer(tsk->rcv_wnd);

	tsk->wait_connect = alloc_wait_struct();
	tsk->wait_accept = alloc_wait_struct();
	tsk->wait_recv = alloc_wait_struct();
	tsk->wait_send = alloc_wait_struct();


	//init the lock
	pthread_mutex_init(&tsk->sk_lock, NULL);
	pthread_mutex_init(&tsk->rcv_buf_lock, NULL);
	pthread_mutex_init(&tsk->send_buf_lock, NULL);



	return tsk;
}

// Release one reference to the tcp sock.
//
// Each time the sock is referred (e.g. hashed into a table) ref_cnt is
// increased by 1; each call to free_tcp_sock decreases it by 1 and only
// releases the sock when the count drops to zero.
// (The old code never decremented ref_cnt, so referenced socks leaked.)
void free_tcp_sock(struct tcp_sock *tsk)
{
	if (!tsk) {
		log(ERROR, "free_tcp_sock failed!\n");
		return;
	}

	tsk->ref_cnt -= 1;
	if (tsk->ref_cnt <= 0) {
		// NOTE(review): rcv_buf, wait structs and mutexes are not
		// released here — confirm whether their owners free them.
		free(tsk);
		log(DEBUG, "tsk has been freed!\n");
	}
}

// Look up an established sock by the full four-tuple
// (saddr, daddr, sport, dport). Returns NULL when no sock matches.
struct tcp_sock *tcp_sock_lookup_established(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
	// Hash the four-tuple to select the bucket, then scan it.
	int hash = tcp_hash_function(saddr, daddr, sport, dport);
	struct tcp_sock *pos;
	list_for_each_entry(pos, &tcp_established_sock_table[hash], hash_list) {
		int match = (pos->sk_sip == saddr) &&
		            (pos->sk_dip == daddr) &&
		            (pos->sk_sport == sport) &&
		            (pos->sk_dport == dport);
		if (match)
			return pos;
	}
	return NULL;
}

// Look up a listening sock by source port.
//
// In accordance with BSD sockets, saddr is in the argument list but
// never used.
struct tcp_sock *tcp_sock_lookup_listen(u32 saddr, u16 sport)
{
	// Only the port participates in the hash for listening socks.
	int hash = tcp_hash_function(0, 0, sport, 0);
	struct tcp_sock *pos;
	list_for_each_entry(pos, &tcp_listen_sock_table[hash], hash_list) {
		if (pos->sk_sport == sport)
			return pos;
	}
	return NULL;
}

// Look up the sock an incoming packet belongs to: first in the
// established table, then in the listen table.
struct tcp_sock *tcp_sock_lookup(struct tcp_cb *cb)
{
	// Our local endpoint is the packet's destination; the remote
	// endpoint is its source.
	u32 saddr = cb->daddr;
	u32 daddr = cb->saddr;
	u16 sport = cb->dport;
	u16 dport = cb->sport;

	struct tcp_sock *tsk = tcp_sock_lookup_established(saddr, daddr, sport, dport);
	return tsk ? tsk : tcp_sock_lookup_listen(saddr, sport);
}

// Hash the sock into bind_table, keyed only by its source port.
// The table takes a reference on the sock.
static int tcp_bind_hash(struct tcp_sock *tsk)
{
	int hash = tcp_hash_function(0, 0, tsk->sk_sport, 0);
	list_add_head(&tsk->bind_hash_list, &tcp_bind_sock_table[hash]);
	tsk->ref_cnt += 1;	// bind_table now holds a reference
	return 0;
}

// Remove the sock from bind_table and drop the table's reference.
void tcp_bind_unhash(struct tcp_sock *tsk)
{
	// Nothing to do when the sock is not on the bind table.
	if (list_empty(&tsk->bind_hash_list))
		return;
	list_delete_entry(&tsk->bind_hash_list);
	free_tcp_sock(tsk);	// release the reference held by bind_table
}

// Return 1 if some sock in bind_table already occupies `sport`,
// otherwise 0.
static int tcp_port_in_use(u16 sport)
{
	int hash = tcp_hash_function(0, 0, sport, 0);
	struct tcp_sock *pos;
	list_for_each_entry(pos, &tcp_bind_sock_table[hash], bind_hash_list) {
		if (pos->sk_sport == sport)
			return 1;	// port is taken
	}
	return 0;
}

// find a free port by looking up bind_table
static u16 tcp_get_port()
{
	for (u16 port = PORT_MIN; port < PORT_MAX; port++) {
		if (!tcp_port_in_use(port))
			return port;
	}

	return 0;
}

// Try to use `port` as the sock's source port; port 0 means "pick any
// free port". On success the sock is hashed into bind_table and 0 is
// returned; -1 means the port is unavailable.
static int tcp_sock_set_sport(struct tcp_sock *tsk, u16 port)
{
	if (port) {
		// An explicitly requested port must be free.
		if (tcp_port_in_use(port))
			return -1;
	} else {
		port = tcp_get_port();
		if (!port)
			return -1;
	}

	tsk->sk_sport = port;
	tcp_bind_hash(tsk);	// occupy the port in bind_table
	return 0;
}

// Hash the sock into listen_table or established_table depending on its
// TCP state. Returns 0 on success, -1 for a closed sock or a duplicate
// established four-tuple. The table takes a reference on the sock.
int tcp_hash(struct tcp_sock *tsk)
{
	struct list_head *list;
	int hash;

	if (tsk->state == TCP_CLOSED)
		return -1;

	if (tsk->state == TCP_LISTEN) {
		// %hu — the old %ho printed the port in octal.
		log(DEBUG, "server hash to port: %hu", tsk->sk_sport);
		hash = tcp_hash_function(0, 0, tsk->sk_sport, 0);
		list = &tcp_listen_sock_table[hash];
	}
	else {
		// Assign the outer `hash` (previously a second `int hash`
		// declaration shadowed it).
		hash = tcp_hash_function(tsk->sk_sip, tsk->sk_dip, \
				tsk->sk_sport, tsk->sk_dport);
		list = &tcp_established_sock_table[hash];

		// Refuse to hash a duplicate four-tuple.
		struct tcp_sock *tmp;
		list_for_each_entry(tmp, list, hash_list) {
			if (tsk->sk_sip == tmp->sk_sip &&
					tsk->sk_dip == tmp->sk_dip &&
					tsk->sk_sport == tmp->sk_sport &&
					tsk->sk_dport == tmp->sk_dport)
				return -1;
		}
	}

	list_add_head(&tsk->hash_list, list);
	tsk->ref_cnt += 1;	// the table now holds a reference

	return 0;
}

// Remove the sock from established_table or listen_table and drop the
// reference the table held.
void tcp_unhash(struct tcp_sock *tsk)
{
	// Not hashed: nothing to undo.
	if (list_empty(&tsk->hash_list))
		return;
	list_delete_entry(&tsk->hash_list);
	free_tcp_sock(tsk);	// release the table's reference
}

// Bind the sock to the port in skaddr (network order); the ip address
// is deliberately ignored. Returns whatever tcp_sock_set_sport returns.
int tcp_sock_bind(struct tcp_sock *tsk, struct sock_addr *skaddr)
{
	return tcp_sock_set_sport(tsk, ntohs(skaddr->port));
}


//cwnd 记录函数：
void *tcp_cwnd_thread(void *arg) {
    struct tcp_sock *tsk = (struct tcp_sock *)arg;
    FILE *fp = fopen("cwnd.txt", "w");

    int time_us = 0;
    while (tsk->state == TCP_ESTABLISHED && time_us < 1000000) {
        usleep(500); //每500us唤醒一次，按需更改
        time_us += 500;
		//const char *tcp_state_c_str[] = { "OPEN", "DISORDER", "RECOVERY", "LOSS" };
        //fprintf(fp, "%d %f %u %u %s\n", time_us, tsk->cwnd, tsk->ssthresh, tsk->adv_wnd,tcp_state_c_str[tsk->c_state]);
		fprintf(fp, "%d %f %u %u\n", time_us, tsk->cwnd, tsk->ssthresh, tsk->adv_wnd);
   
	}
    fclose(fp);
    return NULL;
}
// connect to the remote tcp sock specified by skaddr
//
// XXX: skaddr here contains network-order variables
// 1. initialize the four key tuple (sip, sport, dip, dport);
// 2. hash the tcp sock into bind_table;
// 3. send SYN packet, switch to TCP_SYN_SENT state, wait for the incoming`
//    SYN packet by sleep on wait_connect;
// 4. if the SYN packet of the peer arrives, this function is notified, which
//    means the connection is established.
int tcp_sock_connect(struct tcp_sock *tsk, struct sock_addr *skaddr)
{

	log(DEBUG, "Tcp sock Connecting!");
	// Step 1: Set destination address and port
    tsk->sk_dip = ntohl(skaddr->ip);    // Convert from network to host order
    tsk->sk_dport = ntohs(skaddr->port);

	log(DEBUG, "tcp_sock trying to connect to server ("IP_FMT":%hu)", \
				HOST_IP_FMT_STR(tsk->sk_dip), tsk->sk_dport);

	// Step 2: Set source address (from routing table) and bind the port
	tsk->sk_sip = longest_prefix_match(ntohl(skaddr->ip))->iface->ip;
	//sport need to call tcp_sock_get_sport to initialize
	//这个函数中已经hash到了bind_table！
    if (tcp_sock_set_sport(tsk, tsk->sk_sport) < 0) {
        log(ERROR, "Failed to bind source port.");
        return -1;
    }
	tcp_send_control_packet(tsk, TCP_SYN);

	// check if sock exist; ans: exists:
	/*
	if(tsk==NULL){
		log(ERROR, "client sock resources lost.");
	}
	*/	
	tcp_set_state(tsk, TCP_SYN_SENT);

	if(tcp_hash(tsk)<0){
		log(ERROR, "fail to hash connect sock");
	}
	
    //Wait for SYN-ACK from peer
	sleep_on(tsk->wait_connect);
	
	//wake up, means we need to establish 
	log(DEBUG, "client: woken up, to establish");

	tcp_set_state(tsk, TCP_ESTABLISHED);

	//Check if the connection was established
    if (tsk->state == TCP_ESTABLISHED) {
        log(DEBUG, "Connection established:  from "IP_FMT":%hu to\
 "IP_FMT":%hu",
            HOST_IP_FMT_STR(tsk->sk_sip),tsk->sk_sport,HOST_IP_FMT_STR(tsk->sk_dip),tsk->sk_dport);
       //进入了到 ESTABLISHED 状态，初始化拥塞控制参数

		tsk->ssthresh = 64*1024; // 64KB
		tsk->dupACKcount = 0; // Reset duplicate ACK count
		tsk->c_state = TCP_CONG_OPEN; // Set initial congestion control state

		// 创建记录cwnd的进程
		pthread_t cwnd_record; 
		pthread_create(&cwnd_record, NULL, tcp_cwnd_thread, (void *)tsk);


		return 0; // Success
    } else {
        log(ERROR, "Connection failed.");
        return -1; // Failure
    }
}

// Set the backlog (maximum number of pending connection requests),
// switch to TCP_LISTEN, and hash the sock into listen_table.
// Returns 1 on success, -1 on failure (kept: callers treat >0 as success).
int tcp_sock_listen(struct tcp_sock *tsk, int backlog)
{
	// Validate tsk BEFORE using it — the old code logged tsk->sk_sport
	// ahead of the NULL check.
	if (!tsk) {
		log(ERROR, "Sock listen failed for tsk not exist!");
		return -1;
	}
	log(DEBUG, "listening to port %hu", tsk->sk_sport);

	tsk->backlog = backlog;
	tcp_set_state(tsk, TCP_LISTEN);

	// hash the tcp sock into listen_table
	if (tcp_hash(tsk) < 0) {
		log(ERROR, "listen hash error!");
		return -1;
	}
	return 1;
}

// Return 1 (and log) when the accept queue has reached the configured
// backlog, otherwise 0.
inline int tcp_sock_accept_queue_full(struct tcp_sock *tsk)
{
	int full = (tsk->accept_backlog >= tsk->backlog);
	if (full)
		log(ERROR, "tcp accept queue (%d) is full.", tsk->accept_backlog);
	return full;
}

// Move the sock from wherever it currently sits (e.g. listen_queue)
// onto the tail of its parent's accept_queue.
inline void tcp_sock_accept_enqueue(struct tcp_sock *tsk)
{
	if (!list_empty(&tsk->list))
		list_delete_entry(&tsk->list);

	struct tcp_sock *parent = tsk->parent;
	list_add_tail(&tsk->list, &parent->accept_queue);
	parent->accept_backlog += 1;
}

// Pop and return the first sock of the accept_queue.
// Caller guarantees the queue is non-empty.
inline struct tcp_sock *tcp_sock_accept_dequeue(struct tcp_sock *tsk)
{
	struct list_head *first = tsk->accept_queue.next;
	struct tcp_sock *child = list_entry(first, struct tcp_sock, list);

	list_delete_entry(&child->list);
	init_list_head(&child->list);	// leave the node self-linked
	tsk->accept_backlog -= 1;

	return child;
}

// If accept_queue is not empty, pop the first tcp sock and return it;
// otherwise sleep on wait_accept until a connection request arrives.
// Returns NULL on invalid sock or spurious wakeup with an empty queue.
struct tcp_sock *tcp_sock_accept(struct tcp_sock *tsk)
{
	// The old check `!&tsk->accept_queue` compared the ADDRESS of a
	// member against NULL and was always false; only tsk itself can
	// be invalid here.
	if (!tsk) {
		log(ERROR, "Invalid accept queue.");
		return NULL;
	}

	// Fast path: a pending connection is already queued.
	if (!list_empty(&tsk->accept_queue))
		return tcp_sock_accept_dequeue(tsk);

	// Queue empty: wait for an incoming connection.
	log(DEBUG, "Accept queue empty, waiting for incoming connections");
	sleep_on(tsk->wait_accept);

	log(DEBUG, "New connection detected, wake accept.");
	if (!list_empty(&tsk->accept_queue)) {
		struct tcp_sock *new_tsk = tcp_sock_accept_dequeue(tsk);
		// A connection completed: cancel the parent's retrans timer.
		tcp_unset_retrans_timer(tsk);
		return new_tsk;
	}

	// Woken up but still nothing queued.
	log(WARNING, "No connection available after wait.");
	return NULL;
}

// Close the tcp sock: send FIN to the peer, step through the closing
// state machine, then cancel the retransmission timer.
//
// NOTE(review): both branches sleep on wait_recv until the peer's reply
// advances the state — confirm against the wakeup paths in tcp_process.
void tcp_sock_close(struct tcp_sock *tsk)
{
	if(tsk->state==TCP_ESTABLISHED){
		// Active close: send FIN, move to FIN_WAIT_1.
		log(DEBUG, "CLIENT: initiating close process, sending FIN");
		// FIN is sent together with ACK to wake up the peer
		// (works around a CSendPy bug, per the original author).
		tcp_send_control_packet(tsk, TCP_FIN|TCP_ACK);
		tcp_set_state(tsk, TCP_FIN_WAIT_1);

		sleep_on(tsk->wait_recv);

	}
	if(tsk->state==TCP_CLOSE_WAIT){
		// Passive close: the peer's FIN was already seen; send our FIN
		// and wait in LAST_ACK for the final ACK.
		log(DEBUG, "SERVER: initiating close process, sending FIN");
		tcp_send_control_packet(tsk, TCP_FIN|TCP_ACK);
		tcp_set_state(tsk, TCP_LAST_ACK);
		
		sleep_on(tsk->wait_recv);

	}
	log(INFO,"tcp_sock_close: close the socket,unset retrans timer.");
	tcp_unset_retrans_timer(tsk); // Unset retransmission timer
	log(DEBUG, "close function end");
}

// Read up to `len` bytes from the sock's receive buffer into `buf`,
// blocking while no data is available and the connection is alive.
// Returns: 0 on end-of-stream (peer closed), -1 on bad arguments,
// otherwise the number of bytes read.
int tcp_sock_read(struct tcp_sock *tsk, char *buf, int len) {
	if (!tsk || !buf || len <= 0) {
		log(ERROR, "Invalid arguments for tcp_sock_read.");
		return -1;
	}

	int total_read = 0;

	log(INFO, "tcp_sock_read: locking receive buffer.");
	pthread_mutex_lock(&tsk->rcv_buf_lock);

	// Block until data arrives or the connection starts closing.
	while (ring_buffer_empty(tsk->rcv_buf) && (tsk->state != TCP_CLOSED && tsk->state != TCP_CLOSE_WAIT)) {
		log(INFO, "WAIT 1: No data in buffer, waiting for data-recv.");
		pthread_mutex_unlock(&tsk->rcv_buf_lock);	// release while sleeping

		sleep_on(tsk->wait_recv);
		log(INFO,"tcp_sock_read: wake up");

		pthread_mutex_lock(&tsk->rcv_buf_lock);
		if (ring_buffer_empty(tsk->rcv_buf) && (tsk->state == TCP_CLOSED || tsk->state == TCP_CLOSE_WAIT)) {
			// Stream closed with nothing left to read -> EOF.
			log(INFO, "tcp_sock_read: rcv_buf is empty and socket is closing, return 0.");
			pthread_mutex_unlock(&tsk->rcv_buf_lock);
			return 0;
		}
		else if (!ring_buffer_empty(tsk->rcv_buf)) {
			log(INFO, "tcp_sock_read: rcv_buf is not empty, continue reading.");
			break;
		}
	}

	log(INFO, "tcp_sock_read: data coming. read it.");

	// Drain the ring buffer in chunks of at most MSS bytes.
	while (total_read < len && !ring_buffer_empty(tsk->rcv_buf)) {
		int chunk_size = min(TCP_MSS, len - total_read);
		int read_len = read_ring_buffer(tsk->rcv_buf, buf + total_read, chunk_size);
		total_read += read_len;
	}

	// Re-advertise the freed space. Done while still holding the lock —
	// the old code updated rcv_wnd after the unlock, racing with writers.
	tsk->rcv_wnd = ring_buffer_free(tsk->rcv_buf);

	pthread_mutex_unlock(&tsk->rcv_buf_lock);

	log(DEBUG, "tcp_sock_read: %d bytes read from buffer, %d bytes remaining.", total_read, len - total_read);
	log(INFO, "tcp_sock_read: rcv_wnd is %d", tsk->rcv_wnd);

	return total_read;
}


// Write `len` bytes from `buf` into the sock's receive ring buffer.
// Despite the name, this is part of the RECEIVE path: it delivers
// in-order payload from the peer (called by tcp_move_recv_ofo_buffer).
// Returns -1 on bad arguments, otherwise the number of bytes written.
// Blocks on wait_recv whenever the ring buffer has no free space.
int tcp_sock_write(struct tcp_sock *tsk, char *buf, int len) {
	// Check for valid arguments
	if (!tsk || !buf || len <= 0) {
		log(ERROR, "Invalid arguments for tcp_sock_write.");
		return -1;
	}

	int total_written = 0;

	log(DEBUG,"tcp_sock_write: locking receive buffer.");
	pthread_mutex_lock(&tsk->rcv_buf_lock); // Lock to protect receive buffer

	while (total_written < len) {
		int remaining = len - total_written; // Remaining data to write
		int write_size = min(remaining, ring_buffer_free(tsk->rcv_buf)); // Write size limited by free space in buffer

		if (write_size <= 0) {
			// No room: drop the lock while sleeping so a reader can
			// drain the buffer and wake us.
			log(DEBUG, "tcp_sock_write: WAIT 2: Receive buffer is full, waiting for space. Unlock it");
			pthread_mutex_unlock(&tsk->rcv_buf_lock);

			// Wait for space in the receive buffer
			sleep_on(tsk->wait_recv);

			log(DEBUG, "tcp_sock_write: WAIT 2: Woken up, retrying to write. Lock it");
			pthread_mutex_lock(&tsk->rcv_buf_lock);
			continue;
		}

		// Write data to ring buffer
		write_ring_buffer(tsk->rcv_buf, buf + total_written, write_size);

		
		log(DEBUG, "tcp_sock_write: %d bytes written to buffer, %d bytes remaining.", write_size, len - total_written);
		
		// Update written count
		total_written += write_size;

	}
	// Shrink the advertised window by the bytes consumed.
	// NOTE(review): tcp_move_recv_ofo_buffer ALSO decrements rcv_wnd
	// after calling this function — the window is charged twice; one of
	// the two sites should be removed.
	tsk->rcv_wnd -= total_written; // Update receive window size
	log(DEBUG, "tcp_sock_write: rcv_wnd is %d", tsk->rcv_wnd);
	pthread_mutex_unlock(&tsk->rcv_buf_lock); // Unlock receive buffer
	
	return total_written; // Return the total number of bytes written
}

//buf: data buf len: length of buf
//如果重传队列有东西，就别发。
int tcp_send_data(struct tcp_sock *tsk, char *buf, int len)
{
	//如果有重传需求，就先别发了
	
	//只要还要重传，就别发
	// if(!list_empty(&tsk->send_buf)){
	// 	log(DEBUG,"tcp_send_data: send_buf is not empty, wait for it.");
	// 	sleep_on(tsk->wait_send);
	// }


	if (!tsk || !buf || len <= 0) {
		log(ERROR, "Invalid arguments for tcp_send_data.");
		return -1;
	}
	
	int total_sent = 0;

	while (total_sent < len) {
		pthread_mutex_lock(&tsk->rcv_buf_lock); // 加锁保护发送窗口
		int remaining = len - total_sent;    // 剩余需要发送的数据
		int send_size = min(remaining, tsk->snd_wnd); // 发送大小受限于发送窗口

		// 数据包大小
		int pkt_size = ETHER_HDR_SIZE + TCP_BASE_HDR_SIZE + IP_BASE_HDR_SIZE + len;
		char *packet = malloc(pkt_size);
		if (!packet) {
			log(ERROR, "Failed to allocate memory for packet.");
			pthread_mutex_unlock(&tsk->rcv_buf_lock);
			return -1;
		}
		memset(packet, 0, pkt_size);

		log(DEBUG," send_size: %d, para len: %d", send_size, len);
		// 填充数据负载
		memcpy(packet + ETHER_HDR_SIZE + TCP_BASE_HDR_SIZE + IP_BASE_HDR_SIZE, buf + total_sent, send_size);

		if(tcp_tx_window_test(tsk)==0){
			log(DEBUG,"window is not enough, sleep_on send.");
			pthread_mutex_unlock(&tsk->rcv_buf_lock); // Unlock the send buffer
			sleep_on(tsk->wait_send); // 等待发送窗口可用

			log(DEBUG,"woken up, try to send again.");
			
			pthread_mutex_unlock(&tsk->rcv_buf_lock); // Unlock the send buffer
		}
		

		//note: send_packet size is the total length of packet
		tcp_send_packet(tsk, packet, pkt_size); // tcp_send_packet can update snd_nxt by send_size;

		// 更新已发送的数据量
		total_sent += send_size;

		pthread_mutex_unlock(&tsk->rcv_buf_lock);

		log(DEBUG, "Sent %d bytes, %d bytes remaining. ", send_size, len - total_sent);
	}

	return total_sent; // 返回成功发送的数据量
}



// Compute the unused send window from snd_una, snd_wnd and snd_nxt.
// Returns 1 when at least TCP_MSS bytes may still be sent, else 0.
int tcp_tx_window_test(struct tcp_sock *tsk)
{
	// Window end is snd_una + snd_wnd; subtract the next seq to send.
	u32 remaining_window = tsk->snd_una + tsk->snd_wnd - tsk->snd_nxt;
	log(DEBUG,"tcp_tx_window_test: snd_una: %u, snd_wnd: %u, snd_nxt: %u",tsk->snd_una,tsk->snd_wnd,tsk->snd_nxt);

	if (!less_or_equal_32b(TCP_MSS, remaining_window)) {
		log(DEBUG,"tcp_tx_window_test: remaining window: %u is less than TCP_MSS, return 0.",remaining_window);
		return 0;
	}

	log(DEBUG, "tcp_tx_window_test: remaining window is larger or equal than TCP_MSS, return 1.");
	return 1;
}



// Retransmission queue: append a private copy of `packet` (len bytes)
// to the tail of send_buf. The queue is protected by send_buf_lock.
void tcp_send_buffer_add_packet(struct tcp_sock *tsk, char *packet, int len)
{
	pthread_mutex_lock(&tsk->send_buf_lock);

	struct send_buffer_entry *entry = malloc(sizeof(struct send_buffer_entry));
	if (!entry) {
		log(ERROR, "Failed to allocate memory for send buffer entry.");
		pthread_mutex_unlock(&tsk->send_buf_lock);
		return;
	}

	// Deep-copy the packet so the queue owns its own buffer.
	entry->packet = malloc(len);
	if (!entry->packet) {
		log(ERROR, "Failed to allocate memory for packet.");
		free(entry);
		pthread_mutex_unlock(&tsk->send_buf_lock);
		return;
	}
	memcpy(entry->packet, packet, len);
	entry->len = len;

	// Cache the host-order sequence number for later ACK matching.
	struct tcphdr *tcp = packet_to_tcp_hdr(packet);
	entry->seq = ntohl(tcp->seq);

	list_add_tail(&entry->list, &tsk->send_buf);

	pthread_mutex_unlock(&tsk->send_buf_lock);
}

/*
Walk the retransmission queue and remove every packet fully covered by
the received ACK (`ack`, host order). When the queue becomes empty, the
retransmission timer is cancelled. Returns the number of removed entries.
Packet headers are big-endian; read them via packet_to_tcp_hdr + ntohl.
*/
int tcp_update_send_buffer(struct tcp_sock *tsk, u32 ack)
{
	log(DEBUG, "tcp_update_send_buffer: locking send buffer.");
	pthread_mutex_lock(&tsk->send_buf_lock);

	int removed = 0;

	struct send_buffer_entry *entry, *tmp;
	list_for_each_entry_safe(entry, tmp, &tsk->send_buf, list) {
		int pl_len = entry->len - ETHER_HDR_SIZE - IP_BASE_HDR_SIZE - TCP_BASE_HDR_SIZE;
		log(DEBUG,"entry seq: %u, entry pl_len: %d",entry->seq,pl_len);

		// Fully acknowledged when seq + payload length <= ack.
		if (less_or_equal_32b(entry->seq + pl_len, ack)) {
			log(INFO, "tcp_update_send_buffer: peer ack: %u, removing acknowledged entry with seq: %u", ack, entry->seq);
			list_delete_entry(&entry->list);
			free(entry->packet);	// old code leaked this buffer
			free(entry);
			removed++;
		}
	}

	if (list_empty(&tsk->send_buf)) {
		// Nothing in flight any more: stop the retransmission timer.
		log(INFO, "tcp_update_send_buffer: send buffer is empty, canceling retransmission timer.");
		tcp_unset_retrans_timer(tsk);
	}

	pthread_mutex_unlock(&tsk->send_buf_lock);
	return removed;
}

/*
Retransmit the first packet of the retransmission queue: copy it,
refresh its ack field and checksum, and hand the copy to ip_send_packet.
snd_nxt and friends are NOT touched — this is an independent resend.
ip_send_packet consumes (frees) the buffer it is given, hence the copy.
Returns 0 on success, -1 when the queue is empty or on OOM.
*/
int tcp_retrans_send_buffer(struct tcp_sock *tsk)
{
	pthread_mutex_lock(&tsk->send_buf_lock);

	if (list_empty(&tsk->send_buf)) {
		pthread_mutex_unlock(&tsk->send_buf_lock);
		return -1;
	}

	struct send_buffer_entry *head = list_entry(tsk->send_buf.next, struct send_buffer_entry, list);

	// Private copy, because ip_send_packet frees its argument.
	char *pkt_copy = malloc(head->len);
	if (!pkt_copy) {
		log(ERROR, "Failed to allocate memory for retransmission packet.");
		pthread_mutex_unlock(&tsk->send_buf_lock);
		return -1;
	}
	memcpy(pkt_copy, head->packet, head->len);

	// Only the (big-endian) ack field and the checksum need refreshing.
	struct iphdr *ip = packet_to_ip_hdr(pkt_copy);
	struct tcphdr *tcp = packet_to_tcp_hdr(pkt_copy);
	tcp->ack = htonl(tsk->rcv_nxt);
	tcp->checksum = tcp_checksum(ip, tcp);

	ip_send_packet(pkt_copy, head->len);
	log(INFO, "tcp_retrans_send_buffer: retransmission packet sent. with seq: %u", head->seq);

	pthread_mutex_unlock(&tsk->send_buf_lock);
	return 0;
}

int tcp_recv_ofo_buffer_add_packet(struct tcp_sock *tsk, struct tcp_cb *cb)
{
	pthread_mutex_lock(&tsk->rcv_buf_lock);
	
	// log(INFO,"BEFORE ADD PACKET");
	// TCP_RECV_OFO_BUFFER_PRINT(tsk);

	// If acked(重复的报文), ignore it. (specially probe packet)
	// 重复报文: cb->seq_end < rcv_nxt
	//
	if (less_or_equal_32b(cb->seq_end, tsk->rcv_nxt)) {
        // 1.这是一个重复的包，直接丢弃
		log(WARNING,"tcp_recv_ofo_buffer_add_packet: dup packet ,throw it.");
        pthread_mutex_unlock(&tsk->rcv_buf_lock);
       // printf("duplicate packet: seq_end=%u, rcv_nxt=%u\n", cb->seq_end, tsk->rcv_nxt);
	   tcp_send_control_packet(tsk,TCP_ACK);
        return -1;
    }

	// Init entry
	struct recv_ofo_buf_entry *new_entry = malloc(sizeof(struct recv_ofo_buf_entry));
	if (!new_entry) {
		log(ERROR, "Failed to allocate memory for out-of-order buffer entry.");
		return -1;
	}


	new_entry->seq =cb->seq;
	new_entry->payload = malloc(cb->pl_len);
	memcpy(new_entry->payload, cb->payload, cb->pl_len);

	new_entry->pl_len=cb->pl_len;
	new_entry->seq_end=cb->seq_end;
	
	
	log(DEBUG,"tcp_recv_ofo_buffer_add_packet: initializing new_entry. seq: %u, pl_len is %d",new_entry->seq,new_entry->pl_len);

	
	if(less_than_32b(tsk->rcv_nxt,cb->seq)){
		log(WARNING,"tcp_recv_ofo_buffer_add_packet: packet with seq:%u, but expected recv: %u,not ordered",new_entry->seq,tsk->rcv_nxt);
	}

	

	//不为空，顺序查找


	struct recv_ofo_buf_entry *entry,*tmp;
	
	int flag=1;// 1: 正常退出 0: break出去的
	list_for_each_entry_safe(entry, tmp, &tsk->rcv_ofo_buf, list) {
		// if list is empty, directly add it.
		if(list_empty(&tsk->rcv_ofo_buf)){
			// ofo为空 直接加入
			log(INFO,"tcp_recv_ofo_buffer_add_packet: ofo_buf is empty, add seq: %u ", cb->seq);
			list_add_tail(&new_entry->list,&tsk->rcv_ofo_buf);
			flag=0;
			break;
		}

		// Check for duplicate sequence number
		log(DEBUG,"tcp_recv_ofo_buffer_add_packet: now the seq is %u",entry->seq);
		if (new_entry->seq==entry->seq) {
			log(DEBUG, "tcp_recv_ofo_buffer_add_packet: duplicate packet with seq: %u, discarding.", entry->seq);
			// Free the new entry memory: duplicated

			free(new_entry->payload);
			free(new_entry); // Free the new entry memory
			new_entry=NULL;
			break;
		}
		// Insert the entry in the correct position based on sequence number
		if (less_than_32b(cb->seq, entry->seq)) {
			log(INFO, "tcp_recv_ofo_buffer_add_packet: inserting packet with seq: %u before entry with seq: %u", cb->seq, entry->seq);
			list_insert(&new_entry->list, entry->list.prev,&entry->list); // Insert before the current entr
			flag=0;
			break;
		}
	}

	if(flag){//遍历出去的，说明还没加入
		if(new_entry)
		{
			log(DEBUG,"tcp_recv_ofo_buffer_add_packet: inserting packet with seq: %u in tail.(LARGEST)",new_entry->seq);
			list_add_tail(&new_entry->list,&tsk->rcv_ofo_buf);
		}
	}
	

	// 上送tcp_recv_ofo_buf_entry到接收缓冲区
	
	pthread_mutex_unlock(&tsk->rcv_buf_lock);

	int total_moved=tcp_move_recv_ofo_buffer(tsk); // Move packets from out-of-order buffer to receive buffer
	if(total_moved<0){
		log(ERROR, "tcp_recv_ofo_buffer_add_packet: move recv ofo buffer failed.");

		free(new_entry->payload);
		free(new_entry); // Free the new entry memory
		return -1; // Move failed, return error
	}
	



	return 0;
}

// Move in-order packets (entry->seq == rcv_nxt) from the out-of-order
// buffer into the receive ring buffer, advancing rcv_nxt as data lands.
// Returns the number of bytes moved (>= 0), or -1 when the ring buffer
// is already completely full.
int tcp_move_recv_ofo_buffer(struct tcp_sock *tsk)
{
	log(DEBUG,"tcp_recv_ofo_buffer_add_packet: moving ordered data to recv_buffer");

	struct recv_ofo_buf_entry *entry, *tmp;
	int total_moved = 0;

	if (list_empty(&tsk->rcv_ofo_buf)) {
		// Nothing buffered: not an error. (The old code fell out with a
		// bare `return;` from this non-void function.)
		log(WARNING,"tcp_move_recv_ofo_buffer: ofo_buf is empty!");
		return 0;
	}

	if (ring_buffer_full(tsk->rcv_buf)) {
		log(WARNING,"tcp_move_recv_ofo_buffer: rcv_buf is full, exit");
		return -1;
	}

	list_for_each_entry_safe(entry, tmp, &tsk->rcv_ofo_buf, list) {
		if (entry->seq == tsk->rcv_nxt) {
			// In-order head: move it if the ring buffer has room.
			if (ring_buffer_free(tsk->rcv_buf) < entry->pl_len) {
				log(WARNING, "tcp_move_recv_ofo_buffer: receive buffer is full, cannot move packet with seq: %u.", entry->seq);
				break;
			}

			log(DEBUG,"tcp_move_recv_ofo_buffer: find ordered packet, seq:%u try to write. pl_len: %d",entry->seq,entry->pl_len);

			int write_len = tcp_sock_write(tsk, entry->payload, entry->pl_len);
			if (write_len < 0) {
				log(ERROR, "tcp_move_recv_ofo_buffer: failed to write to receive buffer.");
				break;
			}

			log(INFO, "tcp_move_recv_ofo_buffer: %d bytes moved to receive buffer.", write_len);
			total_moved += write_len;
			tsk->rcv_nxt += write_len;
			// rcv_wnd is already reduced inside tcp_sock_write; the
			// extra `tsk->rcv_wnd -= write_len` the old code did here
			// shrank the advertised window twice per byte.

			list_delete_entry(&entry->list);
			free(entry->payload);
			free(entry);

		} else if (less_than_32b(tsk->rcv_nxt, entry->seq)) {
			// A gap remains before this entry: stop moving.
			log(DEBUG,"tcp_move_recv_ofo_buffer: met later(not ordered) data, break");
			break;
		}
	}

	// Wake readers only when something actually arrived.
	log(DEBUG,"tcp_move_recv_ofo_buffer: end. wake up recv");
	if (total_moved)
		wake_up(tsk->wait_recv);

	return total_moved;
}


