/*
 * TCP BSC: base bandwidth estimation congestion control for tcp
 *
 * cwnd = k * bw_est * rtt_min
 */
 
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Bsc structure */
/* TCP Bsc private congestion-control state, kept in the connection's
 * CA scratch area (size is checked against ICSK_CA_PRIV_SIZE at
 * module registration).
 */
struct bsc {
	u32 bw_ns_est;		/* first bandwidth estimation..not too smoothed 8) */
	u32 bw_est;		/* smoothed bandwidth estimate */

	u32 prior_inflight;	/* in-flight packets sampled at the last ACK event */
	u32 delivered;		/* prior_inflight - current inflight over the window */
	u32 delivered_st_stamp;	/* start timestamp of the delivery sample window */

	u32 rtt_min;		/* minimum RTT observed so far (jiffies) */
	u32 rtt_min_stamp;	/* last rtt_min update stamp (rate limiting) */
	u32 rtt;		/* latest RTT sample (jiffies) */
};	/* BUG FIX: struct declaration requires a terminating ';' */

/* Tcp bsc functions and constants */
#define TCP_BSC_RTT_MIN (HZ/100) //10ms
#define TCP_BSC_INIT_MIN (20*HZ)

/* NOTE(review): these expand to the integer expressions 9/8 == 1 and
 * 3/4 == 0, so writing "GAIN * x" does NOT scale x by the fraction --
 * e.g. TCP_BSC_SSTHRESH_GAIN * x evaluates to (3/4) * x == 0.
 * Callers must apply the fraction as (x * 9) / 8 resp. (x * 3) / 4.
 */
#define TCP_BSC_CWND_GAIN 9/8
#define TCP_BSC_SSTHRESH_GAIN 3/4

static void tcp_bsc_init(struct sock *sk){
	struct bsc *bs = inet_csk_ca(sk);

	bs->bw_ns_est = 0;
	bs->bw_est = 0;
	
	bs->prior_inflight = 0;
	bs->delivered = 0;
	bs->delivered_st_stamp = 0;
	
	bs->rtt_min = bs->rtt = TCP_WESTWOOD_INIT_RTT;
	bs->rtt_min_stamp = tcp_time_stamp;
}

/* important function
 * update delivered,delivered_st_stamp,bw_ns_est,bw_est
 */
static void bsc_update_window(struct sock *sk){
	struct tcp_sock *tp = tcp_sk(sk);
	struct bsc *bs = inet_csk_ca(sk);
	s32 interval = tcp_time_stamp - bs->delivered_st_stamp;
	
	bs->delivered = bs->prior_inflight - (tp->packets_out - tp->sacked_out);
	
	/* filter ack compression(ACK are delayed and then arrive in a burst)
	 * this method is so rough,have large room for improvement
	 */
	if(bs->rtt && interval > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)){
		bsc_filter(bs, interval);
		
		bs->delivered = 0;
		bs->delivered_st_stamp = tcp_time_stamp;
	}
}

/* Two-stage low-pass filter over delivery-rate samples: fold the latest
 * sample (delivered/interval) into bw_ns_est, then smooth bw_ns_est
 * into bw_est.  The very first sample seeds both estimators directly.
 */
static void bsc_filter(struct bsc *bs, u32 interval){
	u32 sample = bs->delivered / interval;

	if (bs->bw_ns_est || bs->bw_est) {
		bs->bw_ns_est = (3 * bs->bw_ns_est + sample) >> 2;
		bs->bw_est = (3 * bs->bw_est + bs->bw_ns_est) >> 2;
	} else {
		bs->bw_ns_est = sample;
		bs->bw_est = sample;
	}
}

static u32 bsc_bdp(const struct sock *sk);	/* defined below */

/* Slow-start threshold after loss: no lower than half the current
 * cwnd, nor lower than 3/4 of the estimated BDP.
 *
 * BUG FIX: "TCP_BSC_SSTHRESH_GAIN * bsc_bdp(sk)" expanded to
 * (3/4) * bdp == 0 * bdp under integer division, so the BDP term was
 * always zero; apply the fraction as (bdp * 3) / 4 instead.  The
 * unused local 'bs' is dropped, and bsc_bdp() (defined later in the
 * file) gets a forward declaration.
 */
static u32 tcp_bsc_ssthresh(struct sock *sk){
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, (bsc_bdp(sk) * 3) / 4);
}

static u32 bsc_bdp(const struct sock *sk);	/* defined below */

/* Main congestion-avoidance hook.
 *
 * BUG FIX: this must return void -- the original was declared u32 yet
 * used a bare "return;" and fell off the end of the function, which is
 * undefined behaviour for a value-returning function, and the
 * .cong_avoid member of tcp_congestion_ops expects a void function.
 * bsc_bdp() is defined later in the file, hence the forward
 * declaration.
 */
static void tcp_bsc_cong_avoid(struct sock *sk, u32 ack, u32 in_flight){
	struct tcp_sock *tp = tcp_sk(sk);
	struct bsc *bs = inet_csk_ca(sk);

	bsc_update_window(sk);

	/* avoid in_flight >= tp->snd_cwnd or application limited */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		/* In "safe" area: jump straight to the estimated BDP.
		 * NOTE(review): bw_est * rtt_min is compared against
		 * snd_cwnd_clamp without the mss_cache division that
		 * bsc_bdp() applies -- the units look inconsistent;
		 * confirm intent.
		 */
		if (bs->bw_est * bs->rtt_min < tp->snd_cwnd_clamp)
			tp->snd_cwnd = bsc_bdp(sk);
	} else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count --
		 * increase once for each full cwnd worth of bytes acked.
		 */
		if (tp->bytes_acked >= tp->snd_cwnd * tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd * tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		/* Reno-style linear increase: ~one segment per RTT. */
		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else {
			tp->snd_cwnd_cnt++;
		}
	}
}

static void update_rtt_min(struct bsc *bs);	/* defined below */

/* CA event hook: on every ACK (fast or slow path) snapshot the current
 * in-flight count so the next bsc_update_window() call can compute how
 * much was delivered, and refresh the minimum-RTT tracker.
 *
 * BUG FIX: update_rtt_min() is defined later in the file, so a forward
 * declaration is required.  The two identical ACK case bodies are
 * merged into one fallthrough case.
 */
static void tcp_bsc_event(struct sock *sk, enum tcp_ca_event event){
	struct tcp_sock *tp = tcp_sk(sk);
	struct bsc *bs = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
	case CA_EVENT_SLOW_ACK:
		/* packets_out is data the stack has sent (see tcp_output.c) */
		bs->prior_inflight = tp->packets_out - tp->sacked_out;
		update_rtt_min(bs);
		break;

	default:
		break;
	}
}

/* Per-ACK RTT sample hook.
 *
 * BUG FIX: 'w->rtt' referenced a variable that does not exist; the
 * sample belongs in this module's state, bs->rtt.  rtt arrives in
 * microseconds and is non-positive when no valid sample is available,
 * hence the rtt > 0 guard before converting to jiffies.
 */
static void tcp_bsc_pkts_acked(struct sock *sk, u32 cnt, s32 rtt){
	struct bsc *bs = inet_csk_ca(sk);

	if (rtt > 0)
		bs->rtt = usecs_to_jiffies(rtt);
}

/* Track the minimum RTT seen so far; the first sample seeds it.
 * (Still rough: no aging/windowing of the minimum.)
 */
static void update_rtt_min(struct bsc *bs){
	if (bs->rtt_min)
		bs->rtt_min = min(bs->rtt, bs->rtt_min);
	else
		bs->rtt_min = bs->rtt;
}

/* Estimated bandwidth-delay product in packets, floored at 2.
 * NOTE(review): bw_est is built from packet counts over jiffy
 * intervals (see bsc_filter) and rtt_min is in jiffies, so
 * bw_est * rtt_min already looks like a packet count; dividing by
 * mss_cache may be a unit error -- confirm the intended units of
 * bw_est before relying on this value.
 */
static u32 bsc_bdp(const struct sock *sk){
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct bsc *bs = inet_csk_ca(sk);
	return max_t(u32, (bs->bw_est * bs->rtt_min) / tp->mss_cache, 2);
}
static struct tcp_congestion_ops tcp_bsc = {
	.init		= tcp_bsc_init,
	.ssthresh	= tcp_bsc_ssthresh,
	.cong_avoid	= tcp_bsc_cong_avoid,
	.min_cwnd	= tcp_bsc_bw_rttmin,
	.cwnd_event	= tcp_bsc_event,
	.pkts_acked	= tcp_bsc_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "bsc"
};

/* Module entry point: verify the private CA state fits in the socket's
 * scratch area, then register the "bsc" algorithm with the TCP stack.
 */
static int __init tcp_bsc_register(void)
{
	BUILD_BUG_ON(sizeof(struct bsc) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_bsc);
}

/* Module exit point: detach the "bsc" algorithm from the TCP stack. */
static void __exit tcp_bsc_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_bsc);
}

/* Standard kernel module registration boilerplate. */
module_init(tcp_bsc_register);
module_exit(tcp_bsc_unregister);

MODULE_AUTHOR("Jianfeng Bai,BaiShanCloud");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Bsc");
