// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <lwip/tcp.h>

/* Congestion-control algorithm currently in effect; newReno by default.
 * Shared by all pcbs (per-stack, not per-connection). */
struct tcp_congestion_ops *lwip_tcp_ca_ops = &lwip_tcp_reno;
/* NULL-terminated registry of built-in algorithms, searched by name in
 * tcp_set_default_congestion_control(). */
static struct tcp_congestion_ops *tcp_cong_list[] = {&lwip_tcp_reno, &lwip_cubictcp,NULL};

void tcp_init_congestion_control(struct tcp_pcb *pcb)
{
	LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_cong: call tcp_init_congestion_control()\n"));
	pcb->prior_ssthresh = 0;
	if (lwip_tcp_ca_ops->init)
		lwip_tcp_ca_ops->init(pcb);
	pcb->icsk_ca_initialized = 1;
}

/* Manage refcounts on socket close. */
/* Detach congestion control from a closing pcb: run the algorithm's
 * optional release hook, wipe its private per-connection state, and
 * return the pcb to the Open CA state. */
void tcp_cleanup_congestion_control(struct tcp_pcb *icsk)
{
	LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_cong: call tcp_cleanup_congestion_control()\n"));

	if (lwip_tcp_ca_ops->release != NULL) {
		lwip_tcp_ca_ops->release(icsk);
	}

	/* Scrub algorithm-private scratch space so a reused pcb cannot see
	 * stale state from the previous algorithm. */
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	icsk->icsk_ca_initialized = 0;
	tcp_set_ca_state(icsk, LWIP_TCP_CA_Open);
}

/* Used by Gazelle to set default congestion control */
/* Used by Gazelle to set default congestion control.
 *
 * Looks @name up in the built-in registry; an unknown (or NULL) name
 * falls back to newReno, matching the original "reno is the fallback"
 * behavior. Returns 0 on success, -EINVAL if the chosen algorithm is
 * missing a mandatory hook.
 *
 * Fix vs. original: strcmp() with a NULL @name is undefined behavior;
 * guard it and take the reno fallback path instead. */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca = NULL;

	if (name != NULL) {
		for (int i = 0; (ca = tcp_cong_list[i]) != NULL; i++) {
			if (strcmp(ca->name, name) == 0)
				break;
		}
	}
	/* Unknown or NULL name: silently fall back to newReno. */
	if (ca == NULL)
		ca = &lwip_tcp_reno;

	/* all algorithms must implement these */
	LWIP_ERROR("Current congestion algorithm does not implement required ops",
				ca->ssthresh && ca->undo_cwnd && (ca->cong_avoid || ca->cong_control),return -EINVAL);
	LWIP_PLATFORM_LOG(TCP_CWND_DEBUG,"LwIP tcp_cong: Set tcp congestion algorithm to %s\n",ca->name);
	lwip_tcp_ca_ops = ca;
	return 0;
}

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
/* Grow cwnd by one increment per acked unit, capped at ssthresh, and
 * return the acks that were NOT consumed (so congestion avoidance can
 * use the leftover when cwnd crosses ssthresh mid-ACK). */
u32_t tcp_slow_start(struct tcp_pcb *tp, u32_t acked)
{
	u32_t grown = tp->cwnd;

	TCP_WND_INC(grown, acked);
	grown = LWIP_MIN(grown, tp->ssthresh);

	/* Each unit of growth actually applied consumes one ack credit. */
	acked -= grown - tp->cwnd;
	tp->cwnd = grown;

	return acked;
}

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
/* Additive increase: conceptually cwnd += 1/w per acked unit.
 * snd_cwnd_cnt accumulates acks; every full multiple of @w converts
 * into one cwnd increment. */
void tcp_cong_avoid_ai(struct tcp_pcb *tp, u32_t w, u32_t acked)
{
	/* Credits banked while w was larger: release a single increment
	 * now instead of bursting them all at once. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32_t increments = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt %= w;
		tp->cwnd += increments;
	}
	/* NOTE(review): upstream Linux additionally clamps cwnd to
	 * snd_cwnd_clamp here; no equivalent field is visible in this
	 * port — confirm whether a cap is enforced elsewhere. */
}

/*
 * TCP Reno congestion control
 * This is special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
/* Jacobson's slow start + congestion avoidance (SIGCOMM '88, p. 328).
 * @ack is unused by reno but kept for the cong_avoid hook signature. */
void tcp_reno_cong_avoid(struct tcp_pcb *tp, u32_t ack, u32_t acked)
{
	/* Below ssthresh: exponential growth. Any ack credit left over
	 * after hitting ssthresh spills into additive increase below. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (acked == 0)
			return;
	}

	/* NOTE(review): upstream Linux passes w = snd_cwnd (packets);
	 * the cwnd/10 divisor here looks Gazelle-specific (lwIP cwnd is
	 * byte-based) — confirm the intended AI growth rate. */
	tcp_cong_avoid_ai(tp, tp->cwnd / 10, acked);
}

/* Slow start threshold is half the congestion window (min 2) */
/* Slow start threshold is half the congestion window, floored at 2.
 * NOTE(review): the floor of 2 mirrors Linux's "2 packets"; lwIP cwnd
 * is byte-based — confirm the unit is intended here. */
u32_t tcp_reno_ssthresh(struct tcp_pcb *pcb)
{
	u32_t half_cwnd = pcb->cwnd >> 1U;

	return (half_cwnd > 2U) ? half_cwnd : 2U;
}

/* Undo a cwnd reduction: restore the larger of the current window and
 * the window remembered before the reduction. */
u32_t tcp_reno_undo_cwnd(struct tcp_pcb *pcb)
{
	if (pcb->prior_cwnd > pcb->cwnd)
		return pcb->prior_cwnd;

	return pcb->cwnd;
}

/* newReno ops table — the default and fallback algorithm. No init/release
 * hooks: reno keeps no per-connection private state. */
struct tcp_congestion_ops lwip_tcp_reno __read_mostly = {
	.name		= "reno",
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd (min 2) on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* slow start + AI growth */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* restore window on false loss */
};
