// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_proto_tcp.c:	TCP load balancing support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
 *
 *              Network name space (netns) aware.
 *              Global data moved to netns i.e struct netns_ipvs
 *              tcp_timeouts table has copy per netns in a hash table per
 *              protocol ip_vs_proto_data and is handled by netns
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>                  /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h>                    /* for csum_tcpudp_magic */
#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/indirect_call_wrapper.h>
#include <net/secure_seq.h>

#include "./ip_vs_fnat.h"

static int
tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);

/*
 * Decide whether this TCP packet may create a new IPVS connection.
 *
 * @ipvs:    per-netns IPVS state
 * @af:      address family of the packet
 * @skb:     the packet being scheduled
 * @pd:      per-netns TCP protocol data
 * @verdict: set to the netfilter verdict when 0 is returned
 * @cpp:     receives the new connection on successful scheduling
 * @iph:     pre-parsed IP header descriptor
 *
 * Returns 1 to let packet processing continue (NF_ACCEPT path), or 0
 * when the caller must use *verdict (normally NF_DROP).
 */
static int
tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
		  struct ip_vs_proto_data *pd,
		  int *verdict, struct ip_vs_conn **cpp,
		  struct ip_vs_iphdr *iph)
{
	struct ip_vs_service *svc;
	struct tcphdr _tcph, *th = NULL;
	__be16 _ports[2], *ports = NULL;

	/* In the event of icmp, we're only guaranteed to have the first 8
	 * bytes of the transport header, so we only check the rest of the
	 * TCP packet for non-ICMP packets
	 */
	if (likely(!ip_vs_iph_icmp(iph))) {
		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
		if (th) {
			/* an RST never opens a connection */
			if (th->rst)
				return 1;
			ports = &th->source;
		}
	} else {
		/* ICMP-embedded header: only the port pair is reliable */
		ports = skb_header_pointer(
			skb, iph->len, sizeof(_ports), &_ports);
	}

	if (!ports) {
		*verdict = NF_DROP;
		return 0;
	}

	/* No !th->ack check to allow scheduling on SYN + ACK for Active FTP */

	if (likely(!ip_vs_iph_inverse(iph)))
		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
					 &iph->daddr, ports[1], iph->subnet_id);
	else
		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
					 &iph->saddr, ports[0], iph->subnet_id);


	/* Non-SYN packets never schedule a new connection; clearing svc
	 * also makes such packets subject to the VIP defense checks
	 * further below.
	 */
	if ((likely(!ip_vs_iph_icmp(iph))) && (th) && (!th->syn)) {
		svc = NULL;
	}

	if (svc) {
		int ignored;

		if (ip_vs_todrop(ipvs)) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			*verdict = NF_DROP;
			return 0;
		}

		/* check the client ip if is in allowd client address list or excluded list */
		if (sysctl_client_list_switch(ipvs)) {
			if (!ip_vs_allow_saddr(&iph->saddr, svc)) {
				*verdict = NF_DROP;
				return 0;
			}
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
		if (!*cpp && ignored <= 0) {
			if (!ignored)
				*verdict = ip_vs_leave(svc, skb, pd, iph);
			else
				*verdict = NF_DROP;
			return 0;
		}
	}

	/* drop tcp packet which send to vip and !vport */
	if ((svc == NULL) && sysctl_tcp_drop_entry(ipvs)) {
		svc = ip_vs_lookup_vip(ipvs, af, iph->protocol, &iph->daddr,
			iph->subnet_id);
		if (svc != NULL) {
			IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_TCP_DROP);
			*verdict = NF_DROP;
			return 0;
		}
	}

	/* drop tcp packet which send to only-udp-vip */
	if ((svc == NULL) && sysctl_tcp_drop_for_udp_entry(ipvs)) {
		svc = ip_vs_lookup_vip(ipvs, af, IPPROTO_UDP, &iph->daddr,
			iph->subnet_id);
		if (svc != NULL) {
			IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_TCP_DROP_FOR_UDP);
			*verdict = NF_DROP;
			return 0;
		}
	}


	/* NF_ACCEPT */
	return 1;
}


/*
 * Incrementally patch the TCP checksum after one address and one port
 * have been rewritten (difference update, no full recomputation).
 * tcph->check holds the complemented sum, hence the
 * ~csum_unfold()/csum_fold() pairing.
 */
static inline void
tcp_fast_csum_update(int af, struct tcphdr *tcph,
		     const union nf_inet_addr *oldip,
		     const union nf_inet_addr *newip,
		     __be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		tcph->check =
			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(tcph->check))));
	else
#endif
	/* IPv4 branch (body of the #ifdef'd else above) */
	tcph->check =
		csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
				 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(tcph->check))));
}


/*
 * For CHECKSUM_PARTIAL skbs tcph->check carries the pseudo-header sum
 * (not complemented): fold in the address change and the TCP length
 * change, keeping the stored value in the same representation, hence
 * csum_unfold() without ~ and the outer ~csum_fold().
 */
static inline void
tcp_partial_csum_update(int af, struct tcphdr *tcph,
		     const union nf_inet_addr *oldip,
		     const union nf_inet_addr *newip,
		     __be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		tcph->check =
			~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(tcph->check))));
	else
#endif
	/* IPv4 branch (body of the #ifdef'd else above) */
	tcph->check =
		~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
				ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(tcph->check))));
}

/*
 * Shrink the MSS value advertised in a SYN/SYN-ACK's TCP options:
 *  - clamp it to the outgoing device MTU minus the IP+TCP base
 *    header size, and
 *  - when @toa is non-zero, additionally reserve room for the TOA
 *    (client address) option that will be inserted into later packets.
 * No-op unless the mss_adjust sysctl is enabled.  The TCP checksum is
 * patched incrementally; for non-linear skbs the modified option
 * bytes (copied into the local buffer by skb_header_pointer) are
 * written back with skb_store_bits().
 */
static void tcp_opt_adjust_mss(struct netns_ipvs *ipvs,
		int af, unsigned int tcphoff,
		struct tcphdr *tcph, struct sk_buff *skb, int toa)
{
	unsigned char *ptr = NULL;
	int length;
	/* max TCP header is 60 bytes -> at most 40 bytes of options */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	int th_iph_len;

	if (!sysctl_mss_adjust_entry(ipvs))
		return;

	/*sometimes tcp option allocate in a non linear region,
	so we should use skb_header_pointer func to process data*/
	length = (tcph->doff * 4) - sizeof(struct tcphdr);
	ptr = skb_header_pointer(skb, tcphoff + sizeof(struct tcphdr),
					length, buff);
	if (ptr == NULL)
		return;
	/* standard RFC 793 option walk */
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2)	/* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			if ((opcode == TCPOPT_MSS) && (opsize == TCPOLEN_MSS)) {
				__be16 old = *(__be16 *) ptr;
				__u16 in_mss = ntohs(*(__be16 *) ptr);
#ifdef CONFIG_IP_VS_IPV6
				if (af == AF_INET6)
					th_iph_len = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
				else
#endif
					th_iph_len = sizeof(struct iphdr) + sizeof(struct tcphdr);

				/* clamp MSS so a full segment fits the MTU */
				if (skb->dev && (skb->dev->mtu < (in_mss + th_iph_len)))
					in_mss = skb->dev->mtu - th_iph_len;

				if (toa) {
#ifdef CONFIG_IP_VS_IPV6
					if (af == AF_INET6)
						in_mss -= TCPOLEN_ADDR_V6;
					else
#endif
						in_mss -= TCPOLEN_ADDR;
				}

				*((__be16 *) ptr) = htons(in_mss); /* set mss, 16bit */
				tcph->check = csum_fold(ip_vs_check_diff2(old,
								*(__be16 *) ptr,
						~csum_unfold(tcph->check)));
				/* restore modify data to both linear*/
				if (skb_is_nonlinear(skb)) {
					length = (tcph->doff * 4) - sizeof(struct tcphdr);
					skb_store_bits(skb, tcphoff +
						sizeof(struct tcphdr), buff, length);
				}
				return;
			}

			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

/* save tcp sequense for fullnat/nat, INside to OUTside */
/*
 * Record the sequence state of packets coming from the real server
 * (INside -> OUTside), so an RST can later be forged in either
 * direction (see tcp_send_rst_in/tcp_send_rst_out).  Only active when
 * the conn_expire_tcp_rst sysctl is on; RSTs themselves are ignored.
 * rs_end_seq = first sequence after this segment's payload (or
 * seq + 1 on SYN/ACK); rs_ack_seq = the RS's latest ack.
 */
static void
tcp_save_out_seq(struct sk_buff *skb, struct ip_vs_conn *cp,
		 struct tcphdr *th, int ihl)
{
	if (unlikely(th == NULL) || unlikely(cp == NULL) ||
	    unlikely(skb == NULL))
		return;

	if (sysctl_conn_expire_tcp_rst(cp->ipvs) && !th->rst) {

		/* seq out of order. just skip */
		if (before(ntohl(th->ack_seq), ntohl(cp->rs_ack_seq)) &&
							(cp->rs_ack_seq != 0))
			return;

		if (th->syn && th->ack)
			cp->rs_end_seq = htonl(ntohl(th->seq) + 1);
		else
			/* payload length = total - IP hdr - TCP hdr */
			cp->rs_end_seq = htonl(ntohl(th->seq) + skb->len
					       - ihl - (th->doff << 2));
		cp->rs_ack_seq = th->ack_seq;
		IP_VS_DBG_RL("packet from RS, seq:%u ack_seq:%u, %c%c%c.",
			     ntohl(th->seq), ntohl(th->ack_seq),
			     (th->syn) ? 'S' : '-',
			     (th->ack) ? 'A' : '-',
			     (th->rst) ? 'R' : '-');
		IP_VS_DBG_RL("port:%u->%u", ntohs(th->source), ntohs(th->dest));
	}
}

/*
 * FULLNAT OUTside (RS -> client) sequence fixup: subtract the fnat
 * delta from the ack_seq and from every SACK block so the client sees
 * its own sequence space; the checksum is patched incrementally for
 * each 32-bit change.  Returns 0 only when the TCP options cannot be
 * read (caller drops the packet), 1 otherwise.
 */
static int tcp_out_adjust_seq(struct ip_vs_conn *cp, struct tcphdr *tcph, struct sk_buff *skb)
{
	__u8 i;
	__u8 *ptr = NULL;
	int length;
	__be32 old_seq;
	/* max TCP header is 60 bytes -> at most 40 bytes of options */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];

	/*
	 * FULLNAT ack-seq change
	 */

	old_seq = tcph->ack_seq;
	/* adjust ack sequence */
	tcph->ack_seq = htonl(ntohl(tcph->ack_seq) - cp->fnat_seq.delta);
	/* update checksum */
	tcph->check = csum_fold(ip_vs_check_diff4(old_seq, tcph->ack_seq,
						~csum_unfold(tcph->check)));

	/* adjust sack sequence */
	/*sometimes tcp option allocate in a non linear region,
	so we should use skb_header_pointer func to process data*/
	length = (tcph->doff * 4) - sizeof(struct tcphdr);
	/* NOTE(review): the IPv6 path assumes no extension headers sit
	 * between the IPv6 header and TCP -- confirm.
	 */
	if (cp->af == AF_INET6)
		 ptr = skb_header_pointer(skb, sizeof(struct ipv6hdr)+ sizeof(struct tcphdr),
				 length, buff);
	else
		ptr = skb_header_pointer(skb, ip_hdrlen(skb)+ sizeof(struct tcphdr),
				length, buff);
	if (ptr == NULL)
		return 0;
	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED &&
		*(__be32 *) ptr == htonl((TCPOPT_NOP << 24) |
					(TCPOPT_NOP << 16) |
					(TCPOPT_TIMESTAMP << 8) |
					TCPOLEN_TIMESTAMP))
		return 1;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2)	/* "silly options" */
				return 1;
			if (opsize > length)
				return 1;	/* don't parse partial options */
			if ((opcode == TCPOPT_SACK) &&
			(opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK))
			&& !((opsize - TCPOLEN_SACK_BASE) %
						TCPOLEN_SACK_PERBLOCK)) {
				/* shift the left and right edge of every
				 * SACK block, fixing the checksum per word
				 */
				for (i = 0; i < opsize - TCPOLEN_SACK_BASE;
						i += TCPOLEN_SACK_PERBLOCK) {
					__be32 *tmp = (__be32 *) (ptr + i);
					old_seq = *tmp;
					*tmp = htonl(ntohl(*tmp) -
							cp->fnat_seq.delta);
					tcph->check =
						csum_fold(ip_vs_check_diff4(
								old_seq, *tmp,
						~csum_unfold(tcph->check)));

					tmp++;

					old_seq = *tmp;
					*tmp = htonl(ntohl(*tmp) -
							cp->fnat_seq.delta);
					tcph->check =
						csum_fold(ip_vs_check_diff4(
								old_seq, *tmp,
						~csum_unfold(tcph->check)));
				}
				/* restore modify data to both linear*/
				if (skb_is_nonlinear(skb)) {
					length = (tcph->doff * 4) - sizeof(struct tcphdr);
					if (cp->af == AF_INET6)
						skb_store_bits(skb, sizeof(struct ipv6hdr) + sizeof(struct tcphdr), buff, length);
					else
						skb_store_bits(skb, ip_hdrlen(skb) + sizeof(struct tcphdr), buff, length);
				}
				return 1;
			}

			ptr += opsize - 2;
			length -= opsize;
		}
	}

	return 1;
}

/*
 * Record the first data sequence expected from the real server
 * (its SYN/ACK seq + 1), INside to OUTside; compared against incoming
 * ack_seq in tcp_fnat_in_handler to detect the handshake-completing
 * ACK.
 */
static inline void
tcp_out_init_seq(struct ip_vs_conn *cp, struct tcphdr *tcph)
{
	cp->fnat_seq.fdata_seq = ntohl(tcph->seq) + 1;
}

/*
 * remove tcp timestamp opt in one packet, just set it to TCPOPT_NOP
 * reference to tcp_parse_options in tcp_input.c
 */
/*
 * Overwrite the TCP timestamp option with TCPOPT_NOP bytes and patch
 * the checksum over the changed 10 bytes (padded to 16 for the
 * diff16 helper).  No-op unless the timestamp_remove sysctl is on.
 * For non-linear skbs the mangled bytes are written back with
 * skb_store_bits().
 *
 * NOTE(review): the option offset uses ip_hdrlen(), i.e. the IPv4 ihl
 * field, yet tcp_fnat_in_handler also calls this for IPv6
 * connections -- confirm the IPv6 offset is handled correctly.
 */
static void tcp_opt_remove_timestamp(struct netns_ipvs *ipvs,
		struct tcphdr *tcph, struct sk_buff *skb)
{
	unsigned char *ptr = NULL;
	__be32 old[4], new[4];
	int length;
	int i;
	/* max TCP header is 60 bytes -> at most 40 bytes of options */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];

	if (!sysctl_timestamp_remove_entry(ipvs))
		return;

	/*sometimes tcp option allocate in a non linear region,
	so we should use skb_header_pointer func to process data*/
	length = (tcph->doff * 4) - sizeof(struct tcphdr);
	ptr = skb_header_pointer(skb, ip_hdrlen(skb)+ sizeof(struct tcphdr),
				length, buff);
	if (ptr == NULL)
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2)	/* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			if ((opcode == TCPOPT_TIMESTAMP)
			    && (opsize == TCPOLEN_TIMESTAMP)) {
				/* the length of buf is 16Byte,
				 * but data is 10Byte. zero the buf
				 */
				memset((__u8 *)old, 0, sizeof(old));
				memset((__u8 *)new, 0, sizeof(new));
				memcpy((__u8 *)old, ptr - 2, TCPOLEN_TIMESTAMP);
				for (i = 0; i < TCPOLEN_TIMESTAMP; i++) {
					*(ptr - 2 + i) = TCPOPT_NOP;	/* TCPOPT_NOP replace timestamp opt */
				}
				memcpy((__u8 *)new, ptr - 2,	TCPOLEN_TIMESTAMP);
				tcph->check = csum_fold(ip_vs_check_diff16(
								old, new,
						~csum_unfold(tcph->check)));
				/* restore modify data to both linear and non linear regions*/
				if (skb_is_nonlinear(skb)) {
					length = (tcph->doff * 4) - sizeof(struct tcphdr);
					skb_store_bits(skb, ip_hdrlen(skb)+ sizeof(struct tcphdr), buff, length);
				}
				return;
			}

			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

/*
 * recompute tcp sequence, OUTside to INside;
 */
static void
tcp_in_init_seq(struct ip_vs_conn *cp, struct sk_buff *skb, struct tcphdr *tcph)
{
	struct ip_vs_seq *fseq = &(cp->fnat_seq);
	__u32 seq = ntohl(tcph->seq);
	int conn_reused_entry;

	/* already initialized for this client seq: nothing to do */
	if ((fseq->delta == fseq->init_seq - seq) && (fseq->init_seq != 0)) {
		return;
	}

	/* a new SYN on an existing entry still in SYN states means the
	 * client reused the tuple; re-pick the local ISN if the
	 * conn_reused sysctl allows it
	 */
	conn_reused_entry = (sysctl_conn_reused_entry(cp->ipvs) == 1)
	    && (fseq->init_seq != 0)
	    && ((cp->state == IP_VS_TCP_S_SYN_RECV)
		|| (cp->state == IP_VS_TCP_S_SYN_SENT));
	if ((fseq->init_seq == 0) || conn_reused_entry) {
		/* derive a secure ISN from the local (LIP) tuple */
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			fseq->init_seq = secure_tcpv6_seq(cp->laddr.ip6,
						cp->daddr.ip6,
						cp->lport, cp->dport);
		else
#endif
			fseq->init_seq = secure_tcp_seq(cp->laddr.ip,
						cp->daddr.ip, cp->lport,
						cp->dport);
		/* delta maps client sequence space -> local space */
		fseq->delta = fseq->init_seq - seq;

	}
}

/* adjust tcp sequence, OUTside to INside */
static void tcp_in_adjust_seq(struct ip_vs_conn *cp, struct tcphdr *tcph, struct sk_buff *skb)
{
	__be32 prev_seq;

	/* FULLNAT: shift the client sequence into the local (LIP) space */
	prev_seq = tcph->seq;
	tcph->seq = htonl(ntohl(prev_seq) + cp->fnat_seq.delta);
	/* incrementally patch the checksum for the 32-bit change */
	tcph->check = csum_fold(ip_vs_check_diff4(prev_seq, tcph->seq,
					~csum_unfold(tcph->check)));
}

/*
 * Wrap a prebuilt TCP RST skb in an IPv4 header and transmit it.
 * indir == 1: addressed caddr -> vaddr and pushed through
 * cp->packet_xmit (towards the real server); otherwise addressed
 * daddr (RS) -> laddr and handed to the response xmit path
 * (FULLNAT or plain variant, towards the client).
 */
static void tcp_send_rst_ipv4(int indir, struct sk_buff *skb,
		struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	unsigned int tcphoff;
	struct ip_vs_iphdr ipvsh;
	struct tcphdr *th = NULL;
	struct iphdr *iph =
		(struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	if (iph == NULL) {
		kfree_skb(skb);
		IP_VS_DBG_RL("IPVS: get ip header failed\n");
		return;
	}

	/* build a minimal 20-byte IPv4 header */
	memset(iph, 0, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tot_len = htons(skb->len);
	iph->frag_off = htons(IP_DF);
	iph->ttl = IPDEFTTL;
	iph->protocol = IPPROTO_TCP;
	if (indir == 1) {
		iph->saddr = cp->caddr.ip;
		iph->daddr = cp->vaddr.ip;
	} else {
		iph->saddr = cp->daddr.ip;
		iph->daddr = cp->laddr.ip;

	}

	ip_send_check(iph);
	/* compute the TCP checksum over the fresh segment */
	th = (struct tcphdr *)skb_transport_header(skb);
	th->check = 0;
	tcphoff = sizeof(struct iphdr);
	skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
	skb->protocol = htons(ETH_P_IP);
	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
					  skb->len - tcphoff,
					  IPPROTO_TCP, skb->csum);

	memset(&ipvsh, 0, sizeof(struct ip_vs_iphdr));
	ip_vs_fill_iph_skb(AF_INET, skb, 0, &ipvsh);

	if (indir == 1)
		cp->packet_xmit(skb, cp, pp, &ipvsh);
	else {
		if (IP_VS_FWD_METHOD(cp) >= IP_VS_CONN_F_FULLNAT)
			ip_vs_fnat_response_xmit(skb, pp, cp, &ipvsh);
		else
			ip_vs_normal_response_xmit(skb, pp, cp, &ipvsh);
	}
}

#ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 counterpart of tcp_send_rst_ipv4(): wrap a prebuilt TCP RST
 * skb in an IPv6 header and transmit it.  indir == 1 addresses it
 * caddr -> vaddr and uses cp->packet_xmit; otherwise daddr -> laddr
 * via the response xmit path.
 */
static void tcp_send_rst_ipv6(int indir, struct sk_buff *skb,
		struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	unsigned int tcphoff;
	struct ip_vs_iphdr ipvsh;
	struct tcphdr *th = NULL;
	struct ipv6hdr *iph =
		(struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
	if (iph == NULL) {
		kfree_skb(skb);
		IP_VS_DBG_RL("IPVS: get ip header failed\n");
		return;
	}

	memset(iph, 0, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	if (indir == 1) {
		memcpy(&iph->saddr, &cp->caddr.in6, sizeof(struct in6_addr));
		memcpy(&iph->daddr, &cp->vaddr.in6, sizeof(struct in6_addr));
	} else {
		memcpy(&iph->saddr, &cp->daddr.in6, sizeof(struct in6_addr));
		memcpy(&iph->daddr, &cp->laddr.in6, sizeof(struct in6_addr));
	}

	/* minimal IPv6 header: header-only TCP payload, no ext headers */
	th = (struct tcphdr *)skb_transport_header(skb);
	iph->version = 6;
	iph->nexthdr = NEXTHDR_TCP;
	iph->hop_limit = IPV6_DEFAULT_HOPLIMIT;
	iph->payload_len = htons(sizeof(struct tcphdr));
	th->check = 0;

	tcphoff = sizeof(struct ipv6hdr);
	skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
	skb->protocol = htons(ETH_P_IPV6);
	th->check = csum_ipv6_magic(&iph->saddr, &iph->daddr,
					skb->len - tcphoff,
					IPPROTO_TCP, skb->csum);

	memset(&ipvsh, 0, sizeof(struct ip_vs_iphdr));
	ip_vs_fill_iph_skb(AF_INET6, skb, 0, &ipvsh);

	if (indir == 1)
		cp->packet_xmit(skb, cp, pp, &ipvsh);
	else {
		if (IP_VS_FWD_METHOD(cp) >= IP_VS_CONN_F_FULLNAT)
			ip_vs_fnat_response_xmit_v6(skb, pp, cp, &ipvsh);
		else
			ip_vs_normal_response_xmit_v6(skb, pp, cp, &ipvsh);
	}
}
#endif

/*
 * Fix the TCP checksum of an INside (client -> RS, FULLNAT) packet
 * after both tuples were rewritten (caddr:cport -> laddr:lport and
 * vaddr:vport -> daddr:dport).  Three strategies:
 *  - CHECKSUM_PARTIAL: seed with the new pseudo-header and let the
 *    device finish;
 *  - no app helper (payload untouched): two incremental fast updates;
 *  - app helper present (payload may have changed): full
 *    recomputation over the segment.
 */
static void __tcp_fnat_in_check_sum(struct sk_buff *skb,
			struct ip_vs_conn *cp, struct tcphdr *tcph,
			unsigned int tcphoff)
{
	/* Adjust TCP checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = ~csum_ipv6_magic(&cp->laddr.in6, &cp->daddr.in6,
					(skb->len - tcphoff), IPPROTO_TCP, 0);
		else
#endif
			tcph->check = ~tcp_v4_check((skb->len - tcphoff),
					cp->laddr.ip, cp->daddr.ip, 0);
	} else if (!cp->app) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
				     cp->vport, cp->dport);
		tcp_fast_csum_update(cp->af, tcph, &cp->caddr, &cp->laddr,
				     cp->cport, cp->lport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = csum_ipv6_magic(&cp->laddr.in6,
						      &cp->daddr.in6,
						      skb->len - tcphoff,
						      cp->protocol, skb->csum);
		else
#endif
			tcph->check = csum_tcpudp_magic(cp->laddr.ip,
							cp->daddr.ip,
							skb->len - tcphoff,
							cp->protocol, skb->csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

/*
 * Allocate a fresh skb carrying only a bare TCP RST header with the
 * given source/destination ports; the caller fills in sequence
 * numbers and the network header.  Returns NULL on allocation
 * failure.
 */
static struct sk_buff *tcp_alloc_hdr(__be16 sport, __be16 dport)
{
	struct tcphdr *hdr;
	struct sk_buff *nskb;

	nskb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (unlikely(!nskb)) {
		IP_VS_ERR_RL("alloc skb failed when send rs RST packet\n");
		return NULL;
	}

	/* leave maximal headroom, then lay down just the TCP header */
	skb_reserve(nskb, MAX_TCP_HEADER);
	hdr = (struct tcphdr *)skb_push(nskb, sizeof(struct tcphdr));
	skb_reset_transport_header(nskb);
	nskb->csum = 0;

	/* RST segment: ports set, all other fields zeroed */
	memset(hdr, 0, sizeof(struct tcphdr));
	hdr->source = sport;
	hdr->dest = dport;
	hdr->doff = sizeof(struct tcphdr) >> 2;
	hdr->rst = 1;

	return nskb;
}

/* send reset packet to RS */
/* send reset packet to RS
 *
 * Forge an RST towards the real server for an ESTABLISHED connection
 * being expired: the segment carries the client tuple
 * (cport -> vport) with seq = the RS's last acked sequence
 * (rs_ack_seq, saved by tcp_save_out_seq), shifted back by the fnat
 * delta for FULLNAT forwarding.  Any other state is a no-op.
 */
static void tcp_send_rst_in(struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct sk_buff *skb = NULL;
	struct tcphdr *th = NULL;
	struct ip_vs_dest *dest = cp->dest;

	skb = tcp_alloc_hdr(cp->cport, cp->vport);
	if (unlikely(skb == NULL)) {
		IP_VS_ERR_RL("alloc skb failed when send rs RST packet\n");
		return;
	}

	th = (struct tcphdr *)skb_transport_header(skb);
	if (cp->state == IP_VS_TCP_S_ESTABLISHED) {
		th->seq = cp->rs_ack_seq;
		/* Be careful! fullnat */
		if (IP_VS_FWD_METHOD(cp) >= IP_VS_CONN_F_FULLNAT)
			th->seq = htonl(ntohl(th->seq) - cp->fnat_seq.delta);

		if (likely(dest != NULL)) {
			IP_VS_INC_DEST_ESTATS(dest, rst_in_established);
		}
		IP_VS_INC_ESTATS(ip_vs_esmib, RST_IN_ESTABLISHED);
	} else {
		kfree_skb(skb);
		IP_VS_DBG_RL("IPVS: Is SYN_SENT or ESTABLISHED ?");
		return;
	}

	IP_VS_DBG_RL("IPVS: rst to rs seq: %u", htonl(th->seq));

#ifdef CONFIG_IP_VS_IPV6
	if (cp->af == AF_INET6)
		tcp_send_rst_ipv6(1, skb, pp, cp);
	else
#endif
		tcp_send_rst_ipv4(1, skb, pp, cp);
}

/* send reset packet to client */
/* send reset packet to client
 *
 * Forge an RST towards the client for an ESTABLISHED connection being
 * expired: source port is the RS port (dport), destination is lport
 * for FULLNAT (translated back by the response xmit path) or cport
 * otherwise, and seq = rs_end_seq saved by tcp_save_out_seq().
 */
static void tcp_send_rst_out(struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct sk_buff *skb = NULL;
	struct tcphdr *th = NULL;
	__be16 dport;

	if (IP_VS_FWD_METHOD(cp) >= IP_VS_CONN_F_FULLNAT)
		dport = cp->lport;
	else
		dport = cp->cport;

	skb = tcp_alloc_hdr(cp->dport, dport);
	if (unlikely(skb == NULL)) {
		IP_VS_ERR_RL("alloc skb failed when send client RST packet\n");
		return;
	}

	th = (struct tcphdr *)skb_transport_header(skb);
	if (cp->state == IP_VS_TCP_S_ESTABLISHED) {
		th->seq = cp->rs_end_seq;
		IP_VS_INC_ESTATS(ip_vs_esmib, RST_OUT_ESTABLISHED);
	} else {
		kfree_skb(skb);
		IP_VS_DBG_RL("IPVS: Is in SYN_SENT or ESTABLISHED ?");
		return;
	}

	IP_VS_DBG_RL("IPVS: rst to client seq: %u", htonl(th->seq));

#ifdef CONFIG_IP_VS_IPV6
	if (cp->af == AF_INET6)
		tcp_send_rst_ipv6(0, skb, pp, cp);
	else
#endif
		tcp_send_rst_ipv4(0, skb, pp, cp);
}

/*
 * add client address in tcp option
 * alloc a new skb, and free the old skb
 * return new skb
 */
/*
 * Insert the TOA (client address + port) TCP option into an IPv4
 * packet.  Makes an enlarged linear copy of @old_skb, frees the
 * original, shifts everything after the base TCP header down by
 * sizeof(struct ip_vs_tcpo_addr) and places the TOA option first.
 * Updates *tcph to point into the new skb.  On any failure the
 * original skb is returned unmodified (never NULL), with a stats
 * counter recording the reason.
 */
static struct sk_buff *tcp_opt_add_toa(struct ip_vs_conn *cp,
				       struct sk_buff *old_skb,
				       struct tcphdr **tcph)
{
	__u32 mtu;
	struct sk_buff *new_skb = NULL;
	struct ip_vs_tcpo_addr *toa;
	unsigned int tcphoff;
	struct tcphdr *th;
	__u8 *p = NULL;
	__u8 *q = NULL;

	/* now only process IPV4 */
	if (cp->af != AF_INET) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_FAIL_PROTO);
		return old_skb;
	}

	/* skb length and tcp option length checking */
	mtu = dst_mtu((struct dst_entry *)skb_dst(old_skb));
	if (!gso_ok(old_skb, skb_dst(old_skb)->dev) &&
	    old_skb->len > (mtu - sizeof(struct ip_vs_tcpo_addr))) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_FAIL_LEN);
		return old_skb;
	}

	/* the maximum length of TCP head is 60 bytes, so only 40 bytes for options */
	if ((60 - ((*tcph)->doff << 2)) < sizeof(struct ip_vs_tcpo_addr)) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_HEAD_FULL);
		return old_skb;
	}

	/* copy all skb, plus ttm space , new skb is linear */
	new_skb = skb_copy_expand(old_skb,
				  skb_headroom(old_skb),
				  skb_tailroom(old_skb) +
				  sizeof(struct ip_vs_tcpo_addr), GFP_ATOMIC);
	if (new_skb == NULL) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_FAIL_MEM);
		return old_skb;
	}

	/* free old skb */
	kfree_skb(old_skb);

	/*
	 * add client ip
	 */
	tcphoff = ip_hdrlen(new_skb);
	/* get new tcp header */
	*tcph = th =
	    (struct tcphdr *)(skb_network_header(new_skb) + tcphoff);

	/* ptr to old opts */
	p = skb_tail_pointer(new_skb) - 1;
	q = p + sizeof(struct ip_vs_tcpo_addr);

	/* move data down, offset is sizeof(struct ip_vs_tcpo_addr) */
	/* copy backwards from the tail so the regions may overlap */
	while (p >= ((__u8 *) th + sizeof(struct tcphdr))) {
		*q = *p;
		p--;
		q--;
	}

	/* move tail to new postion */
	new_skb->tail += sizeof(struct ip_vs_tcpo_addr);

	/* put client ip opt , ptr point to opts */
	toa = (struct ip_vs_tcpo_addr *)(th + 1);
	toa->opcode = TCPOPT_ADDR;
	toa->opsize = TCPOLEN_ADDR;
	toa->port = cp->cport;
	toa->addr = cp->caddr.ip;

	/* reset tcp header length */
	th->doff += sizeof(struct ip_vs_tcpo_addr) / 4;
	/* reset ip header totoal length */
	ip_hdr(new_skb)->tot_len =
	    htons(ntohs(ip_hdr(new_skb)->tot_len) +
		  sizeof(struct ip_vs_tcpo_addr));
	/* reset skb length */
	new_skb->len += sizeof(struct ip_vs_tcpo_addr);

	/* re-calculate tcp csum */
	th->check = 0;
	new_skb->csum = skb_checksum(new_skb, tcphoff,
					new_skb->len - tcphoff, 0);
	th->check = csum_tcpudp_magic(cp->caddr.ip,
					cp->vaddr.ip,
					new_skb->len - tcphoff,
					cp->protocol, new_skb->csum);

	/* re-calculate ip head csum, tot_len has been adjusted */
	ip_send_check(ip_hdr(new_skb));

	if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
		new_skb->ip_summed = CHECKSUM_COMPLETE;
		if (sysctl_ipvs_toa_not_gso(cp->ipvs))
			skb_shinfo(new_skb)->gso_size = 0;
	}

	IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_OK);

	return new_skb;
}

#ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 counterpart of tcp_opt_add_toa(): insert the v6 TOA (client
 * address + port) option into the packet.  Makes an enlarged linear
 * copy, frees the original, shifts the data after the base TCP
 * header and places the option first; updates *tcph.  On any failure
 * the original skb is returned unmodified (never NULL).
 */
static struct sk_buff *tcp_opt_add_toa_v6(struct ip_vs_conn *cp,
				       struct sk_buff *old_skb,
				       struct tcphdr **tcph)
{
	__u32 mtu;
	struct sk_buff *new_skb = NULL;
	struct ip_vs_tcpo_addr_v6 *toa;
	unsigned int tcphoff;
	struct tcphdr *th;
	__u8 *p = NULL;
	__u8 *q = NULL;

	/* IPV6 */
	if (cp->af != AF_INET6) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_FAIL_PROTO);
		return old_skb;
	}

	/* skb length and tcph length checking */
	mtu = dst_mtu(skb_dst(old_skb));
	if (!gso_ok(old_skb, skb_dst(old_skb)->dev) &&
	    old_skb->len > (mtu - sizeof(struct ip_vs_tcpo_addr_v6))) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_FAIL_LEN);
		return old_skb;
	}

	/* the maximum length of TCP head is 60 bytes, so only 40 bytes for options */
	if ((60 - ((*tcph)->doff << 2)) < sizeof(struct ip_vs_tcpo_addr_v6)) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_HEAD_FULL);
		return old_skb;
	}

	/* copy all skb, plus ttm space , new skb is linear */
	new_skb = skb_copy_expand(old_skb,
				  skb_headroom(old_skb),
				  skb_tailroom(old_skb) +
				  sizeof(struct ip_vs_tcpo_addr_v6), GFP_ATOMIC);
	if (new_skb == NULL) {
		IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_FAIL_MEM);
		return old_skb;
	}

	/* free old skb */
	kfree_skb(old_skb);

	/*
	 * add client ip
	 */
	tcphoff = sizeof(struct ipv6hdr);
	/* get new tcp header */
	*tcph = th =
	    (struct tcphdr *)(skb_network_header(new_skb) + tcphoff);

	/* ptr to old opts */
	p = skb_tail_pointer(new_skb) - 1;
	q = p + sizeof(struct ip_vs_tcpo_addr_v6);

	/* move data down, offset is sizeof(struct ip_vs_tcpo_addr) */
	/* copy backwards from the tail so the regions may overlap */
	while (p >= ((__u8 *) th + sizeof(struct tcphdr))) {
		*q = *p;
		p--;
		q--;
	}

	/* move tail to new postion */
	new_skb->tail += sizeof(struct ip_vs_tcpo_addr_v6);

	/* put client ip opt , ptr point to opts */
	toa = (struct ip_vs_tcpo_addr_v6 *)(th + 1);
	toa->opcode = TCPOPT_ADDR_V6;
	toa->opsize = TCPOLEN_ADDR_V6;
	toa->port = cp->cport;
	toa->addr = cp->caddr.in6;

	/* reset tcp header length */
	th->doff += sizeof(struct ip_vs_tcpo_addr_v6) >> 2;
	/* reset ip header totoal length */
	ipv6_hdr(new_skb)->payload_len =
	    htons(ntohs(ipv6_hdr(new_skb)->payload_len) +
		  sizeof(struct ip_vs_tcpo_addr_v6));
	/* reset skb length */
	new_skb->len += sizeof(struct ip_vs_tcpo_addr_v6);

	/* re-calculate tcp csum */
	th->check = 0;
	new_skb->csum = skb_checksum(new_skb, tcphoff,
					new_skb->len - tcphoff, 0);
	th->check = csum_ipv6_magic(&cp->caddr.in6,
					&cp->vaddr.in6,
					new_skb->len - tcphoff,
					cp->protocol, new_skb->csum);

	if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
		new_skb->ip_summed = CHECKSUM_COMPLETE;
		if (sysctl_ipvs_toa_not_gso(cp->ipvs))
			skb_shinfo(new_skb)->gso_size = 0;
	}

	IP_VS_INC_ESTATS(ip_vs_esmib, FULLNAT_ADD_TOA_OK);

	return new_skb;
}
#endif

/*
 * NAT source translation (RS -> client direction): rewrite the source
 * port to vport, record RS sequence state for RST forging, shrink the
 * advertised MSS on SYN/ACK, then fix the checksum (the delta update
 * also covers the daddr -> vaddr address rewrite done outside this
 * function).  Returns 1 to continue, 0 to drop.
 */
INDIRECT_CALLABLE_SCOPE int
tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct tcphdr *tcph;
	unsigned int tcphoff = iph->len;
	bool payload_csum = false;
	int oldlen;

#ifdef CONFIG_IP_VS_IPV6
	/* non-first IPv6 fragments carry no TCP header to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - tcphoff;

	/* csum_check requires unshared skb */
	if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (!tcp_csum_check(cp->af, skb, pp))
			return 0;

		/* Call application helper if needed */
		ret = ip_vs_app_pkt_out(cp, skb, iph);
		if (!ret)
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - tcphoff;
		else
			payload_csum = true;
	}

	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	tcp_save_out_seq(skb, cp, tcph, tcphoff);
	tcph->source = cp->vport;

	/* server's SYN/ACK: clamp the MSS it advertises */
	if (tcph->syn && tcph->ack) {
		tcp_opt_adjust_mss(cp->ipvs, cp->af, tcphoff, tcph, skb, 0);
	}

	/* Adjust TCP checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
					htons(oldlen),
					htons(skb->len - tcphoff));
	} else if (!payload_csum) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
				     cp->dport, cp->vport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = cp->app ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
						      &cp->caddr.in6,
						      skb->len - tcphoff,
						      cp->protocol, skb->csum);
		else
#endif
			tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
							cp->caddr.ip,
							skb->len - tcphoff,
							cp->protocol,
							skb->csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, tcph->check,
			  (char *)&(tcph->check) - (char *)tcph);
	}
	return 1;
}


/*
 * NAT destination translation (client -> RS direction): rewrite the
 * destination port to dport and adjust checksums (the delta update
 * also covers the vaddr -> daddr address rewrite done outside this
 * function); shrink the advertised MSS on the client's initial SYN.
 * Returns 1 to continue, 0 to drop.
 *
 * Fix: the SYN test used bitwise '&' on the flag bits
 * (tcph->syn & !tcph->ack); logical '&&' is the intended operator
 * (equivalent here only because both operands are 0/1 bitfields).
 */
static int
tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct tcphdr *tcph;
	unsigned int tcphoff = iph->len;
	bool payload_csum = false;
	int oldlen;

#ifdef CONFIG_IP_VS_IPV6
	/* non-first IPv6 fragments carry no TCP header to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - tcphoff;

	/* csum_check requires unshared skb */
	if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (!tcp_csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Attempt ip_vs_app call.
		 *	It will fix ip_vs_conn and iph ack_seq stuff
		 */
		ret = ip_vs_app_pkt_in(cp, skb, iph);
		if (!ret)
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - tcphoff;
		else
			payload_csum = true;
	}

	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	tcph->dest = cp->dport;

	/* client's initial SYN (no ACK): clamp the advertised MSS */
	if (tcph->syn && !tcph->ack) {
		tcp_opt_adjust_mss(cp->ipvs, cp->af, tcphoff, tcph, skb, 0);
	}

	/*
	 *	Adjust TCP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
					htons(oldlen),
					htons(skb->len - tcphoff));
	} else if (!payload_csum) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
				     cp->vport, cp->dport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = cp->app ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = csum_ipv6_magic(&cp->caddr.in6,
						      &cp->daddr.in6,
						      skb->len - tcphoff,
						      cp->protocol, skb->csum);
		else
#endif
			tcph->check = csum_tcpudp_magic(cp->caddr.ip,
							cp->daddr.ip,
							skb->len - tcphoff,
							cp->protocol,
							skb->csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}

/*
 * FULLNAT client -> RS handler: on the initial SYN remove the
 * timestamp option, pick the local initial sequence and clamp the
 * MSS; inject the TOA (client address) option when allowed; shift the
 * sequence into the local space, rewrite both ports, then fix the
 * checksum.  May replace *skb_p when TOA insertion copies the skb.
 * Returns 1 to continue, 0 to drop.
 *
 * Fixes: the SYN test used bitwise '&' on the flag bits where logical
 * '&&' is intended (equivalent only because both operands are 0/1
 * bitfields); the local 'oldlen' was computed but never used and is
 * removed.
 */
static int
tcp_fnat_in_handler(struct sk_buff **skb_p, struct ip_vs_protocol *pp,
		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct tcphdr *tcph;
	unsigned int tcphoff = iph->len;
	struct sk_buff *skb = *skb_p;

#ifdef CONFIG_IP_VS_IPV6
	/* non-first IPv6 fragments carry no TCP header to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif

	/* csum_check requires an unshared skb; run the app helper (if
	 * any) before mangling the header.
	 */
	if ((skb_ensure_writable(skb, tcphoff + sizeof(*tcph))) ||
		(unlikely(cp->app != NULL) &&
		((pp->csum_check && !pp->csum_check(cp->af, skb, pp)) ||
		(!ip_vs_app_pkt_in(cp, skb, iph))))) {
			return 0;
	}

	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);

	/* client's initial SYN (no ACK) */
	if (tcph->syn && !tcph->ack) {
		cp->toa_init = 0;
		tcp_opt_remove_timestamp(cp->ipvs, tcph, skb);
		tcp_in_init_seq(cp, skb, tcph);
		tcp_opt_adjust_mss(cp->ipvs, cp->af, tcphoff, tcph, skb, 0);
	}

	/* Insert TOA on every packet, or (toa_not_data mode) once on the
	 * first flag-free ACK whose ack_seq matches the expected
	 * handshake-completing value (fdata_seq).
	 */
	if (!sysctl_toa_not_data(cp->ipvs) ||
		(!cp->toa_init && (ntohl(tcph->ack_seq) == cp->fnat_seq.fdata_seq) &&
		!tcph->syn && !tcph->rst && !tcph->fin)) {
		cp->toa_init = 1;
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			skb = *skb_p = tcp_opt_add_toa_v6(cp, skb, &tcph);
		else
#endif
			skb = *skb_p = tcp_opt_add_toa(cp, skb, &tcph);
	}

	/* shift the client sequence into local space */
	tcp_in_adjust_seq(cp, tcph, skb);

	/* rewrite ports to the local-address / real-server tuple */
	tcph->source = cp->lport;
	tcph->dest = cp->dport;

	__tcp_fnat_in_check_sum(skb, cp, tcph, tcphoff);

	return 1;
}

/*
 * Mangle an outbound (real server -> client) packet for full-NAT:
 * rewrite ports back to the virtual-service/client tuple, adjust MSS and
 * sequence numbers on SYN+ACK, then recompute the TCP checksum.
 *
 * Returns 1 on success, 0 when the packet must be dropped.
 */
static int
tcp_fnat_out_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct tcphdr *tcph;
	unsigned int tcphoff = iph->len;

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no TCP header to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif

	/* csum_check requires unshared skb */
	if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/* Call application helper if needed */
		if (!ip_vs_app_pkt_out(cp, skb, iph))
			return 0;
	}

	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	/* Record the RS sequence before we rewrite anything */
	tcp_save_out_seq(skb, cp, tcph, tcphoff);
	/* Rewrite ports back to virtual-service source / client dest */
	tcph->source = cp->vport;
	tcph->dest = cp->cport;

	if (tcph->syn && tcph->ack) {
		tcp_opt_adjust_mss(cp->ipvs, cp->af, tcphoff, tcph, skb, 1);
	}

	if (tcp_out_adjust_seq(cp, tcph, skb) == 0) {
		return 0;
	}

	/* Must run after tcp_out_adjust_seq: captures the RS initial seq */
	if (tcph->syn && tcph->ack) {
		tcp_out_init_seq(cp, tcph);
	}

	/* Adjust TCP checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Hardware will finish the csum; seed the pseudo-header */
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = ~csum_ipv6_magic(&cp->vaddr.in6, &cp->caddr.in6,
					(skb->len - tcphoff), IPPROTO_TCP, 0);
		else
#endif
			tcph->check = ~tcp_v4_check((skb->len - tcphoff),
					cp->vaddr.ip, cp->caddr.ip, 0);
	} else if (!cp->app) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
				     cp->dport, cp->vport);
		tcp_fast_csum_update(cp->af, tcph, &cp->laddr, &cp->caddr,
				     cp->lport, cp->cport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* App helper may have changed the payload: full checksum
		 * calculation over the whole segment.
		 */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
						      &cp->caddr.in6,
						      skb->len - tcphoff,
						      cp->protocol, skb->csum);
		else
#endif
			tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
							cp->caddr.ip,
							skb->len - tcphoff,
							cp->protocol, skb->csum);

		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			pp->name, tcph->check,
			(char *)&(tcph->check) - (char *)tcph);
	}
	return 1;
}

/*
 * On connection expiry, optionally tear down both ends with RST.
 * Only active when the conn_expire_tcp_rst sysctl is enabled and the
 * forwarding method rewrites packets (FNAT or NAT/masquerade).
 */
static void
tcp_conn_expire_handler(struct net *net, struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	/* support fullnat and nat */
	/* NOTE(review): the `>=` compares forwarding-method flag values
	 * numerically; looks like it relies on FULLNAT-and-above ordering
	 * in the flag space — confirm against IP_VS_CONN_F_* definitions.
	 */
	if (sysctl_conn_expire_tcp_rst(ipvs) &&
	    ((IP_VS_FWD_METHOD(cp) >= IP_VS_CONN_F_FULLNAT) ||
	    (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ))) {
		/* send reset packet to RS */
		tcp_send_rst_in(pp, cp);
		/* send reset packet to client */
		tcp_send_rst_out(pp, cp);
	}
}

/*
 * Verify the TCP checksum of an incoming packet.
 * Returns 1 when the checksum is valid (or already verified by hardware),
 * 0 on a checksum failure.
 */
static int
tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	unsigned int tcphoff;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		tcphoff = sizeof(struct ipv6hdr);
	else
#endif
		tcphoff = ip_hdrlen(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		/* No checksum yet: compute over the TCP segment, then verify
		 * it together with the pseudo-header below.
		 */
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		fallthrough;
	case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6) {
			if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					    &ipv6_hdr(skb)->daddr,
					    skb->len - tcphoff,
					    ipv6_hdr(skb)->nexthdr,
					    skb->csum)) {
				IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
						 "Failed checksum for");
				return 0;
			}
		} else
#endif
			if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      skb->len - tcphoff,
					      ip_hdr(skb)->protocol,
					      skb->csum)) {
				IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
						 "Failed checksum for");
				return 0;
			}
		break;
	default:
		/* No need to checksum. */
		break;
	}

	return 1;
}


/* Row offsets into the state tables below: each direction owns four
 * consecutive rows (syn/fin/ack/rst), hence the stride of 4.
 */
#define TCP_DIR_INPUT		0
#define TCP_DIR_OUTPUT		4
#define TCP_DIR_INPUT_ONLY	8

/* Map an IP_VS_DIR_* direction to its state-table row offset. */
static const int tcp_state_off[IP_VS_DIR_LAST] = {
	[IP_VS_DIR_INPUT]		=	TCP_DIR_INPUT,
	[IP_VS_DIR_OUTPUT]		=	TCP_DIR_OUTPUT,
	[IP_VS_DIR_INPUT_ONLY]		=	TCP_DIR_INPUT_ONLY,
};

/*
 *	Timeout table[state] — default per-state connection timeouts
 *	(in jiffies), used when no per-netns table is available.
 */
static const int tcp_timeouts[IP_VS_TCP_S_LAST + 1] = {
	[IP_VS_TCP_S_NONE]		=	2*HZ,
	[IP_VS_TCP_S_ESTABLISHED]	=	15*60*HZ,
	[IP_VS_TCP_S_SYN_SENT]		=	2*60*HZ,
	[IP_VS_TCP_S_SYN_RECV]		=	1*60*HZ,
	[IP_VS_TCP_S_FIN_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_TIME_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_CLOSE]		=	10*HZ,
	[IP_VS_TCP_S_CLOSE_WAIT]	=	60*HZ,
	[IP_VS_TCP_S_LAST_ACK]		=	30*HZ,
	[IP_VS_TCP_S_LISTEN]		=	2*60*HZ,
	[IP_VS_TCP_S_SYNACK]		=	120*HZ,
	[IP_VS_TCP_S_LAST]		=	2*HZ,
};

/* FNAT timeout table: much shorter timeouts than the classic table
 * (e.g. 90s ESTABLISHED vs 15min) — this is the table actually copied
 * into each netns by __ip_vs_tcp_init() below.
 */
static const int tcp_timeouts_fnat[IP_VS_TCP_S_LAST + 1] = {
	[IP_VS_TCP_S_NONE] = 2 * HZ,
	[IP_VS_TCP_S_ESTABLISHED] = 90 * HZ,
	[IP_VS_TCP_S_SYN_SENT] = 3 * HZ,
	[IP_VS_TCP_S_SYN_RECV] = 30 * HZ,
	[IP_VS_TCP_S_FIN_WAIT] = 3 * HZ,
	[IP_VS_TCP_S_TIME_WAIT] = 3 * HZ,
	[IP_VS_TCP_S_CLOSE] = 3 * HZ,
	[IP_VS_TCP_S_CLOSE_WAIT] = 3 * HZ,
	[IP_VS_TCP_S_LAST_ACK] = 3 * HZ,
	[IP_VS_TCP_S_LISTEN] = 2 * 60 * HZ,
	[IP_VS_TCP_S_SYNACK] = 30 * HZ,
	[IP_VS_TCP_S_LAST] = 2 * HZ,
};

/* Human-readable names for each IPVS TCP state (for /proc and debug). */
static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST + 1] = {
	[IP_VS_TCP_S_NONE]		=	"NONE",
	[IP_VS_TCP_S_ESTABLISHED]	=	"ESTABLISHED",
	[IP_VS_TCP_S_SYN_SENT]		=	"SYN_SENT",
	[IP_VS_TCP_S_SYN_RECV]		=	"SYN_RECV",
	[IP_VS_TCP_S_FIN_WAIT]		=	"FIN_WAIT",
	[IP_VS_TCP_S_TIME_WAIT]		=	"TIME_WAIT",
	[IP_VS_TCP_S_CLOSE]		=	"CLOSE",
	[IP_VS_TCP_S_CLOSE_WAIT]	=	"CLOSE_WAIT",
	[IP_VS_TCP_S_LAST_ACK]		=	"LAST_ACK",
	[IP_VS_TCP_S_LISTEN]		=	"LISTEN",
	[IP_VS_TCP_S_SYNACK]		=	"SYNACK",
	[IP_VS_TCP_S_LAST]		=	"BUG!",
};

/* Whether a state counts toward a destination's activeconns (true) or
 * inactconns (false) — consumed by set_tcp_state() accounting.
 */
static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = {
	[IP_VS_TCP_S_NONE]		=	false,
	[IP_VS_TCP_S_ESTABLISHED]	=	true,
	[IP_VS_TCP_S_SYN_SENT]		=	true,
	[IP_VS_TCP_S_SYN_RECV]		=	true,
	[IP_VS_TCP_S_FIN_WAIT]		=	false,
	[IP_VS_TCP_S_TIME_WAIT]		=	false,
	[IP_VS_TCP_S_CLOSE]		=	false,
	[IP_VS_TCP_S_CLOSE_WAIT]	=	false,
	[IP_VS_TCP_S_LAST_ACK]		=	false,
	[IP_VS_TCP_S_LISTEN]		=	false,
	[IP_VS_TCP_S_SYNACK]		=	true,
};

/* Short state aliases used to keep the transition tables readable. */
#define sNO IP_VS_TCP_S_NONE
#define sES IP_VS_TCP_S_ESTABLISHED
#define sSS IP_VS_TCP_S_SYN_SENT
#define sSR IP_VS_TCP_S_SYN_RECV
#define sFW IP_VS_TCP_S_FIN_WAIT
#define sTW IP_VS_TCP_S_TIME_WAIT
#define sCL IP_VS_TCP_S_CLOSE
#define sCW IP_VS_TCP_S_CLOSE_WAIT
#define sLA IP_VS_TCP_S_LAST_ACK
#define sLI IP_VS_TCP_S_LISTEN
#define sSA IP_VS_TCP_S_SYNACK

/* One table row: next state indexed by the connection's current state. */
struct tcp_states_t {
	int next_state[IP_VS_TCP_S_LAST];
};

/* Set the timeout for the state named `sname` (e.g. "ESTABLISHED") to
 * `to` in this netns' table; thin wrapper around ip_vs_set_state_timeout().
 */
static int tcp_set_state_timeout(struct ip_vs_proto_data *pd, char *sname, int to)
{
	return ip_vs_set_state_timeout(pd->timeout_table, IP_VS_TCP_S_LAST,
				       tcp_state_name_table, sname, to);
}

/*
 * Resolve the timeout for a connection in `state`.  Per-connection
 * overrides (cp->tcp_timeout / cp->tcp_fin_timeout, when non-zero) win
 * over the per-netns table for ESTABLISHED and FIN_WAIT respectively.
 */
static int tcp_get_state_timeout(struct ip_vs_conn *cp,
		struct ip_vs_proto_data *pd,
		int state)
{
	switch (state) {
	case IP_VS_TCP_S_ESTABLISHED:
		if (cp->tcp_timeout)
			return cp->tcp_timeout;
		break;
	case IP_VS_TCP_S_FIN_WAIT:
		if (cp->tcp_fin_timeout)
			return cp->tcp_fin_timeout;
		break;
	}
	return pd->timeout_table[state];
}

/* Return the printable name for `state`; "ERR!" for out-of-range values,
 * "?" for any unnamed in-range slot.
 */
static const char *tcp_state_name(int state)
{
	const char *name;

	if (state >= IP_VS_TCP_S_LAST)
		return "ERR!";
	name = tcp_state_name_table[state];
	return name ? name : "?";
}

/* True when `state` should be accounted as an active connection;
 * out-of-range states are treated as inactive.
 */
static bool tcp_state_active(int state)
{
	return (state < IP_VS_TCP_S_LAST) ? tcp_state_active_table[state] : false;
}

/* Normal-mode transition table: rows are (direction x event) — events in
 * order syn/fin/ack/rst — columns are the current state.  Indexed as
 * tcp_state_table[state_off + event_idx].next_state[cp->state].
 */
static struct tcp_states_t tcp_states[] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR } },
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW } },
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES } },
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR } },

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR } },
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW } },
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES } },
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL } },

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR } },
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW } },
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES } },
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL } },
};

/* "secure_tcp" (DoS-defense) transition table: same layout as tcp_states
 * but more conservative — e.g. an input ACK in SYN_RECV stays in SYN_RECV
 * until the server side confirms, limiting state held for spoofed floods.
 */
static struct tcp_states_t tcp_states_dos[] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA } },
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA } },
/*ack*/ {{sES, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA } },
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL } },

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA } },
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW } },
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES } },
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL } },

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA } },
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW } },
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES } },
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL } },
};

/*
 * Switch this netns between the normal and the DoS-defense ("secure_tcp")
 * transition tables.  Bit 0 of `flags` carries the secure_tcp setting.
 *
 * FIXME: change secure_tcp to independent sysctl var or make it
 * per-service or per-app because it is valid for most if not for all of
 * the applications. Something like "capabilities" (flags) for each object.
 */
static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
{
	if (flags & 1)
		pd->tcp_state_table = tcp_states_dos;
	else
		pd->tcp_state_table = tcp_states;
}

/* Map TCP header flags to the event row index used by the state tables:
 * 3 = rst, 0 = syn, 1 = fin, 2 = ack (in that precedence order);
 * -1 when none of the four flags is set.
 */
static inline int tcp_state_idx(struct tcphdr *th)
{
	if (th->rst)
		return 3;
	if (th->syn)
		return 0;
	if (th->fin)
		return 1;
	return th->ack ? 2 : -1;
}

/*
 * Core state machine: advance cp->state according to the observed packet
 * (direction + TCP flags), update the destination's active/inactive
 * connection counters and extended stats, and refresh cp->timeout.
 * Caller must hold cp->lock.
 */
static inline void
set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
	      int direction, struct tcphdr *th)
{
	int state_idx;
	int new_state = IP_VS_TCP_S_CLOSE;
	int state_off = tcp_state_off[direction];

	/*
	 *    Update state offset to INPUT_ONLY if necessary
	 *    or delete NO_OUTPUT flag if output packet detected
	 */
	if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
		if (state_off == TCP_DIR_OUTPUT)
			cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
		else
			state_off = TCP_DIR_INPUT_ONLY;
	}
	/* No known flag set: keep the default new_state of CLOSE */
	state_idx = tcp_state_idx(th);
	if (state_idx < 0) {
		IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
		goto tcp_state_out;
	}

	new_state =
		pd->tcp_state_table[state_off + state_idx].next_state[cp->state];

  tcp_state_out:
	if (new_state != cp->state) {
		struct ip_vs_dest *dest = cp->dest;

		IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] c:%s:%d v:%s:%d "
			      "d:%s:%d state: %s->%s conn->refcnt:%d\n",
			      pd->pp->name,
			      ((state_off == TCP_DIR_OUTPUT) ?
			       "output " : "input "),
			      th->syn ? 'S' : '.',
			      th->fin ? 'F' : '.',
			      th->ack ? 'A' : '.',
			      th->rst ? 'R' : '.',
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport),
			      IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
			      ntohs(cp->dport),
			      tcp_state_name(cp->state),
			      tcp_state_name(new_state),
			      refcount_read(&cp->refcnt));

		if (dest) {
			/* Move the connection between the destination's
			 * active/inactive counters when the new state's
			 * classification differs (sync'd conns excluded).
			 */
			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
			    !tcp_state_active(new_state)) {
				if (!IP_VS_FWD_SYNC(cp)) {
					atomic_dec(&dest->activeconns);
					atomic_inc(&dest->inactconns);
				}
				cp->flags |= IP_VS_CONN_F_INACTIVE;
			} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
				   tcp_state_active(new_state)) {
				if (!IP_VS_FWD_SYNC(cp)) {
					atomic_inc(&dest->activeconns);
					atomic_dec(&dest->inactconns);
				}
				cp->flags &= ~IP_VS_CONN_F_INACTIVE;
			}
			/*increase count of two type tcp close connections*/
			if (th->fin && cp->state == IP_VS_TCP_S_ESTABLISHED) {
				if (direction == IP_VS_DIR_OUTPUT) {
					IP_VS_INC_DEST_ESTATS(dest, server_close_conns);
				} else if (direction == IP_VS_DIR_INPUT ||
					   direction == IP_VS_DIR_INPUT_ONLY) {
					IP_VS_INC_DEST_ESTATS(dest, client_close_conns);
				}
			}
			/*increase count of two type tcp reset connections*/
			if (th->rst) {
				if (direction == IP_VS_DIR_OUTPUT)
					IP_VS_INC_DEST_ESTATS(dest, server_reset_conns);
				else if (direction == IP_VS_DIR_INPUT || direction == IP_VS_DIR_INPUT_ONLY)
					IP_VS_INC_DEST_ESTATS(dest, client_reset_conns);
			}
		}
		/* Connection reached ESTABLISHED: pin the conntrack entry */
		if (new_state == IP_VS_TCP_S_ESTABLISHED)
			ip_vs_control_assure_ct(cp);
	}

	/* NOTE(review): pd was already dereferenced above (pd->pp,
	 * pd->tcp_state_table), so this NULL check can only matter on the
	 * state_idx < 0 shortcut path — confirm whether pd can be NULL here.
	 */
	if (likely(pd)) {
		cp->state = new_state;
		cp->timeout = (pd->pp->get_state_timeout != NULL) ?
			pd->pp->get_state_timeout(cp, pd, cp->state) :
			pd->timeout_table[cp->state]; }
	else	/* What to do ? */
		cp->timeout = tcp_timeouts[cp->state = new_state];
}

/*
 *	Handle state transitions: locate the TCP header in the packet and
 *	feed its flags to set_tcp_state() under the connection lock.
 *	Silently ignores packets too short to carry a TCP header.
 */
static void
tcp_state_transition(struct ip_vs_conn *cp, int direction,
		     const struct sk_buff *skb,
		     struct ip_vs_proto_data *pd)
{
	struct tcphdr _tcph, *tcph;
	int ihl;

#ifdef CONFIG_IP_VS_IPV6
	ihl = (cp->af == AF_INET) ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
#else
	ihl = ip_hdrlen(skb);
#endif

	tcph = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
	if (!tcph)
		return;

	spin_lock_bh(&cp->lock);
	set_tcp_state(pd, cp, direction, tcph);
	spin_unlock_bh(&cp->lock);
}

/* Hash a network-order port into the tcp_apps table: fold the high bits
 * onto the low bits, then mask to the table size.
 */
static inline __u16 tcp_app_hashkey(__be16 port)
{
	__u16 p = (__force u16)port;

	return (p ^ (p >> TCP_APP_TAB_BITS)) & TCP_APP_TAB_MASK;
}


/*
 * Register an application helper incarnation for its port in this netns.
 * Returns 0 on success, -EEXIST when the port already has a helper.
 */
static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
	struct ip_vs_app *app;
	__be16 port = inc->port;
	__u16 hash = tcp_app_hashkey(port);

	/* Reject duplicate registrations for the same port */
	list_for_each_entry(app, &ipvs->tcp_apps[hash], p_list) {
		if (app->port == port)
			return -EEXIST;
	}

	list_add_rcu(&inc->p_list, &ipvs->tcp_apps[hash]);
	atomic_inc(&pd->appcnt);
	return 0;
}


/* Unregister an application helper incarnation: drop the per-protocol
 * helper count and unlink it (RCU) from the hash chain.
 */
static void
tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);

	atomic_dec(&pd->appcnt);
	list_del_rcu(&inc->p_list);
}


/*
 * Bind a new connection to the application helper registered for its
 * virtual port, if any.  Only NAT connections get a helper by default.
 * Returns the helper's init_conn() result, or 0 when nothing is bound.
 * Caller is expected to be in an RCU read-side section (rcu list walk).
 */
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
	struct netns_ipvs *ipvs = cp->ipvs;
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = tcp_app_hashkey(cp->vport);

	list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			/* Helper is being torn down: leave unbound */
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;

			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
				      "%s:%u to app %s on port %u\n",
				      __func__,
				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
				      ntohs(cp->cport),
				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
				      ntohs(cp->vport),
				      inc->name, ntohs(inc->port));

			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			break;
		}
	}

	return result;
}


/*
 *	Set LISTEN timeout. (ip_vs_conn_put will setup timer)
 */
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(cp->ipvs, IPPROTO_TCP);

	spin_lock_bh(&cp->lock);
	cp->state = IP_VS_TCP_S_LISTEN;
	/* Fall back to the compile-time table if the netns has no pd */
	if (pd)
		cp->timeout = pd->timeout_table[IP_VS_TCP_S_LISTEN];
	else
		cp->timeout = tcp_timeouts[IP_VS_TCP_S_LISTEN];
	spin_unlock_bh(&cp->lock);
}

/* ---------------------------------------------
 *   timeouts is netns related now.
 * ---------------------------------------------
 */
/*
 * Per-netns init: set up the application-helper hash table, clone the
 * FNAT timeout table for this netns, and start with the normal (non-DoS)
 * state-transition table.  Returns 0 or -ENOMEM.
 */
static int __ip_vs_tcp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
	int *timeouts;

	ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);

	timeouts = ip_vs_create_timeout_table((int *)tcp_timeouts_fnat,
					      sizeof(tcp_timeouts_fnat));
	if (!timeouts)
		return -ENOMEM;

	pd->timeout_table = timeouts;
	pd->tcp_state_table = tcp_states;
	return 0;
}

/* Per-netns teardown: free the timeout table allocated in init. */
static void __ip_vs_tcp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
	kfree(pd->timeout_table);
}


struct ip_vs_protocol ip_vs_protocol_tcp = {
	.name =			"TCP",
	.protocol =		IPPROTO_TCP,
	.num_states =		IP_VS_TCP_S_LAST,
	.dont_defrag =		0,
	.init =			NULL,
	.exit =			NULL,
	.init_netns =		__ip_vs_tcp_init,
	.exit_netns =		__ip_vs_tcp_exit,
	.register_app =		tcp_register_app,
	.unregister_app =	tcp_unregister_app,
	.conn_schedule =	tcp_conn_schedule,
	.conn_in_get =		ip_vs_conn_in_get_proto,
	.conn_out_get =		ip_vs_conn_out_get_proto,
	.snat_handler =		tcp_snat_handler,
	.dnat_handler =		tcp_dnat_handler,
	.fnat_in_handler =	tcp_fnat_in_handler,
	.fnat_out_handler =	tcp_fnat_out_handler,
	.state_name =		tcp_state_name,
	.state_transition =	tcp_state_transition,
	.app_conn_bind =	tcp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	tcp_timeout_change,
	.set_state_timeout = tcp_set_state_timeout,
	.conn_expire_handler = tcp_conn_expire_handler,
	.get_state_timeout = tcp_get_state_timeout,
};
