// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/atomic.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_log.h>
#include <net/ip.h>
#include "nf_special_def.h"

#define NF_DEL_HOOK_MAX 16
#define MARK_DYN_RESEV_OFFSET 8
#define NF_CT_DEF_EVICTION_RANGE 8
#define NF_CT_DYN_RESEV_EVICTION_RANGE 64

#define IFADDR_SIZE 4
#define IFNAME_SIZE 16

#define FFWD_DSCP_MARK 2
#define FFWD_PORT_TYPE_DST 1
#define FFWD_TABLE_TYPE_STATIC 0

/* Address overlay shared with the fast-forward (ffwd) module: raw
 * 32-bit words, an IPv4 address, or a full IPv6 address in one union.
 */
union ffwd_addr {
	u32 all[IFADDR_SIZE];
	u32 ipv4;
	struct in6_addr ipv6;
};

/* Rule descriptor handed to g_alg_set_noacc_table_hook(); field
 * semantics are owned by the external ffwd module.
 */
struct ffwd_mio {
	union ffwd_addr sip, dip;	/* source / destination address */
	u16  sport, dport;		/* L4 ports, network byte order (callers use htons()) */
	u8   protocol;			/* IPPROTO_* value */
	u8   find_mask;
	u8   port_type;			/* e.g. FFWD_PORT_TYPE_DST */
	u8   table_type;		/* e.g. FFWD_TABLE_TYPE_STATIC */
	u32  policy;
	char indev_name[IFNAME_SIZE];
	char outdev_name[IFNAME_SIZE];
	u32  fwmark;
	u8   action;			/* e.g. FFWD_DSCP_MARK */
};

/* IPv4 network (address + mask) selector used by nf_conntrack_flush_by_dip(). */
struct nf_ct_ipinfo {
	u32 addr;
	u32 mask;
};

/* Per-module deletion callbacks, invoked by do_conntrack_del_hook()
 * for every conntrack being torn down.  Slots are indexed by module id
 * (< NF_DEL_HOOK_MAX) via register_conntrack_del_hook().
 */
typedef void (*nfct_hook_fn)(struct nf_conn *ct);
static nfct_hook_fn g_nfct_del_hook[NF_DEL_HOOK_MAX] = {0};

/* Set >0 when NAT ports are exhausted; read by nf_nat_ports_used_up(). */
atomic_t nat_port_back_pressure;
EXPORT_SYMBOL(nat_port_back_pressure);

/* Count of failed conntrack allocations; bumped by nf_record_failed_alloc(). */
atomic_t nf_conntrack_alloc_fail_num;
EXPORT_SYMBOL(nf_conntrack_alloc_fail_num);

/* Non-zero enables "dynamic reservation" mode: wider eviction scan range
 * and protection of key-service conntracks (non-zero dyn mark).
 */
int g_nf_dyn_res_flag;
EXPORT_SYMBOL(g_nf_dyn_res_flag);

/* Optional callback fired when the conntrack table is full. */
int (*g_max_conntrack)(void);
EXPORT_SYMBOL(g_max_conntrack);

/* Optional per-device conntrack counters (decrement / increment).
 * Read via rcu_dereference() by nf_decrease/increase_dev_ct_num().
 */
void (*nf_dev_ct_num_dec)(struct nf_conn *ct);
EXPORT_SYMBOL(nf_dev_ct_num_dec);

void (*nf_dev_ct_num_inc)(struct nf_conn *ct);
EXPORT_SYMBOL(nf_dev_ct_num_inc);

/* Returns 0 when @ct must NOT be deleted (see nf_check_ct_reserved()). */
int (*nf_check_ct_can_del_hook)(struct nf_conn *ct);
EXPORT_SYMBOL(nf_check_ct_can_del_hook);

/* SIP expectation setup callback, called under nf_conntrack_expect_lock. */
void (*nf_ct_sip_expect_hook)(struct nf_conn *ct) __read_mostly;
EXPORT_SYMBOL(nf_ct_sip_expect_hook);

void (*g_session_match_hook)(struct sk_buff *skb, unsigned int ip_famliy);
EXPORT_SYMBOL(g_session_match_hook);

/* Session debug/logging callbacks; all optional and normally NULL. */
void (*g_session_print_log_egress_hook)(struct nf_conn *ct, const struct net_device *dev,
					struct sk_buff *skb, unsigned short l3_proto,
					unsigned int hooknum);
EXPORT_SYMBOL(g_session_print_log_egress_hook);

void (*g_session_set_debug_mask_hook)(struct sk_buff *skb, unsigned int mask_bit);
EXPORT_SYMBOL(g_session_set_debug_mask_hook);

void (*g_session_print_log_hook)(struct sk_buff *skb, unsigned short l3_proto,
				 unsigned char l4_proto, unsigned int dir);
EXPORT_SYMBOL(g_session_print_log_hook);

/* Observes the verdict of nf_conntrack_confirm() for IPv4 packets. */
unsigned int (*nf_conntrack_confirm_ipv4_hook)(struct sk_buff *skb,
					       const struct nf_hook_state *state,
					       int conntrack_ret);
EXPORT_SYMBOL(nf_conntrack_confirm_ipv4_hook);

/* Installs a no-acceleration rule; see nf_set_sip_noacc_rule(). */
unsigned int (*g_alg_set_noacc_table_hook)(struct ffwd_mio *info);
EXPORT_SYMBOL(g_alg_set_noacc_table_hook);

/* Non-zero return means the NAT port must be skipped. */
unsigned int (*g_func_nat_skip_port_in_table)(struct nf_conntrack_tuple *tuple,
					      struct nf_conn *ct);
EXPORT_SYMBOL(g_func_nat_skip_port_in_table);

/* MAP-E/MAP-T helpers; absent hooks fall back to permissive defaults. */
bool (*g_map_is_map_tunnel)(const struct sk_buff *skb, const struct nf_conn *ct, int dir);
EXPORT_SYMBOL(g_map_is_map_tunnel);

bool (*g_map_check_port_is_valid)(const char *dev_name, u16 port, u32 rsvd_num);
EXPORT_SYMBOL(g_map_check_port_is_valid);

/* Check if port is used when CONENAT enabled. */
int (*nf_nat_port_inuse_hook)(struct nf_conntrack_tuple *tuple,
			      struct nf_conn *ct, int rsvd_portnum) __read_mostly;
EXPORT_SYMBOL(nf_nat_port_inuse_hook);

/* iptables LOG-style printers, consumed under RCU by
 * nf_log_ipv4_packet() / nf_log_ipv6_packet().
 */
void (*iptable_print_hook)(struct net *net, u8 pf, unsigned int hooknum,
			   const struct sk_buff *skb, const struct net_device *in,
			   const struct net_device *out, const struct nf_loginfo *loginfo,
			   const char *prefix);
EXPORT_SYMBOL(iptable_print_hook);

void (*iptable_printv6_hook)(struct net *net, u8 pf, unsigned int hooknum,
			     const struct sk_buff *skb, const struct net_device *in,
			     const struct net_device *out, const struct nf_loginfo *loginfo,
			     const char *prefix);
EXPORT_SYMBOL(iptable_printv6_hook);

/* Extract the 4-bit dynamic-reserve class from a conntrack mark.
 * Bits 8-11 carry the class (see NF_CT_MARK_BIT); a non-zero value
 * flags a key service that must not be early-dropped.
 */
u32 nf_conntrack_get_dyn_mark(u32 mark)
{
	u32 dyn = mark >> MARK_DYN_RESEV_OFFSET;

	return dyn & 0x0F;
}
EXPORT_SYMBOL(nf_conntrack_get_dyn_mark);

/* Install a dynamic-reserve class into bits 8-11 of @mark.
 * @val is masked to 4 bits so an out-of-range value cannot corrupt
 * neighbouring mark bits (nf_conntrack_get_dyn_mark() only ever reads
 * 4 bits back, so anything wider was silently lost and leaked into
 * bits >= 12 before).
 */
void nf_conntrack_set_dyn_mark(u32 *mark, u32 val)
{
	*mark = (*mark & ~0xF00u) | ((val & 0x0Fu) << MARK_DYN_RESEV_OFFSET);
}
EXPORT_SYMBOL(nf_conntrack_set_dyn_mark);

/* Register (or, with func == NULL, unregister) the deletion callback
 * for slot @module.  Out-of-range slots are silently ignored.
 */
void register_conntrack_del_hook(unsigned int module, nfct_hook_fn func)
{
	if (module < NF_DEL_HOOK_MAX)
		g_nfct_del_hook[module] = func;
}
EXPORT_SYMBOL(register_conntrack_del_hook);

inline void do_conntrack_del_hook(struct nf_conn *ct)
{
	int i;

	for (i = 0; i < NF_DEL_HOOK_MAX; i++) {
		if (g_nfct_del_hook[i])
			g_nfct_del_hook[i](ct);
	}
}

/* Run the standard conntrack confirm step and let the optional IPv4
 * observer hook see the verdict.  Returns the confirm verdict.
 */
inline int do_conntrack_confirm_ipv4(struct sk_buff *skb, const struct nf_hook_state *state)
{
	int verdict = nf_conntrack_confirm(skb);

	if (nf_conntrack_confirm_ipv4_hook)
		nf_conntrack_confirm_ipv4_hook(skb, state, verdict);

	return verdict;
}

/* Forward an IPv4 confirm verdict to the observer hook; non-IPv4
 * traffic (or a missing state) is ignored.
 */
inline void nf_confirm_ipv4_hook(struct sk_buff *skb, const struct nf_hook_state *state, int ret)
{
	if (state && state->pf == NFPROTO_IPV4 && nf_conntrack_confirm_ipv4_hook)
		nf_conntrack_confirm_ipv4_hook(skb, state, ret);
}

inline void nf_decrease_dev_ct_num(struct nf_conn *ct)
{
	typeof(nf_dev_ct_num_dec) func;

	func = rcu_dereference(nf_dev_ct_num_dec);
	if (!func)
		return;

	func(ct);
}

/* Note: start counting after ct inserted to linked list. */
inline void nf_increase_dev_ct_num(struct nf_conn *ct)
{
	typeof(nf_dev_ct_num_inc) func;

	func = rcu_dereference(nf_dev_ct_num_inc);
	if (!func)
		return;

	func(ct);
}

inline bool nf_check_ct_reserved(struct nf_conn *ct)
{
	typeof(nf_check_ct_can_del_hook) func;

	func = rcu_dereference(nf_check_ct_can_del_hook);
	return (func && !func(ct));
}

/* True once the eviction scan has looked at enough entries; the scan
 * range widens when dynamic reservation is enabled.
 */
static bool nf_ct_evicted(int cnt)
{
	int range = g_nf_dyn_res_flag ? NF_CT_DYN_RESEV_EVICTION_RANGE :
					NF_CT_DEF_EVICTION_RANGE;

	return cnt >= range;
}

/* Count one more scanned entry; once the scan range is exhausted set
 * *eviction and report true to stop the scan.
 */
inline bool nf_check_ct_eviction(int *cnt, bool *eviction)
{
	++(*cnt);

	if (!nf_ct_evicted(*cnt))
		return false;

	*eviction = true;
	return true;
}

/* Key services (non-zero dyn mark, dynamic reservation on) are never
 * selected for early drop; anything else becomes the drop candidate.
 */
inline bool nf_check_key_services(struct nf_conn *ct, struct nf_conn **select)
{
	bool key = g_nf_dyn_res_flag && nf_conntrack_get_dyn_mark(ct->mark) != 0;

	if (!key)
		*select = ct;

	return key;
}

/* Try to delete @ct.  A reference is taken first so the entry cannot
 * be freed underneath us; entries that are NULL, already dying, or
 * whose refcount has reached zero are left alone.  Returns true if
 * nf_ct_delete() actually removed the entry.
 */
static bool nf_ct_dropped(struct nf_conn *ct)
{
	bool ret = false;

	if (!ct || nf_ct_is_dying(ct))
		return false;

	/* Refuse to resurrect an entry whose refcount already hit zero. */
	if (!atomic_inc_not_zero(&ct->ct_general.use))
		return false;

	ret = nf_ct_delete(ct, 0, 0);
	nf_ct_put(ct);
	return ret;
}

/* Force-drop @ct only when the eviction scan finished without dropping
 * anything on its own.
 */
inline bool nf_force_ct_dropped(bool eviction, unsigned int drops, struct nf_conn *ct)
{
	if (!eviction || drops)
		return false;

	return nf_ct_dropped(ct);
}

/* NAT ports are exhausted once the back-pressure counter goes positive. */
inline bool nf_nat_ports_used_up(void)
{
	int pressure = atomic_read(&nat_port_back_pressure);

	return pressure > 0;
}

/* Account one failed conntrack allocation. */
inline void nf_record_failed_alloc(void)
{
	atomic_inc(&nf_conntrack_alloc_fail_num);
}

inline void nf_record_full_conntrack(void)
{
	if (g_max_conntrack)
		g_max_conntrack();
}

/* Set a session debug mask bit on @skb via the optional hook.  The
 * hook is NULL unless session debugging is active, hence unlikely().
 */
void nf_set_session_dbg_mark(struct sk_buff *skb, unsigned int bit)
{
	if (unlikely(g_session_set_debug_mask_hook))
		g_session_set_debug_mask_hook(skb, bit);
}
EXPORT_SYMBOL(nf_set_session_dbg_mark);

/* Debug-mark packets of TCP flows that have not yet reached
 * ESTABLISHED; a no-op when session debugging is off.
 */
inline void nf_record_conntrack_before(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (likely(!g_session_set_debug_mask_hook))
		return;

	if (nf_ct_protonum(ct) == IPPROTO_TCP &&
	    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
		g_session_set_debug_mask_hook(skb, 1);
}

inline void nf_record_conntrack_after(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (unlikely(g_session_set_debug_mask_hook)) {
		if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    (nf_ct_protonum(ct) == IPPROTO_TCP &&
		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED))
			g_session_set_debug_mask_hook(skb, 1);
	}

	if (unlikely(g_session_print_log_hook))
		g_session_print_log_hook(skb, nf_ct_l3num(ct), nf_ct_protonum(ct), 0);
}

/* Try to copy flow template of master connection. */
inline void nf_copy_master_template(struct nf_conn *ct)
{
	struct nf_conn_template *src, *dst;

	if (!ct->master)
		return;

	/* Nothing to copy if the master carries no template extension. */
	src = nfct_get_template(ct->master);
	if (!src)
		return;

	dst = nfct_add_template(ct);
	if (!dst) {
		pr_err("failed to copy TEMPLATE extension\n");
		return;
	}

	*dst = *src;
}

inline void nf_setup_sip_expect(struct nf_conn *ct)
{
	typeof(nf_ct_sip_expect_hook) expfn;

	expfn = rcu_dereference(nf_ct_sip_expect_hook);
	if (expfn) {
		spin_lock_bh(&nf_conntrack_expect_lock);
		expfn(ct);
		spin_unlock_bh(&nf_conntrack_expect_lock);
	}

	nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
}

/* For IPv4/TCP only: true when the segment carries RST or FIN, or the
 * TCP header cannot be read at all.  Everything else reports false.
 */
inline bool nf_check_tcp_flags(const struct sk_buff *skb, int dataoff, u8 pf, u8 protonum)
{
	struct tcphdr buf;
	const struct tcphdr *hdr;

	if (pf != NFPROTO_IPV4 || protonum != IPPROTO_TCP)
		return false;

	hdr = skb_header_pointer(skb, dataoff, sizeof(buf), &buf);
	if (!hdr)
		return true;

	return hdr->rst || hdr->fin;
}

/* Accounted UDP flows that have seen a reply keep their current
 * timeout when the proposed deadline is earlier than the stored one.
 */
inline bool nf_check_udp_acct(const struct nf_conn *ct, u32 extra_jiffies, bool do_acct)
{
	if (!do_acct || nf_ct_protonum(ct) != IPPROTO_UDP)
		return false;

	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return false;

	return time_before((unsigned long)extra_jiffies, (unsigned long)ct->timeout);
}

void nf_set_sip_noacc_rule(unsigned int addr, unsigned short port)
{
	struct ffwd_mio info;

	if (!g_alg_set_noacc_table_hook)
		return;

	memset(&info, 0, sizeof(info));
	info.sip.ipv4 = 0;
	info.dip.ipv4 = addr;
	info.action = FFWD_DSCP_MARK;
	info.sport = htons(port);
	info.dport = htons(port);
	info.protocol = IPPROTO_UDP;
	info.port_type = FFWD_PORT_TYPE_DST;
	info.table_type = FFWD_TABLE_TYPE_STATIC;
	info.policy = 0;

	g_alg_set_noacc_table_hook(&info);
}
EXPORT_SYMBOL(nf_set_sip_noacc_rule);

bool nf_skip_nat_port(struct nf_conntrack_tuple *tuple, struct nf_conn *ct)
{
	return (g_func_nat_skip_port_in_table && g_func_nat_skip_port_in_table(tuple, ct));
}
EXPORT_SYMBOL(nf_skip_nat_port);

/* Without a MAP hook nothing is treated as a MAP tunnel. */
bool nf_check_map_tunnel(const struct sk_buff *skb, const struct nf_conn *ct)
{
	return g_map_is_map_tunnel &&
	       g_map_is_map_tunnel(skb, ct, IP_CT_DIR_REPLY);
}
EXPORT_SYMBOL(nf_check_map_tunnel);

/* Ports are considered valid unless the MAP hook vetoes them. */
bool nf_check_map_port_valid(const char *dev_name, u16 port, u32 rsvd_num)
{
	if (g_map_check_port_is_valid)
		return g_map_check_port_is_valid(dev_name, port, rsvd_num);

	return true;
}
EXPORT_SYMBOL(nf_check_map_port_valid);

bool nf_check_conenat_used(void)
{
	if (nf_nat_port_inuse_hook)
		return true;

	return false;
}
EXPORT_SYMBOL(nf_check_conenat_used);

bool nf_check_nat_port_inuse(struct nf_conntrack_tuple *tuple, struct nf_conn *ct, int rsvd_num)
{
	return (nf_nat_port_inuse_hook && nf_nat_port_inuse_hook(tuple, ct, rsvd_num));
}
EXPORT_SYMBOL(nf_check_nat_port_inuse);

/* Forward an IPv4 packet log request to the iptables printer, if one
 * is registered, under RCU protection.
 */
void nf_log_ipv4_packet(struct net *net, u8 pf, unsigned int hooknum,
			const struct sk_buff *skb, const struct net_device *in,
			const struct net_device *out, const void *loginfo,
			const char *prefix)
{
	typeof(iptable_print_hook) log_fn;

	rcu_read_lock();
	log_fn = rcu_dereference(iptable_print_hook);
	if (log_fn)
		log_fn(net, pf, hooknum, skb, in, out,
		       (const struct nf_loginfo *)loginfo, prefix);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_ipv4_packet);

/* IPv6 counterpart of nf_log_ipv4_packet(). */
void nf_log_ipv6_packet(struct net *net, u8 pf, unsigned int hooknum,
			const struct sk_buff *skb, const struct net_device *in,
			const struct net_device *out, const void *loginfo,
			const char *prefix)
{
	typeof(iptable_printv6_hook) log_fn;

	rcu_read_lock();
	log_fn = rcu_dereference(iptable_printv6_hook);
	if (log_fn)
		log_fn(net, pf, hooknum, skb, in, out,
		       (const struct nf_loginfo *)loginfo, prefix);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_ipv6_packet);

/* Just see kill_all() */
static int kill_all_net(struct nf_conn *i, void *data)
{
	/* Select every conntrack belonging to the given netns. */
	return net_eq(nf_ct_net(i), data) ? 1 : 0;
}

/* Select conntracks that have no master connection (@data unused). */
static int kill_all_master(struct nf_conn *i, void *data)
{
	if (i->master)
		return 0;

	return 1;
}

static int kill_all_by_dip(struct nf_conn *i, void *data)
{
	struct nf_ct_ipinfo *info = (struct nf_ct_ipinfo *)data;
	u32 naddr = info->addr & info->mask;

	if (naddr == 0)
		return 0;

	if ((i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip & info->mask) == naddr ||
	    (i->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip & info->mask) == naddr)
		return 1;

	return 0;
}

/* Drop every conntrack entry in the initial network namespace. */
void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup_net(&init_net, kill_all_net, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Drop every conntrack entry that has no master (see kill_all_master()). */
void nf_conntrack_flush_master(void)
{
	nf_ct_iterate_cleanup_net(&init_net, kill_all_master, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_master);

/* Drop entries whose destination IP matches addr/mask in either
 * direction (see kill_all_by_dip()).
 */
void nf_conntrack_flush_by_dip(u32 addr, u32 mask)
{
	struct nf_ct_ipinfo net_info = {
		.addr = addr,
		.mask = mask,
	};

	nf_ct_iterate_cleanup_net(&init_net, kill_all_by_dip, &net_info, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_by_dip);

/* Refresh the timeout of a NAPT conntrack entry.
 * @extra_jiffies: relative timeout (jiffies) to apply from "now".
 */
void nf_ct_refresh_napt(struct nf_conn *ct, unsigned long extra_jiffies)
{
	unsigned long newtime = jiffies + extra_jiffies;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		/* Unconfirmed entries store the relative value here —
		 * presumably made absolute at confirm time; verify
		 * against the core confirm path.
		 */
		ct->timeout = extra_jiffies;
		return;
	}

	/* Only update the timeout if the new timeout is at least
	 * HZ jiffies from the old timeout. Need del_timer for race
	 * avoidance (may already be dying).
	 *
	 * NOTE(review): the unsigned subtraction also passes when
	 * newtime is *earlier* than ct->timeout (it wraps to a large
	 * value, shortening the timeout) — confirm this is intended.
	 */
	if (newtime - ct->timeout >= HZ)
		ct->timeout = newtime;
}
EXPORT_SYMBOL(nf_ct_refresh_napt);

/* Delete conntrack, refers to napt module.
 * Returns 0 on success, -1 when no entry matches @tuple.
 *
 * Fix: the old "if (!ct) return -2;" after nf_ct_tuplehash_to_ctrack()
 * was unreachable (container_of() of a non-NULL hash entry is never
 * NULL) and, had it ever fired, would have leaked the reference taken
 * by nf_conntrack_find_get().
 */
int nf_del_ct_napt(struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	/* look for tuple match; takes a reference on success */
	h = nf_conntrack_find_get(&init_net, &nf_ct_zone_dflt, tuple);
	if (!h)
		return -1;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (nf_ct_delete(ct, 0, 0))
		nf_decrease_dev_ct_num(ct);
	NF_CT_STAT_INC_ATOMIC(&init_net, early_drop);
	nf_ct_put(ct);

	return 0;
}
EXPORT_SYMBOL(nf_del_ct_napt);

/* Compare two tuples on one end: destination (is_dst) or source.
 * L3 family and L4 protocol must agree before the address/port check.
 */
static bool nf_match_tuple(const struct nf_conntrack_tuple *tuple1,
			   const struct nf_conntrack_tuple *tuple2, bool is_dst)
{
	bool same_proto = tuple1->src.l3num == tuple2->src.l3num &&
			  tuple1->dst.protonum == tuple2->dst.protonum;

	if (!same_proto)
		return false;

	if (is_dst)
		return tuple1->dst.u3.ip == tuple2->dst.u3.ip &&
		       tuple1->dst.u.all == tuple2->dst.u.all;

	return tuple1->src.u3.ip == tuple2->src.u3.ip &&
	       tuple1->src.u.all == tuple2->src.u.all;
}

/* Linear scan of the whole conntrack hash table for the first entry
 * whose source (is_dst == false) or destination (is_dst == true) end
 * matches @tuple (IPv4 address + port, same l3num/protonum).
 *
 * NOTE(review): the returned ct is not refcounted and escapes the
 * local_bh_disable() window — callers must guarantee the entry cannot
 * be freed while they use it; confirm this holds.
 */
static struct nf_conn *nf_ct_find_by_tuple(const struct nf_conntrack_tuple *tuple, bool is_dst)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash;

	local_bh_disable();
	for (hash = 0; hash < nf_conntrack_htable_size; hash++) {
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
			if (nf_match_tuple(tuple, &h->tuple, is_dst)) {
				local_bh_enable();
				return nf_ct_tuplehash_to_ctrack(h);
			}
		}
	}
	local_bh_enable();

	return NULL;
}

/* Look up a conntrack by the destination end of @tuple. */
struct nf_conn *nf_conntrack_find_dst(const struct nf_conntrack_tuple *tuple)
{
	return nf_ct_find_by_tuple(tuple, true);
}
EXPORT_SYMBOL(nf_conntrack_find_dst);

/* Look up a conntrack by the source end of @tuple. */
struct nf_conn *nf_conntrack_find_src(const struct nf_conntrack_tuple *tuple)
{
	return nf_ct_find_by_tuple(tuple, false);
}
EXPORT_SYMBOL(nf_conntrack_find_src);

/* Find a conntrack whose REPLY-direction source matches @tuple and
 * whose ORIGINAL-direction destination equals @addr (the WAN IP).
 *
 * NOTE(review): like nf_ct_find_by_tuple(), the returned ct carries no
 * reference and escapes the bh-disabled section — confirm callers keep
 * it alive.
 */
struct nf_conn *nf_conntrack_find_src_by_wanip(const struct nf_conntrack_tuple *tuple,
					       const union nf_inet_addr *addr)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash;
	struct nf_conn *ct;
	const struct nf_conntrack_tuple *down;

	local_bh_disable();
	for (hash = 0; hash < nf_conntrack_htable_size; hash++) {
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
			/* Only inspect reply-direction hash entries. */
			if (h->tuple.dst.dir != IP_CT_DIR_REPLY)
				continue;

			if (!nf_match_tuple(tuple, &h->tuple, false))
				continue;

			ct = nf_ct_tuplehash_to_ctrack(h);
			down = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
			if (nf_inet_addr_cmp(addr, &down->dst.u3)) {
				local_bh_enable();
				return ct;
			}
		}
	}
	local_bh_enable();

	return NULL;
}
EXPORT_SYMBOL(nf_conntrack_find_src_by_wanip);

/* Decide whether @ct is interesting for tuple export: only TCP and
 * UDP qualify, and TCP entries that already reached TIME_WAIT (or a
 * later state) are skipped.
 */
static bool nf_ct_needed(const struct nf_conn *ct)
{
	u8 proto;

	if (!ct)
		return false;

	proto = nf_ct_protonum(ct);

	switch (proto) {
	case IPPROTO_UDP:
		return true;
	case IPPROTO_TCP:
		/* ignore tcp ct whose status is timeout */
		return ct->proto.tcp.state < TCP_CONNTRACK_TIME_WAIT;
	default:
		return false;
	}
}

/* Walk the whole conntrack table and copy the ORIGINAL-direction
 * tuple of every TCP/UDP entry accepted by @hook into @tuples.
 *
 * @input:   opaque argument forwarded to @hook.
 * @hook:    filter; a NULL hook selects nothing.
 * @tuples:  output array of at least @max_num entries.
 * @max_num: capacity of @tuples; the walk stops when it is reached.
 * @act_num: set to the number of tuples actually written.
 */
void nf_conntrack_get_tuples(void *input, bool (*hook)(const struct nf_conn *, void *),
			     struct nf_conntrack_tuple *tuples, u32 max_num, u32 *act_num)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash;
	struct nf_conn *ct;
	u32 num = 0;

	/* Ensure return tuples is valid */
	if (!tuples || !act_num || !max_num)
		return;

	local_bh_disable();
	for (hash = 0; hash < nf_conntrack_htable_size; hash++) {
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
			/* Each ct appears twice in the table; only take
			 * the ORIGINAL-direction hash entry.
			 */
			if (h->tuple.dst.dir != IP_CT_DIR_ORIGINAL)
				continue;

			ct = nf_ct_tuplehash_to_ctrack(h);
			if (!nf_ct_needed(ct))
				continue;

			if (!hook || !hook(ct, input))
				continue;

			/* Copy out found conntracks, and use dst.dir indicating
			 * main/slave ct flag. If ct->master is NULL, we use ct
			 * in hook and set main flag 1, otherwise set slave flag 2.
			 */
			tuples[num] = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
			tuples[num].dst.dir = ct->master ? 2 : 1;

			num++;
			if (num >= max_num) {
				local_bh_enable();
				*act_num = max_num;
				return;
			}
		}
	}
	local_bh_enable();
	*act_num = num;
}
EXPORT_SYMBOL(nf_conntrack_get_tuples);

/* Apply @hook to every entry in the conntrack hash table; a non-zero
 * return from @hook aborts the walk.
 *
 * NOTE(review): the walk is serialized on nf_conntrack_expect_lock,
 * which guards the expectation lists, not the conntrack hash itself —
 * confirm this is the intended lock for this iteration.
 */
void nf_conntrack_foreach(int (*hook)(struct nf_conntrack_tuple_hash *, void *), void *data)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int bucket;

	if (!hook)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	for (bucket = 0; bucket < nf_conntrack_htable_size; bucket++) {
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[bucket], hnnode) {
			if (hook(h, data)) {
				/* error, stop iterator */
				spin_unlock_bh(&nf_conntrack_expect_lock);
				return;
			}
		}
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL(nf_conntrack_foreach);
