// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include "nf_special_def.h"

/* One bucket entry of the expectation "save" table: links an existing
 * struct nf_conntrack_expect into a hash keyed by its saved (pre-NAT)
 * address/port.  Freed via RCU (see nf_ct_expect_save_free_rcu()). */
struct nf_expect_hash_node {
	struct hlist_node hnode;		/* chain in nf_ct_expect_save.hash[] */
	struct nf_conntrack_expect *expect;	/* tracked expectation (no reference taken here) */
	struct rcu_head rcu;			/* deferred free after grace period */
};

/* Backing storage for the save table.  A single global instance is used;
 * only init_net expectations are inserted (see the net_eq() guards below). */
struct netns_expect_hash {
	unsigned int count;		/* number of nodes currently hashed */
	struct hlist_head *hash;	/* bucket array, nf_ct_expect_hsize buckets */
	struct kmem_cache *cachep;	/* slab cache for struct nf_expect_hash_node */
};

/* Lazily initialized seed for the save-table hash.
 * NOTE(review): the flag/seed pair is written without synchronization —
 * see the note at nf_ct_expect_save_hash(). */
static int nf_ct_expect_hash_rnd_initted __read_mostly;
static unsigned int nf_ct_expect_hash_rnd __read_mostly;
/* The single global save table (init_net only). */
static struct netns_expect_hash nf_ct_expect_save __read_mostly;

/* Presumably defined in the core expectation code — verify at link time. */
unsigned int nf_ct_expect_hash_get(const struct net *n, const struct nf_conntrack_tuple *t);
int nf_ct_expect_matches(const struct nf_conntrack_expect *a,
			 const struct nf_conntrack_expect *b);

/*
 * Hash a saved (pre-NAT) IPv4 address/port/protocol triple into a bucket
 * index in [0, nf_ct_expect_hsize).
 *
 * NOTE(review): the lazy seed initialization below is not atomic; two CPUs
 * racing here can observe different seeds for the first few hashes.
 * net_get_random_once() is the usual replacement — confirm the callers'
 * concurrency before relying on this path.
 */
static unsigned int nf_ct_expect_save_hash(__be32 saved_ip, __be16 port, u8 protonum)
{
	unsigned int hash;

	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
		get_random_bytes(&nf_ct_expect_hash_rnd, sizeof(nf_ct_expect_hash_rnd));
		nf_ct_expect_hash_rnd_initted = 1;
	}

	/* Only support ipv4 hash currently */
	hash = jhash2(&saved_ip, 1,
		      (((__force __u16)(protonum ^ AF_INET) << 16) |
		       ((__force __u16)port ^ nf_ct_expect_hash_rnd)));

	/* Scale the 32-bit hash down to a bucket index without a modulo. */
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

static inline unsigned int __nf_ct_expect_save_hash(struct nf_conntrack_expect *exp)
{
	return nf_ct_expect_save_hash(exp->saved_addr.ip, exp->saved_proto.all,
				      exp->tuple.dst.protonum);
}

static struct nf_expect_hash_node *nf_ct_expect_save_find(struct nf_conntrack_expect *exp)
{
	struct nf_expect_hash_node *i;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = __nf_ct_expect_save_hash(exp);

	if (!net_eq(net, &init_net))
		return NULL;

	hlist_for_each_entry_rcu(i, &nf_ct_expect_save.hash[h], hnode) {
		if (nf_ct_expect_matches(i->expect, exp))
			return i;
	}

	return NULL;
}

/*
 * Track @exp in the save table (init_net expectations only).
 *
 * On allocation failure the expectation is simply not tracked; callers do
 * not depend on insertion succeeding.
 * NOTE(review): the stored pointer takes no reference on @exp, so
 * nf_ct_expect_save_remove() must run before @exp is freed — confirm
 * against the expectation lifecycle.
 */
void nf_ct_expect_save_insert(struct nf_conntrack_expect *exp)
{
	struct nf_expect_hash_node *snode;
	unsigned int h;

	if (!net_eq(nf_ct_exp_net(exp), &init_net))
		return;

	snode = kmem_cache_alloc(nf_ct_expect_save.cachep, GFP_ATOMIC);
	if (!snode) {
		/* This is a slab allocation failure, not a "table full"
		 * condition — report it as such. */
		if (net_ratelimit())
			pr_warn("nf_conntrack: failed to allocate expectation save node\n");
		return;
	}

	h = __nf_ct_expect_save_hash(exp);
	snode->expect = exp;
	hlist_add_head_rcu(&snode->hnode, &nf_ct_expect_save.hash[h]);
	nf_ct_expect_save.count++;
}

/* RCU callback: return a save-table node to its slab cache once no
 * reader can still see it. */
static void nf_ct_expect_save_free_rcu(struct rcu_head *head)
{
	struct nf_expect_hash_node *snode =
		container_of(head, struct nf_expect_hash_node, rcu);

	kmem_cache_free(nf_ct_expect_save.cachep, snode);
}

/* Stop tracking @exp: unlink its node and free it after a grace period. */
void nf_ct_expect_save_remove(struct nf_conntrack_expect *exp)
{
	struct nf_expect_hash_node *snode;

	if (!net_eq(nf_ct_exp_net(exp), &init_net))
		return;

	snode = nf_ct_expect_save_find(exp);
	if (snode) {
		hlist_del_rcu(&snode->hnode);
		nf_ct_expect_save.count--;
		call_rcu(&snode->rcu, nf_ct_expect_save_free_rcu);
	}
}

void nf_ct_expect_save_replace(struct nf_conntrack_expect *exp, struct nf_conntrack_expect *rep)
{
	/* If saved ip or proto changed, replace the save-hash table. */
	if (likely(exp->saved_addr.ip == rep->saved_addr.ip &&
		   exp->saved_proto.all == rep->saved_proto.all))
		return;

	nf_ct_expect_save_remove(exp);
	nf_ct_expect_save_insert(rep);
}

/*
 * Look up an expectation whose saved (pre-NAT) address/port/protocol match
 * the source side of @tuple.
 *
 * The list is published with hlist_add_head_rcu() and torn down via
 * hlist_del_rcu()/call_rcu(), so the walk must use the RCU accessor —
 * the plain hlist_for_each_entry() lacks the required rcu_dereference().
 * NOTE(review): the returned pointer carries no reference and we drop
 * rcu_read_lock() before returning; it is only safe if the caller itself
 * prevents the expectation from being freed — verify call sites.
 */
struct nf_conntrack_expect *nf_ct_expect_save_proto_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_expect_hash_node *i;
	unsigned int h;

	rcu_read_lock();
	h = nf_ct_expect_save_hash(tuple->src.u3.ip, tuple->src.u.all, tuple->dst.protonum);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_save.hash[h], hnode) {
		if (tuple->src.u3.ip == i->expect->saved_addr.ip &&
		    tuple->src.u.all == i->expect->saved_proto.all &&
		    tuple->dst.protonum == i->expect->tuple.dst.protonum) {
			rcu_read_unlock();
			return i->expect;
		}
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_save_proto_find);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Per-open iterator state for the /proc seq walk. */
struct ct_expect_save_iter_state {
	struct seq_net_private p;	/* must be first (seq_file net helpers) — TODO confirm */
	unsigned int bucket;		/* current bucket of the walk */
};

/* Position the iterator on the first node of the first non-empty bucket. */
static struct hlist_node *ct_expect_save_get_first(struct seq_file *seq)
{
	struct ct_expect_save_iter_state *st = seq->private;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		struct hlist_node *node =
			rcu_dereference(nf_ct_expect_save.hash[st->bucket].first);

		if (node)
			return node;
	}
	return NULL;
}

/* Advance past @head, falling through to later buckets when a chain ends. */
static struct hlist_node *
ct_expect_save_get_next(struct seq_file *seq, struct hlist_node *head)
{
	struct ct_expect_save_iter_state *st = seq->private;
	struct hlist_node *next = rcu_dereference(head->next);

	while (!next && ++st->bucket < nf_ct_expect_hsize)
		next = rcu_dereference(nf_ct_expect_save.hash[st->bucket].first);

	return next;
}

/* Return the @pos'th node of the table, or NULL when past the end. */
static struct hlist_node *ct_expect_save_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *node;

	for (node = ct_expect_save_get_first(seq); node && pos; pos--)
		node = ct_expect_save_get_next(seq, node);

	return node;
}

/* seq start: enter the RCU read-side section for the whole walk, then
 * seek to entry number *pos. */
static void *exp_save_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_save_get_idx(seq, *pos);
}

/* seq next: bump the position and hand back the following node. */
static void *exp_save_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return ct_expect_save_get_next(seq, v);
}

/* seq stop: leave the RCU section taken in exp_save_seq_start(). */
static void exp_save_seq_stop(struct seq_file *seq, void *v) __releases(RCU)
{
	rcu_read_unlock();
}

/* Emit one save-table entry: remaining timeout, the saved (pre-NAT)
 * endpoint, the expectation tuple, flags and the helper/policy name.
 * The output format is /proc ABI — do not change the strings. */
static int exp_save_seq_show(struct seq_file *s, void *v)
{
	struct nf_expect_hash_node *snode;
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	snode = hlist_entry(n, struct nf_expect_hash_node, hnode);
	expect = snode->expect;

	/* Remaining lifetime in seconds; "-" when no timer was ever set up. */
	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout) ?
			   (long)(expect->timeout.expires - jiffies) / HZ : 0);
	else
		seq_puts(s, "- ");

	seq_printf(s, "l3proto = %u proto=%u svip=%pI4 svport=%u",
		   expect->tuple.src.l3num, expect->tuple.dst.protonum,
		   &expect->saved_addr.ip, ntohs(expect->saved_proto.all));
	print_tuple(s, &expect->tuple, nf_ct_l4proto_find(expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_puts(s, "PERMANENT_SAVE!!!");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE)
		seq_printf(s, "%sINACTIVE", delim);

	/* Safe: the seq walk runs under rcu_read_lock() taken in
	 * exp_save_seq_start(). */
	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s", helper->expect_policy[expect->class].name);
	}
	seq_putc(s, '\n');

	return 0;
}

/* Iterator callbacks for /proc/net/nf_conntrack_expect_save. */
static const struct seq_operations exp_save_seq_ops = {
	.start = exp_save_seq_start,
	.next = exp_save_seq_next,
	.stop = exp_save_seq_stop,
	.show = exp_save_seq_show
};

/*
 * Create /proc/net/nf_conntrack_expect_save for @net.
 * Returns 0 on success, -ENOMEM if the proc entry cannot be created.
 */
int exp_save_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;
	kuid_t root_uid;
	kgid_t root_gid;

	pde = proc_create_net("nf_conntrack_expect_save", 0440,
			      net->proc_net, &exp_save_seq_ops,
			      sizeof(struct ct_expect_save_iter_state));
	if (!pde)
		return -ENOMEM;

	/* Hand the file to the namespace's root user when it is mappable. */
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(pde, root_uid, root_gid);

	return 0;
}

/* Tear down /proc/net/nf_conntrack_expect_save for @net. */
void exp_save_proc_remove(struct net *net)
{
	remove_proc_entry("nf_conntrack_expect_save", net->proc_net);
}

#else /* CONFIG_NF_CONNTRACK_PROCFS */

/* PROCFS disabled: nothing to create, report success. */
int exp_save_proc_init(struct net *net)
{
	return 0;
}

void exp_save_proc_remove(struct net *net) { } /* PROCFS disabled: no-op */
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

/*
 * Allocate the save table's bucket array and slab cache.
 * Returns 0 on success, -ENOMEM on allocation failure (partially
 * allocated state is rolled back).
 */
int nf_ct_expect_save_init(void)
{
	nf_ct_expect_save.count = 0;

	nf_ct_expect_save.hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_save.hash)
		return -ENOMEM;

	nf_ct_expect_save.cachep = kmem_cache_create("nf_conntrack_expect_save",
						     sizeof(struct nf_expect_hash_node),
						     0, 0, NULL);
	if (!nf_ct_expect_save.cachep) {
		kvfree(nf_ct_expect_save.hash);
		return -ENOMEM;
	}

	return 0;
}

void nf_ct_expect_save_fini(void)
{
	kvfree(nf_ct_expect_save.hash);
	nf_ct_expect_save.count = 0;
	kmem_cache_destroy(nf_ct_expect_save.cachep);
}

/* Optional hook invoked on expectation events; set by an external module.
 * NOTE(review): read and written from different modules — readers/writers
 * should agree on a READ_ONCE/WRITE_ONCE discipline; verify the setter. */
void (*g_nf_ct_expect_report)(enum ip_conntrack_expect_events, struct nf_conntrack_expect *);
EXPORT_SYMBOL(g_nf_ct_expect_report);

void nf_ct_expect_nat_report(enum ip_conntrack_expect_events event,
			     struct nf_conntrack_expect *exp)
{
	if (g_nf_ct_expect_report)
		g_nf_ct_expect_report(event, exp);
}

/*
 * Restart @exp's timeout using its helper's per-class policy.
 * Returns 1 if the pending timer was refreshed, 0 otherwise.
 *
 * Guard against a master conntrack without a helper extension, or with the
 * helper already unset — the original dereferenced ->helper unconditionally,
 * a NULL-pointer crash when the helper was unregistered.
 * NOTE(review): ->helper is normally an RCU-managed pointer; confirm the
 * callers hold the appropriate protection.
 */
int nf_ct_expect_refresh_timer(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	const struct nf_conntrack_expect_policy *p;

	if (!master_help || !master_help->helper)
		return 0;

	/* Only refresh a timer that is actually pending. */
	if (!del_timer(&exp->timeout))
		return 0;

	p = &master_help->helper->expect_policy[exp->class];
	exp->timeout.expires = jiffies + p->timeout * HZ;
	add_timer(&exp->timeout);
	return 1;
}
EXPORT_SYMBOL(nf_ct_expect_refresh_timer);

/*
 * Invoke @hook (read-only) on every expectation in the global table.
 *
 * hlist_for_each_entry_rcu() requires an RCU read-side critical section;
 * take it here rather than relying on every caller (nesting is safe if a
 * caller already holds it).  @hook must not sleep.
 */
void nf_ct_expect_read_foreach(void (*hook)(const struct nf_conntrack_expect *))
{
	unsigned int i;
	struct nf_conntrack_expect *exp;

	if (!hook)
		return;

	rcu_read_lock();
	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[i], hnode)
			hook(exp);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_ct_expect_read_foreach);

/*
 * Deactivate the first permanent/fullcone UDP expectation whose destination
 * address matches @tuple and whose destination port lies within
 * [@startport, @endport].  Returns 0 if an entry was flagged inactive,
 * 1 if none matched.
 *
 * Nothing is unlinked here, so the plain RCU iterator is correct;
 * hlist_for_each_entry_safe() does not provide RCU-safe traversal and was
 * the wrong tool under rcu_read_lock().
 * NOTE(review): the flag test matches entries with EITHER
 * NF_CT_EXPECT_PERMANENT or NF_CT_EXPECT_FULLCONE set — confirm "both set"
 * was not intended.  The flag update is done under rcu_read_lock() only;
 * verify against the table's write-side locking.
 */
int nf_ct_expect_fullconenat_del(const struct nf_conntrack_tuple *tuple,
				 unsigned int startport, unsigned int endport)
{
	struct nf_conntrack_expect *i;
	unsigned int h;
	unsigned int port;

	rcu_read_lock();
	for (h = 0; h < nf_ct_expect_hsize; h++) {
		hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
			port = ntohs(i->tuple.dst.u.udp.port);
			if (i->tuple.dst.protonum == IPPROTO_UDP &&
			    i->tuple.dst.u3.ip == tuple->dst.u3.ip &&
			    (port >= startport && port <= endport) &&
			    (i->flags & (NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_FULLCONE))) {
				i->flags &= ~NF_CT_EXPECT_PERMANENT;
				i->flags |= NF_CT_EXPECT_INACTIVE;
				rcu_read_unlock();
				return 0;
			}
		}
	}
	rcu_read_unlock();

	return 1;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_fullconenat_del);

/*
 * Find an expectation in @net whose destination side equals @a's tuple
 * destination, or NULL.
 *
 * The walk only reads the list, so use hlist_for_each_entry_rcu();
 * hlist_for_each_entry_safe() is for deletion while iterating and is not
 * RCU-safe under rcu_read_lock().
 * NOTE(review): the returned pointer carries no reference and the RCU
 * section ends before returning — callers must provide their own
 * protection before dereferencing it; verify call sites.
 */
struct nf_conntrack_expect *
nf_conntrack_expect_find(struct net *net, const struct nf_conntrack_expect *a)
{
	unsigned int h;
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	h = nf_ct_expect_hash_get(net, &a->tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (__nf_ct_tuple_dst_equal(&a->tuple, &i->tuple)) {
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL(nf_conntrack_expect_find);

int nf_conntrack_expect_clash(struct net *net, const struct nf_conntrack_expect *a)
{
	return nf_conntrack_expect_find(net, a) ? 1 : 0;
}
EXPORT_SYMBOL(nf_conntrack_expect_clash);
