// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_ct_expand.h>
#include "nf_special_def.h"

/* Runtime switch for masquerade debug logging; set once at init time when
 * the marker file exists (see nf_nat_masquerade_dbg_init()).
 */
static int g_masquerade_dbg;

/* Reserve some ports for ALG protocols that needs consecutive ports,
 * just like RTSP. Defined in nf_nat.ko to avoid dependency between
 * CONENAT and other ALG applications. */
static int nf_nat_rsvd_portnum = 2;

/* Provided by the core NAT code; declared here rather than in a header.
 * NOTE(review): consider moving these prototypes into a shared header so
 * the definitions and users cannot drift apart.
 */
int nf_nat_used_tuple_ext(const struct nf_conntrack_tuple *tuple,
			  const struct nf_conn *ignored_conntrack);
struct nf_conn *nf_nat_find_src(const struct nf_conntrack_tuple *tuple, void *data,
				bool (*hook)(const struct nf_conn *ct, void *data));

bool nf_nat_tuple_map_valid(struct nf_conntrack_tuple *tuple, const struct nf_nat_range2 *range,
			    const struct nf_conn *ct, enum nf_nat_manip_type maniptype)
{
	struct nf_conn_ext_port *mtport;
	__be16 port;

	if (maniptype != NF_NAT_MANIP_DST)
		return false;

	mtport = nfct_get_extport(ct);
	if (!mtport || !mtport->dpts[0] || !mtport->dpts[1])
		return false;

	pr_info("dstport[%d], minport[%d], mtport_s[%d], mtport_e[%d]\n",
		ntohs(tuple->dst.u.all), ntohs(range->min_proto.all),
		mtport->dpts[0], mtport->dpts[1]);

	/* Ports mapping to supports one-to-one mapping of intranet ports. */
	port = ntohs(range->min_proto.all) + (ntohs(tuple->dst.u.all) - mtport->dpts[0]);
	tuple->dst.u.all = htons(port);

	return (port <= ntohs(range->max_proto.all) && !nf_nat_used_tuple_ext(tuple, ct));
}

/* Hook invoked for SIP flows: when an expectation recorded a mapping for
 * this connection's original tuple, SNAT the fresh connection to the
 * expected address/port so it reuses the same external mapping.
 */
static void nf_nat_sip_exp_setup(struct nf_conn *ct)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_tuple *tuple;
	struct nf_nat_range2 range;

	expect = nf_ct_expect_save_proto_find(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	if (!expect || (expect->flags & NF_CT_EXPECT_FULLCONE))
		return;

	/* This must be a fresh one. */
	if (ct->status & IPS_NAT_DONE_MASK) {
		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		pr_info("%s[%d] no bug_on: Sport=%d,Dport=%d,src=%pI4,dst=%pI4,use=%d\n",
			__func__, __LINE__,
			ntohs(tuple->src.u.udp.port), ntohs(tuple->dst.u.udp.port),
			&tuple->src.u3.ip, &tuple->dst.u3.ip, atomic_read(&ct->ct_general.use));
		/* nf_nat_setup_info doesn't support snat twice, will cause bug_on crash. */
		if (ct->status & IPS_SRC_NAT_DONE) {
			pr_info("Can't SNAT twice\n");
			return;
		}
	}

	/* SNAT if the pkt src is the same as the expected connection.
	 * Zero the whole range first: fields not assigned below (the IPv6
	 * halves of min/max_addr, base_proto, ...) must not be stack
	 * garbage when handed to nf_nat_setup_info().
	 */
	memset(&range, 0, sizeof(range));
	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
	range.min_addr.ip = expect->tuple.dst.u3.ip;
	range.max_addr.ip = expect->tuple.dst.u3.ip;
	range.min_proto.udp.port = expect->tuple.dst.u.udp.port;
	range.max_proto.udp.port = expect->tuple.dst.u.udp.port;
	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}

/* Install the SIP expect hook. Double registration is a bug (BUG_ON).
 * Published via rcu_assign_pointer so RCU readers observe a fully valid
 * function pointer.
 */
inline void nf_nat_set_sip_exp_hook(void)
{
	BUG_ON(nf_ct_sip_expect_hook);
	rcu_assign_pointer(nf_ct_sip_expect_hook, nf_nat_sip_exp_setup);
}

/* Unregister the SIP expect hook. NOTE(review): callers presumably
 * synchronize_rcu() before freeing anything the hook touches -- verify.
 */
inline void nf_nat_clear_sip_exp_hook(void)
{
	rcu_assign_pointer(nf_ct_sip_expect_hook, NULL);
}

/* Number of consecutive ports reserved per NAT binding for ALGs that
 * need adjacent ports (RTSP etc.). Default 2.
 */
int nf_nat_get_rsvd_portnum(void)
{
	return nf_nat_rsvd_portnum;
}
EXPORT_SYMBOL(nf_nat_get_rsvd_portnum);

/* Set the number of consecutive reserved ports.
 *
 * Values below 1 are rejected: the port allocators divide and take the
 * modulo by this value (e.g. nf_nat_masquerade_port_conflict()), so a
 * zero here would be a kernel division by zero; negative values would
 * corrupt the stride logic.
 */
void nf_nat_set_rsvd_portnum(int num)
{
	if (num < 1) {
		pr_warn("invalid rsvd portnum %d, keep %d\n", num, nf_nat_rsvd_portnum);
		return;
	}

	nf_nat_rsvd_portnum = num;
}
EXPORT_SYMBOL(nf_nat_set_rsvd_portnum);

/* Find a conntrack whose NATed source matches @tuple, with no extra
 * filter (full-cone style lookup).
 */
struct nf_conn *nf_nat_find_by_src(const struct nf_conntrack_tuple *tuple)
{
	return nf_nat_find_src(tuple, NULL, NULL);
}
EXPORT_SYMBOL(nf_nat_find_by_src);

static bool nf_nat_dev_matches(const struct nf_conn *ct, void *data)
{
	int dev_index = *(int *)data;
	const struct nf_conn_nat *nat = nfct_nat(ct);

	return (nat && (!nat->masq_index || nat->masq_index == dev_index));
}

/* Find a conntrack whose NATed source matches @tuple and whose NAT state
 * is unbound or bound to @dev_index (see nf_nat_dev_matches()).
 */
struct nf_conn *nf_nat_find_by_src_dev(const struct nf_conntrack_tuple *tuple, int dev_index)
{
	return nf_nat_find_src(tuple, &dev_index, nf_nat_dev_matches);
}
EXPORT_SYMBOL(nf_nat_find_by_src_dev);

/* Replace @match_len bytes at @match_offset in the TCP payload of an
 * IPv4 @skb with @rep_buffer/@rep_len. The TCP header offset is derived
 * from the IP header length. The trailing "false" presumably suppresses
 * the automatic seqadj bookkeeping -- callers appear to use
 * nf_nat_mangle_tcp_sequence() instead; verify against
 * __nf_nat_mangle_tcp_packet()'s last parameter.
 */
int nf_nat_mangle_tcp_packet2(struct sk_buff *skb, struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo, unsigned int match_offset,
			      unsigned int match_len, const char *rep_buffer, unsigned int rep_len)
{
	struct iphdr *iph = ip_hdr(skb);

	return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, iph->ihl * 4,
					  match_offset, match_len,
					  rep_buffer, rep_len, false);
}
EXPORT_SYMBOL(nf_nat_mangle_tcp_packet2);

/* Record a TCP sequence-number adjustment of @diff bytes for @ct,
 * anchored at the sequence number of the segment in @skb. No-op when
 * @diff is zero.
 */
void nf_nat_mangle_tcp_sequence(struct sk_buff *skb, struct nf_conn *ct,
				enum ip_conntrack_info ctinfo, int diff)
{
	struct iphdr *iph;
	struct tcphdr *tcph;

	if (!diff)
		return;

	iph = ip_hdr(skb);
	/* Locate the TCP header right after the (variable-length) IP header. */
	tcph = (struct tcphdr *)((char *)iph + (iph->ihl * 4));
	nf_ct_seqadj_set(ct, ctinfo, tcph->seq, diff);
}
EXPORT_SYMBOL(nf_nat_mangle_tcp_sequence);

/* Log only when masquerade debugging was enabled at init time. */
#define MASQ_PRINT(format, args...) \
do { \
	if (g_masquerade_dbg) \
		pr_info(format, ##args); \
} while (0)

/* Dump one conntrack tuple to the kernel log; IPv4 tuples only. */
static inline void nf_nat_print_tuple(const struct nf_conntrack_tuple *t)
{
	if (t->src.l3num != AF_INET)
		return;

	pr_info("tuple : %u %pI4:%hu -> %pI4:%hu\n",
		t->dst.protonum,
		&t->src.u3.ip, ntohs(t->src.u.all),
		&t->dst.u3.ip, ntohs(t->dst.u.all));
}

/* Dump both directions of @ct's tuples when masquerade debugging is on. */
static inline void nf_nat_masquerade_dump_ct(const struct nf_conn *ct)
{
	if (!g_masquerade_dbg)
		return;

	nf_nat_print_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	nf_nat_print_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
}

/* Enable masquerade debug logging if the marker file exists on flash.
 * NOTE(review): opening files from kernel code (filp_open) is generally
 * discouraged; a module parameter or sysctl would be the conventional
 * switch -- confirm whether the marker-file scheme is a platform
 * requirement.
 */
void nf_nat_masquerade_dbg_init(void)
{
	struct file *fp;

	fp = filp_open("/mnt/jffs2/masqdbg", O_RDONLY, 0);
	if (IS_ERR(fp))
		return;

	pr_info("masquerade log enable\n");
	g_masquerade_dbg = 1;
	filp_close(fp, NULL);
}
EXPORT_SYMBOL(nf_nat_masquerade_dbg_init);

/* __masquerade_port_conflict - would NATing to @tuple's dst port collide?
 *
 * A port conflicts when it is in the skip table, claimed by a foreign
 * expectation, or already taken in the conntrack table. An expectation
 * created for this very flow (same master source address and saved port)
 * is NOT a conflict: the ALG wants exactly that port.
 *
 * Returns true on conflict, false when the port is usable.
 */
static bool __masquerade_port_conflict(struct nf_conntrack_tuple *tuple, struct nf_conn *ct)
{
	struct nf_conntrack_expect exp;	/* throwaway search key, stack only */
	struct nf_conntrack_expect *find_exp;
	struct nf_conntrack_tuple *t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

	if (nf_skip_nat_port(tuple, ct)) {
		MASQ_PRINT("MASQUERADE: port[%d] in skip table conflict\n",
			   ntohs(tuple->dst.u.all));
		return true;
	}

	/* Build an expect keyed on the candidate dst addr/port and look it up. */
	nf_ct_expect_init(&exp, NF_CT_EXPECT_CLASS_DEFAULT, (u8)nf_ct_l3num(ct),
			  NULL, &tuple->dst.u3, tuple->dst.protonum, NULL, &tuple->dst.u.all);
	find_exp = nf_conntrack_expect_find(nf_ct_net(ct), &exp);
	if (find_exp) {
		MASQ_PRINT("MASQUERADE: port[%u] in exp table\n", ntohs(tuple->dst.u.all));
		/* For alg expect, ports of downstream dnat are stored in saved_proto,
		 * but ips are not always stored in saved_addr. So we must use src addr
		 * of original tuple of master conntrack for comparison.
		 */
		if (find_exp->master &&
		    nf_inet_addr_cmp(&find_exp->master->tuplehash[!find_exp->dir].tuple.src.u3,
				     &t->src.u3) &&
		    find_exp->saved_proto.all == t->src.u.all) {
			MASQ_PRINT("MASQUERADE: exp match, use exp port[%d]\n",
				   ntohs(tuple->dst.u.all));
			return false;
		}

		MASQ_PRINT("MASQUERADE: port[%d] exp conflict\n", ntohs(tuple->dst.u.all));
		return true;
	}

	if (nf_conntrack_tuple_taken(tuple, ct)) {
		MASQ_PRINT("MASQUERADE: port[%d] in ct table conflict\n",
			   ntohs(tuple->dst.u.all));
		return true;
	}

	return false;
}

/* Check conflicts for the whole reserved-port group containing @tuple's
 * dst port: the group start is aligned down to a multiple of
 * @rsvd_portnum and all rsvd_portnum consecutive ports must be free so
 * ALGs needing adjacent ports (RTSP) can claim the whole group.
 * Assumes rsvd_portnum >= 1 (it is divided/modulo'd below).
 */
static bool nf_nat_masquerade_port_conflict(struct nf_conntrack_tuple *tuple,
					    struct nf_conn *ct, int rsvd_portnum)
{
	int i;
	u16 port;
	struct nf_conntrack_tuple t = *tuple;	/* scratch copy, caller's tuple untouched */

	port = ntohs(t.dst.u.all);
	port -= port % rsvd_portnum;	/* align to group start */

	for (i = 0; i < rsvd_portnum; i++, port++) {
		t.dst.u.all = htons(port);
		if (__masquerade_port_conflict(&t, ct))
			return true;
	}

	return false;
}

/* __masquerade_find_port - pick a conflict-free SNAT port for @tuple.
 *
 * Attempt order:
 * 1. keep the original port when it lies inside [minport, maxport] (or
 *    when no valid range was configured) and its reserved group is free;
 * 2. reuse the external port of an existing binding from the same source
 *    (full-cone behavior, filtered by @masq_index);
 * 3. scan the usable range in rsvd_portnum strides, resuming after the
 *    port handed out on the previous call (static cursor).
 *
 * On success @tuple->dst.u.all holds the chosen port and true is
 * returned.
 *
 * NOTE(review): the static cursor is unlocked shared state; concurrent
 * allocations may race on it -- presumably tolerated (worst case is a
 * duplicate starting point), verify.
 * NOTE(review): when minport < maxport but the original port lies
 * outside the range, min/max/range stay 0 so step 3 is skipped entirely
 * -- confirm this is the intended policy.
 */
static bool __masquerade_find_port(struct nf_conntrack_tuple *tuple, struct nf_conn *ct,
				   int rsvd_portnum, u16 minport, u16 maxport, int masq_index)
{
	static u16 port; /* default 0 */
	unsigned int i, count, range = 0;
	unsigned int min = 0, max = 0;
	struct nf_conn *find_ct;
	u16 tport = ntohs(tuple->dst.u.all);

	MASQ_PRINT("MASQUERADE: port[%u][%u-%u]\n", tport, minport, maxport);

	if (minport < maxport) {
		if (tport >= minport && tport <= maxport) {
			if (!nf_nat_masquerade_port_conflict(tuple, ct, rsvd_portnum))
				return true;

			/* Clamp the search window to the unprivileged port space. */
			min = minport > 1024 ? minport : 1024;
			max = maxport < 65535 ? maxport : 65535;
			range = max - min + 1;
		}
	} else {
		if (!nf_nat_masquerade_port_conflict(tuple, ct, rsvd_portnum))
			return true;

		min = 1024;
		max = 65535;
		range = max - min + 1;
	}

	MASQ_PRINT("MASQUERADE: min[%u],max[%u],range[%u]\n", min, max, range);

	/* First, check conflictive with original src port, then check with ct port
	 * after NAT that found in by-src table with the same masq_index. Test with
	 * flow just like this:
	 * A:10000-->wan:10000-->C:20000
	 * B:10000-->wan:1026 -->C:20000
	 * A:10000-->wan:10000-->D:20000
	 * B:10000-->wan:1026 -->D:20000
	 */
	find_ct = nf_nat_find_by_src_dev(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, masq_index);
	if (find_ct) {
		tuple->dst.u.all = find_ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all;
		MASQ_PRINT("MASQUERADE: use bysrc port[%u]\n", ntohs(tuple->dst.u.all));
		if (!nf_nat_masquerade_port_conflict(tuple, ct, rsvd_portnum))
			return true;
	}

	count = range / rsvd_portnum;

	/* tiny little optimization : skip the port(s) allocated last time */
	for (i = 0, port += rsvd_portnum; i < count; i++, port += rsvd_portnum) {
		tuple->dst.u.all = htons(min + port % range);
		if (!nf_nat_masquerade_port_conflict(tuple, ct, rsvd_portnum))
			return true;
	}

	return false;
}

/* Build the would-be reply tuple for SNAT to @newsrc and search for a
 * free external port. On success *findport is the chosen group start
 * plus the original source port's residue modulo rsvd_portnum, so an
 * internal port keeps its offset inside the reserved group (needed by
 * consecutive-port ALGs like RTSP).
 */
static bool nf_nat_masquerade_find_port(struct nf_conn *ct, __be32 newsrc,
					__be16 minport, __be16 maxport,
					__be16 *findport, int masq_index)
{
	u16 port;
	struct nf_conntrack_tuple reply;
	int rsvd_portnum = nf_nat_get_rsvd_portnum();

	/* setup reply tuple and try to get a non-conflict SNAT port */
	reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
	reply.dst.u3.ip = newsrc;

	if (__masquerade_find_port(&reply, ct, rsvd_portnum,
				   ntohs(minport), ntohs(maxport), masq_index)) {
		/* keep the same residue */
		port = ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all) % rsvd_portnum;
		port += ntohs(reply.dst.u.all) - (ntohs(reply.dst.u.all) % rsvd_portnum);
		*findport = htons(port);
		return true;
	}

	return false;
}

inline bool nf_nat_masquerade_proto_check(const struct nf_conn *ct)
{
	u8 protonum = nf_ct_protonum(ct);

	return (protonum == IPPROTO_TCP || protonum == IPPROTO_UDP);
}

/* nf_nat_masquerade_setup_info - perform SNAT to @newsrc for @ct.
 *
 * Searches a non-conflicting external port (honoring the configured port
 * range and reserved groups) and hands a fully-specified single-IP,
 * single-port range to nf_nat_setup_info().
 *
 * Returns the nf_nat_setup_info() verdict, or NF_DROP when no usable
 * port exists (the packet is marked for session debugging first).
 */
unsigned int
nf_nat_masquerade_setup_info(struct sk_buff *skb, struct nf_conn *ct,
			     const struct nf_conn_nat *nat,
			     const struct nf_nat_range2 *range, __be32 newsrc)
{
	__be16 find_port = 0;
	struct nf_nat_range2 newrange;

	MASQ_PRINT("MASQUERADE: Dev[0x%x], IP[%pI4]\n", nat->masq_index, &newsrc);
	nf_nat_masquerade_dump_ct(ct);

	if (!nf_nat_masquerade_find_port(ct, newsrc, range->min_proto.all, range->max_proto.all,
					 &find_port, nat->masq_index)) {
		pr_info("masquerade: could not find a proper SNAT port, drop it\n");
		nf_set_session_dbg_mark(skb, 2);
		return NF_DROP;
	}

	/* Transfer from original range. Zero the whole struct first so
	 * members we do not assign (e.g. base_proto) are not stack
	 * garbage; the old code memset only min_addr/max_addr.
	 */
	memset(&newrange, 0, sizeof(newrange));
	newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED;
	newrange.min_addr.ip = newsrc;
	newrange.max_addr.ip = newsrc;
	newrange.min_proto.all = find_port;
	newrange.max_proto.all = find_port;

	MASQ_PRINT("MASQUERADE: find port[%d]\n", ntohs(find_port));

	/* Hand modified range to generic setup. */
	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}

/* checkentry for the MASQUERADE target: at most one range is allowed,
 * and when present it must map to exactly one non-zero IP. On success a
 * conntrack netns reference is taken (released by the target's destroy
 * path elsewhere).
 */
int nf_nat_masquerade_tg_check(const struct xt_tgchk_param *par)
{
	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;

	if (mr->rangesize > 1) {
		pr_info("masq_check: bad rangesize %u\n", mr->rangesize);
		return -EINVAL;
	}

	if (mr->rangesize == 1) {
		if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
			pr_info("masq_check: not MAP_IPS.\n");
			return -EINVAL;
		}

		/* Masquerade supports mapping to a single IP only. */
		if (mr->range[0].min_ip != mr->range[0].max_ip) {
			pr_info("masq_check: err MAP_IPS %pI4 != %pI4\n",
				&mr->range[0].min_ip, &mr->range[0].max_ip);
			return -EINVAL;
		}

		if (!mr->range[0].min_ip) {
			pr_info("masq_check: err zero min_ip\n");
			return -EINVAL;
		}
	}

	return nf_ct_netns_get(par->net, par->family);
}
EXPORT_SYMBOL(nf_nat_masquerade_tg_check);

struct port_iter {
	bool skip_used;
	char add_num;
	u16  ori_port;
	u16  last_port;
	u32  last_index;
};

#define USED_PORTS_MAX_NUM 256

/* Ring of recently allocated MAP-tunnel ports, in allocation order. */
static u16 g_used_ports[USED_PORTS_MAX_NUM] = {0};
/* The same ports kept sorted for bsearch(); 0 marks an empty slot. */
static u16 g_ports_sorted[USED_PORTS_MAX_NUM] = {0};
/* Index of the most recently written slot in g_used_ports. */
static u32 g_last_port_idx = USED_PORTS_MAX_NUM - 1;

/* sort()/bsearch() comparator: ascending numeric order of u16 ports. */
static int cmp_port(const void *a, const void *b)
{
	u16 lhs = *(const u16 *)a;
	u16 rhs = *(const u16 *)b;

	return (int)lhs - (int)rhs;
}

/* Binary-search @port in the sorted table; NULL when absent.
 * Note: empty slots hold 0, so search_port(0) matches "an empty slot
 * exists" rather than a real port.
 */
static u16 *search_port(u16 port)
{
	return (u16 *)bsearch(&port, g_ports_sorted, USED_PORTS_MAX_NUM, sizeof(u16), cmp_port);
}

/* Re-sort the lookup table after an entry was replaced in place. */
static void sort_port(void)
{
	sort(g_ports_sorted, USED_PORTS_MAX_NUM, sizeof(u16), cmp_port, NULL);
}

/* True when @port currently appears in the sorted used-port table. */
static bool is_port_used(u16 port)
{
	return search_port(port) != NULL;
}

/* get_next_port - advance the iterator and return the next candidate port.
 *
 * Phase 1 (skip_used): step from ori_port by add_num, returning the first
 * port absent from the used-port table. Near the top of the range the
 * direction is reversed to scan downward from ori_port instead. The loop
 * keeps going while last_port > 1024 (the reverse scan stops at 1024) or
 * while still at the very first step (last_port == ori_port - add_num).
 *
 * Phase 2: once every unused port has been offered, re-offer ports from
 * the g_used_ports ring, starting after g_last_port_idx and stopping
 * after one full wrap.
 *
 * Returns 0 when the whole traversal is exhausted. The self-call on
 * phase transition recurses at most one level.
 */
static u16 get_next_port(struct port_iter *iter)
{
	u16 ret_port = 0;

	if (iter->add_num == 0)
		return 0;

	if (iter->skip_used) {
		/* reached 1024 means "all valid port checked" */
		while (iter->last_port > 1024 ||
		       iter->last_port == iter->ori_port - iter->add_num) {
			if (iter->add_num > 0 &&
			    iter->last_port > (65535 - iter->add_num)) {
				/* No available port, reverse traversal */
				iter->add_num = -iter->add_num;
				iter->last_port = iter->ori_port;
			}

			iter->last_port += iter->add_num;
			if (!is_port_used(iter->last_port))
				return iter->last_port;
		}

		/* All port used, try used ports */
		iter->skip_used = false;
		iter->last_index = g_last_port_idx;

		return get_next_port(iter);
	}

	while (ret_port == 0) {
		iter->last_index++;
		if (iter->last_index >= USED_PORTS_MAX_NUM)
			iter->last_index = 0;

		if (iter->last_index == g_last_port_idx)
			return 0; /* Traversal end */

		ret_port = g_used_ports[iter->last_index];
	}

	return ret_port;
}

/* mark_used_port - record @port in the used-port ring and lookup table.
 *
 * @last_index is the g_used_ports slot the caller's iterator stopped at.
 * If that slot already holds @port (the port was re-offered from the
 * ring), only the ring cursor moves there. Otherwise the cursor advances
 * with wrap-around, the port previously stored in the claimed slot is
 * replaced by @port in the sorted table (then re-sorted), and the ring
 * slot is overwritten.
 */
static void mark_used_port(u16 port, u32 last_index)
{
	u16 *pos;
	u16 chg_from = 0;

	if (last_index >= USED_PORTS_MAX_NUM) {
		pr_warn("invalid index: %u\n", last_index);
		return;
	}

	if (g_used_ports[last_index] == port) {
		g_last_port_idx = last_index;
		return;
	}

	g_last_port_idx++;
	if (g_last_port_idx >= USED_PORTS_MAX_NUM)
		g_last_port_idx = 0;

	/* Remember which port we are evicting from the claimed slot. */
	if (g_used_ports[g_last_port_idx] != port)
		chg_from = g_used_ports[g_last_port_idx];

	/* Swap the evicted port for the new one in the sorted table. */
	if (chg_from != port) {
		pos = search_port(chg_from);
		if (pos) {
			*pos = port;
			sort_port();
		}
	}

	g_used_ports[g_last_port_idx] = port;
}

/* nf_nat_get_port_for_map_tunnel - pick an FTP ALG port valid on a MAP
 * tunnel device.
 *
 * Starts at the saved (internal) port and walks candidates via
 * get_next_port(): first ports absent from the local used-port cache,
 * then cached ones. A candidate must not clash with an existing
 * expectation, must belong to the MAP device's allowed port-set, and
 * must not collide with a conntrack entry for the ftp-data counterpart
 * (src port 20, active mode). The winner is registered as an expectation
 * and remembered in the used-port cache.
 *
 * Returns the chosen port in host byte order, 0 on failure.
 */
static u16 nf_nat_get_port_for_map_tunnel(struct nf_conntrack_expect *exp,
					  struct nf_conn *ct, const char *devname)
{
	int ret;
	u16 port;
	struct port_iter iter = {0};
	struct nf_conntrack_tuple dup_tuple;

	iter.ori_port = ntohs(exp->saved_proto.tcp.port);
	iter.last_port = iter.ori_port - 1;	/* so the first step lands on ori_port */
	iter.skip_used = true;
	iter.add_num = 1;

	port = iter.ori_port;
	if (is_port_used(port))
		port = get_next_port(&iter);

	for (; port != 0; port = get_next_port(&iter)) {
		exp->tuple.dst.u.tcp.port = htons(port);
		if (nf_ct_expect_exist(exp, 0)) {
			pr_warn("ftp alg expect port conflict:%hu\n", port);
			continue;
		}

		if (!nf_check_map_port_valid(devname, port, 1))
			continue;

		dup_tuple = exp->tuple;
		/* 20: ftp server port (active mode) */
		dup_tuple.src.u.tcp.port = htons(20);
		ret = nf_conntrack_tuple_taken(&dup_tuple, ct);
		if (ret) {
			pr_warn("ftp alg dup_tuple conflict:%hu\n", port);
			continue;
		}

		ret = nf_ct_expect_related(exp, 0);
		if (ret == 0) {
			mark_used_port(port, iter.last_index);
			break;
		} else if (ret != -EBUSY) {
			/* hard failure: give up rather than keep scanning */
			port = 0;
			break;
		}
	}

	return port;
}

/* Probe ports upward from the saved port until an expectation can be
 * registered. An already-existing identical expectation also ends the
 * scan with the current port -- presumably a retransmission reusing the
 * same mapping; verify. u16 wrap-around past 65535 makes port 0, ending
 * the loop with failure.
 */
static u16 nf_nat_get_related_port_up(struct nf_conntrack_expect *exp, struct nf_conn *ct)
{
	int ret;
	u16 port;

	for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
		exp->tuple.dst.u.tcp.port = htons(port);
		if (nf_ct_expect_exist(exp, 0))
			break;

		if (!nf_check_nat_port_inuse(&exp->tuple, ct, 1)) {
			ret = nf_ct_expect_related(exp, 0);
			if (ret == 0) {
				break;
			} else if (ret != -EBUSY) {
				/* hard failure: abort the scan */
				port = 0;
				break;
			}
		}
	}

	return port;
}

/* Fallback to nf_nat_get_related_port_up(): probe downward from just
 * below the saved port to 1025. Same early-exit rules as the upward
 * scan. Returns 0 when nothing below the saved port is usable.
 */
static u16 nf_nat_get_related_port_down(struct nf_conntrack_expect *exp, struct nf_conn *ct)
{
	int ret;
	u16 port;

	/* try the port range 1025-saved_port */
	for (port = ntohs(exp->saved_proto.tcp.port) - 1; port > 1024; port--) {
		exp->tuple.dst.u.tcp.port = htons(port);
		if (nf_ct_expect_exist(exp, 0))
			break;

		if (!nf_check_nat_port_inuse(&exp->tuple, ct, 1)) {
			ret = nf_ct_expect_related(exp, 0);
			if (ret == 0) {
				break;
			} else if (ret != -EBUSY) {
				/* hard failure: abort the scan */
				port = 0;
				break;
			}
		}
	}

	/* 1024 is not a valid port */
	if (port <= 1024) {
		pr_warn("ftp alg: no port available\n");
		port = 0;
	}

	return port;
}

/* expectfn for MAP-tunnel FTP data connections: DNAT the child flow back
 * to the internal host, using the source address of the master's other
 * direction and the port saved in the expectation.
 */
static void nf_nat_map_tnl_proc(struct nf_conn *ct, struct nf_conntrack_expect *exp)
{
	struct nf_nat_range2 range = {0};

	/* We do DNAT with port here because nf_nat_follow_master has not select any port. */
	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
	range.min_proto = exp->saved_proto;
	range.max_proto = exp->saved_proto;
	range.min_addr = ct->master->tuplehash[!exp->dir].tuple.src.u3;
	range.max_addr = ct->master->tuplehash[!exp->dir].tuple.src.u3;

	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}

/* Choose the NAT port for an FTP data expectation.
 *
 * MAP-tunnel cases: for the reply direction, attach the tunnel DNAT
 * expectfn; for the original direction, allocate from the MAP device's
 * port-set. Otherwise scan upward from the saved port and fall back to
 * a downward scan. Returns the port in host byte order, 0 on failure.
 */
u16 nf_nat_adapt_ftp_port(struct sk_buff *skb, struct nf_conntrack_expect *exp, int dir)
{
	struct nf_conn *ct = exp->master;
	u16 port;

	if (dir == IP_CT_DIR_REPLY && nf_check_map_tunnel(skb, ct))
		exp->expectfn = nf_nat_map_tnl_proc;

	if (dir == IP_CT_DIR_ORIGINAL && nf_check_map_tunnel(skb, ct)) {
		const char *devname = skb->dev ? skb->dev->name : MAP_DEVICE_NAME;

		return nf_nat_get_port_for_map_tunnel(exp, ct, devname);
	}

	port = nf_nat_get_related_port_up(exp, ct);

	return port ? port : nf_nat_get_related_port_down(exp, ct);
}
EXPORT_SYMBOL(nf_nat_adapt_ftp_port);

/* Device (masquerade) index bound to @ct's NAT state.
 * Returns -1 when @ct is NULL, -2 when it has no NAT extension.
 */
static int nf_nat_get_devindex(const struct nf_conn *ct)
{
	const struct nf_conn_nat *nat;

	if (!ct)
		return -1;

	nat = nfct_nat(ct);

	return nat ? nat->masq_index : -2;
}

/* if an expect or conntrack has been created for the port, use that NAT port. */
/* nf_nat_find_sip_port - choose the external (NAT) port for a SIP expect.
 *
 * Lookup order:
 * 1. an existing expectation with the same saved addr/port: reuse its
 *    port and refresh its timer (*from_expect set true, no new expect
 *    needs registering);
 * 2. an existing conntrack from the same internal source -- matched by
 *    device index, or by WAN IP -- reuse its NATed port;
 * 3. probe upward from the saved port in rsvd_num strides, then downward
 *    to 1025, for a port group that is free and valid for the MAP
 *    device's port-set.
 *
 * Returns 1 with exp->tuple.dst.u.udp.port set on success, 0 otherwise.
 */
static int nf_nat_find_sip_port(struct nf_conntrack_expect *exp, bool *from_expect)
{
	struct nf_conn *ct = exp->master;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct_find;
	struct nf_conntrack_expect *exp_find;
	u16 port, endport;
	int dev_index;
	int rsvd_num;

	/* Key the lookups on the internal (pre-NAT) source addr/port. */
	memset(&tuple, 0, sizeof(tuple));
	tuple.src.u3.ip = exp->saved_addr.ip;
	tuple.src.u.udp.port = exp->saved_proto.udp.port;
	tuple.src.l3num = exp->tuple.src.l3num;
	tuple.dst.protonum = exp->tuple.dst.protonum;

	exp_find = nf_ct_expect_save_proto_find(&tuple);
	if (exp_find) {
		*from_expect = true;
		exp->tuple.dst.u.udp.port = exp_find->tuple.dst.u.udp.port;
		nf_ct_expect_refresh_timer(exp_find);
		return 1;
	}

	dev_index = nf_nat_get_devindex(ct);
	if (dev_index > 0) {
		ct_find = nf_nat_find_by_src_dev(&tuple, dev_index);
		if (ct_find) {
			exp->tuple.dst.u.udp.port =
				ct_find->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.udp.port;
			return 1;
		}

		ct_find = nf_conntrack_find_src_by_wanip(&tuple, &exp->tuple.dst.u3);
		if (ct_find) {
			exp->tuple.dst.u.udp.port =
				ct_find->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.udp.port;
			return 1;
		}
	}

	tuple.dst.u3.ip = exp->tuple.dst.u3.ip;
	rsvd_num = nf_nat_get_rsvd_portnum();
	endport = 65535UL - rsvd_num;

	/* Upward scan from the saved port, whole reserved group at a time. */
	for (port = ntohs(exp->saved_proto.udp.port); port <= endport; port += rsvd_num) {
		tuple.dst.u.udp.port = htons(port);
		if (!nf_check_nat_port_inuse(&tuple, ct, rsvd_num) &&
		    nf_check_map_port_valid(MAP_DEVICE_NAME, port, rsvd_num)) {
			exp->tuple.dst.u.udp.port = htons(port);
			return 1;
		}
	}

	/* Downward scan below the saved port, stopping above 1024. */
	for (port = ntohs(exp->saved_proto.udp.port) - 1; port > 1024; port -= rsvd_num) {
		tuple.dst.u.udp.port = htons(port);
		if (!nf_check_nat_port_inuse(&tuple, ct, rsvd_num) &&
		    nf_check_map_port_valid(MAP_DEVICE_NAME, port, rsvd_num)) {
			exp->tuple.dst.u.udp.port = htons(port);
			return 1;
		}
	}

	/* Fixed "WANNING" typo and convert the __be16 port to host order
	 * for display (%u on a raw __be16 printed a byte-swapped value on
	 * little-endian systems).
	 */
	pr_warn("WARNING: could not find available port for %pI4:%u\n",
		&exp->saved_addr.ip, ntohs(exp->saved_proto.udp.port));
	return 0;
}

/* nf_nat_adapt_sip_port - select and register the NAT port for a SIP
 * expectation.
 *
 * @port: out, chosen port in host byte order
 * @exp_related: out, true when a new expectation was registered here
 *               (false when an existing one was reused)
 *
 * Returns 1 on success, 0 when no port was found or registration failed.
 */
int nf_nat_adapt_sip_port(struct nf_conntrack_expect *exp, u16 *port, bool *exp_related)
{
	bool from_expect = false;

	if (!nf_nat_find_sip_port(exp, &from_expect))
		return 0;

	*port = ntohs(exp->tuple.dst.u.udp.port);

	/* Port reused from an existing expect: nothing new to register. */
	if (from_expect) {
		*exp_related = false;
		return 1;
	}

	if (!nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER)) {
		*exp_related = true;
		return 1;
	}

	/* Fixed "WANNING" typo and print the __be16 port in host order. */
	pr_warn("WARNING: could not relate expect for %pI4:%u\n",
		&exp->saved_addr.ip, ntohs(exp->saved_proto.udp.port));
	return 0;
}
EXPORT_SYMBOL(nf_nat_adapt_sip_port);
EXPORT_SYMBOL(nf_nat_adapt_sip_port);

/* nf_nat_adapt_rtp_port - select and register NAT ports for an RTP/RTCP
 * expectation pair.
 *
 * The RTP port is forced even (& ~1) and RTCP gets the following odd
 * port, per the usual RTP convention. Both expectations are registered
 * together; on partial failure the RTP one is rolled back.
 *
 * @port: out, chosen (even) RTP port in host byte order
 * @exp_related: out, true when new expectations were registered here
 *
 * Returns 1 on success, 0 on failure.
 */
int nf_nat_adapt_rtp_port(struct nf_conntrack_expect *rtp_exp,
			  struct nf_conntrack_expect *rtcp_exp,
			  u16 *port, bool *exp_related)
{
	bool from_expect = false;

	if (!nf_nat_find_sip_port(rtp_exp, &from_expect))
		return 0;

	*port = ntohs(rtp_exp->tuple.dst.u.udp.port) & ~1;
	rtp_exp->tuple.dst.u.udp.port = htons(*port);
	rtcp_exp->tuple.dst.u.udp.port = htons(*port + 1);

	if (from_expect) {
		*exp_related = false;
		return 1;
	}

	if (!nf_ct_expect_related(rtp_exp, NF_CT_EXP_F_SKIP_MASTER)) {
		if (!nf_ct_expect_related(rtcp_exp, NF_CT_EXP_F_SKIP_MASTER)) {
			*exp_related = true;
			return 1;
		}
		/* RTCP failed: undo the RTP registration. */
		nf_ct_unexpect_related(rtp_exp);
	}

	/* Fixed "WANNING" typo and print the __be16 port in host order. */
	pr_warn("WARNING: could not relate expect for %pI4:%u\n",
		&rtp_exp->saved_addr.ip, ntohs(rtp_exp->saved_proto.udp.port));
	return 0;
}
EXPORT_SYMBOL(nf_nat_adapt_rtp_port);
