// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mroute6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/ipv6_frag.h>
#include "./ip_vs_fnat.h"

/* Slab cache name for the private IPv6 reassembly queue objects. */
static const char ip_vs_frags_cache_name[] = "ipvs-ipv6-frags";

/* inet_frags descriptor shared by all netns; filled in ip_vs_frag_init_v6(). */
static struct inet_frags ip_vs_frags_v6;

/*
 *	Options "fragmenting", just fill options not
 *	allowed in fragments with NOOPs.
 *	Simple and stupid 8), but the most efficient way.
 */
/*
 *	Options "fragmenting": overwrite every option that must not be
 *	copied into fragments (IPOPT_COPIED bit clear) with NOOP bytes,
 *	then clear the per-first-fragment option bookkeeping.
 *	Simple and stupid 8), but the most efficient way.
 */
static void ip_vs_ip_options_fragment(struct sk_buff *skb)
{
	struct ip_options *opts = &(IPCB(skb)->opt);
	unsigned char *optp = skb_network_header(skb) + sizeof(struct iphdr);
	int remaining = opts->optlen;

	while (remaining > 0) {
		int olen;

		if (*optp == IPOPT_END)
			return;
		if (*optp == IPOPT_NOOP) {
			optp++;
			remaining--;
			continue;
		}
		olen = optp[1];
		/* Malformed option list: stop rather than run past it. */
		if (olen < 2 || olen > remaining)
			return;
		if (!IPOPT_COPIED(*optp))
			memset(optp, IPOPT_NOOP, olen);
		optp += olen;
		remaining -= olen;
	}
	/* Timestamp/record-route state only applies to the first fragment. */
	opts->ts = 0;
	opts->rr = 0;
	opts->rr_needaddr = 0;
	opts->ts_needaddr = 0;
	opts->ts_needtime = 0;
}

/*
 * Copy per-packet metadata (packet type, priority, protocol, dst, device,
 * mark, IPCB flags, tc index, netfilter state, IPVS property, secmark)
 * from @from onto the freshly built fragment @skb_to, so the fragment
 * traverses the stack exactly like the original packet would.
 */
static void ip_vs_ip_copy_metadata(struct sk_buff *skb_to, struct sk_buff *from)
{
	skb_to->pkt_type = from->pkt_type;
	skb_to->priority = from->priority;
	skb_to->protocol = from->protocol;
	/* Drop any stale route before taking a reference on the original's. */
	skb_dst_drop(skb_to);
	skb_dst_copy(skb_to, from);
	skb_to->dev = from->dev;
	skb_to->mark = from->mark;

	IPCB(skb_to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	skb_to->tc_index = from->tc_index;
#endif
	nf_copy(skb_to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	skb_to->nf_trace = from->nf_trace;
#endif
#if IS_ENABLED(CONFIG_IP_VS) || IS_ENABLED(CONFIG_IP_VS_MODULE)
	skb_to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(skb_to, from);
}

/*
 * Fast-path IPCB fixup for the next fragment in the frag list: carry the
 * control-block flags over, and NOOP out non-copied IP options while we
 * are still at offset 0 (the first fragment keeps the full option set).
 */
static void ip_vs_ip_fraglist_ipcb_prepare(struct sk_buff *skb,
					struct ip_fraglist_iter *iter)
{
	struct sk_buff *frag = iter->frag;

	IPCB(frag)->flags = IPCB(skb)->flags;

	if (!iter->offset)
		ip_vs_ip_options_fragment(frag);
}

/*
 * Fast-path fragment preparation (mirrors ip_fraglist_prepare()):
 * prepend a copy of the current IPv4 header onto iter->frag, fix up its
 * total length, fragment offset and MF bit, copy the packet metadata,
 * and finalize the header checksum.
 */
static void ip_vs_ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	/* Make room in front of the payload and copy the IP header there. */
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	/* From now on iterate with this fragment's header. */
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_vs_ip_copy_metadata(frag, skb);
	/* Advance the running offset by the previous fragment's payload. */
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}

/*
 * Slow-path IPCB fixup: propagate the control-block flags to the new
 * fragment @to; on the first fragment, also NOOP out the non-copied IP
 * options in the original @from before further fragments are carved.
 * @state is unused but kept for signature parity with the slow path.
 */
static void ip_vs_ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			bool first_frag, struct ip_frag_state *state)
{
	IPCB(to)->flags = IPCB(from)->flags;

	if (!first_frag)
		return;

	ip_vs_ip_options_fragment(from);
}

/*
 * Final IPv4 transmit step (mirrors ip_finish_output2()): update
 * mcast/bcast counters, guarantee enough headroom for the link-layer
 * header, resolve the next-hop neighbour and hand the skb to it.
 *
 * Consumes @skb in all cases.  Returns the neigh_output() result, or a
 * negative errno on headroom-realloc or neighbour failure.
 */
int ip_vs_ip_finish_output2(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dest = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dest;
	struct net_device *ndev = dest->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(ndev);
	struct neighbour *nb;
	u32 next_hop;

	if (rt->rt_type == RTN_MULTICAST)
		IP_UPD_PO_STATS(dev_net(ndev), IPSTATS_MIB_OUTMCAST, skb->len);
	else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(ndev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever: reallocate when the device
	 * needs more headroom than the skb can offer.
	 */
	if (unlikely(skb_headroom(skb) < hh_len && ndev->header_ops)) {
		struct sk_buff *skbuff2;

		skbuff2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(ndev));
		if (skbuff2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skbuff2, skb->sk);
		consume_skb(skb);
		skb = skbuff2;
	}

	rcu_read_lock_bh();
	next_hop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	nb = __ipv4_neigh_lookup_noref(ndev, next_hop);
	if (unlikely(nb == NULL))
		nb = __neigh_create(&arp_tbl, &next_hop, ndev, false);
	if (!IS_ERR(nb)) {
		/* neigh_output() consumes the skb; unlock after it returns. */
		int ret = neigh_output(nb, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return -EINVAL;
}

/*
 * Fragment an IPv4 skb so that each fragment still fits the egress MTU
 * after VXLAN encapsulation, then pass every fragment through
 * vxlan_encapsulation() -> @output.  Mirrors ip_do_fragment(): a fast
 * path that reuses an existing frag list and a slow path that carves
 * fragments with ip_frag_init()/ip_frag_next().
 *
 * Consumes @skb on success; frees it on every failure path.
 * Returns 0 on success or a negative errno.
 */
int ip_vs_ip_fragment(struct netns_ipvs *ipvs,
		struct sock *sk, struct sk_buff *skb,
		int (*output)(struct sock *, struct sk_buff *),
					struct ip_vs_conn *cp)
{
	struct iphdr *ip_h;
	struct sk_buff *skb2;
	unsigned int mtu, h_len, ll_rs;
	struct ip_fraglist_iter iter;
	struct rtable *rtt = skb_rtable(skb);
	struct ipvs_vxlan_encapsulation_info encap_info;
	struct ip_frag_state state;
	int ret = 0;

	ip_h = ip_hdr(skb);
	/* Effective MTU: the smaller of the configured device MTU and the
	 * route MTU minus the worst-case VXLAN encapsulation overhead.
	 */
	mtu = (sysctl_dev_mtu(ipvs) > (dst_mtu(&rtt->dst) - IP_VS_VXLAN_ENCAP_MAX_LEN)) ?
		(dst_mtu(&rtt->dst) - IP_VS_VXLAN_ENCAP_MAX_LEN) : sysctl_dev_mtu(ipvs);
	h_len = ip_h->ihl * 4;
	if (mtu < h_len) {
		pr_err("mtu less than h_len.\n");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* DF set (and not overridden) or a received-fragment size larger
	 * than our MTU: report FRAG_NEEDED back to the sender.
	 */
	if (unlikely(((ip_h->frag_off & htons(IP_DF)) && !skb->ignore_df)) ||
		(IPCB(skb)->frag_max_size &&
			IPCB(skb)->frag_max_size > mtu)) {
		IP_VS_INC_ESTATS(ip_vs_esmib, IP_FRAG_UNEXPECTED_MTU);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	mtu = mtu - h_len;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rtt->dst.dev);

	/* Fast path: the skb already carries a frag list whose geometry
	 * happens to match what we need; reuse it instead of copying.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *skb_frag, *skb_frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - h_len > mtu ||
		    ((first_len - h_len) & 7) ||
		    ip_is_fragment(ip_h) ||
		    skb_cloned(skb) ||
			skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, skb_frag) {
			/* Every list member must fit the MTU, be 8-byte
			 * aligned (except the last) and have header room.
			 */
			if (skb_frag->len > mtu ||
			    ((skb_frag->len & 7) && skb_frag->next) ||
			    skb_headroom(skb_frag) < h_len +ll_rs)
				goto slow_path_clean;

			if (skb_shared(skb_frag))
				goto slow_path_clean;

			if (skb_frag->sk != NULL) {
				pr_err("frag->sk is not NULL!\n");
				kfree_skb(skb);
				return -EINVAL;
			}

			/* Transfer socket write accounting to the fragment. */
			if (skb->sk) {
				skb_frag->sk = skb->sk;
				skb_frag->destructor = sock_wfree;
			}
			skb->truesize -= skb_frag->truesize;
		}

		ip_fraglist_init(skb, ip_h, h_len, &iter);

		for (;;) {
			/* Prepare the next fragment's header before sending
			 * the current one.
			 */
			if (iter.frag) {
				ip_vs_ip_fraglist_ipcb_prepare(skb, &iter);
				ip_vs_ip_fraglist_prepare(skb, &iter);
			}
			encap_info.peer_vtep = NULL;
			encap_info.vni = &cp->vni;
			ret = vxlan_encapsulation(skb, &encap_info, output);
			if (ret || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (ret == 0)
			return 0;

		/* Send failed mid-list: free the not-yet-sent fragments. */
		kfree_skb_list(iter.frag);

		return ret;

slow_path_clean:
		/* Undo socket/truesize transfers done before the bail-out. */
		skb_walk_frags(skb, skb_frag2) {
			if (skb_frag2 == skb_frag)
				break;
			skb_frag2->sk = NULL;
			skb_frag2->destructor = NULL;
			skb->truesize += skb_frag2->truesize;
		}
	}

slow_path:
	/* Copy-based fragmentation via the generic slow-path helpers. */
	ip_frag_init(skb, h_len, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
			&state);
	while (state.left > 0) {
		bool first_frag = (state.offset == 0);
		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			ret = PTR_ERR(skb2);
			goto err;
		}

		ip_vs_ip_frag_ipcb(skb, skb2, first_frag, &state);

		encap_info.peer_vtep = NULL;
		encap_info.vni = &cp->vni;
		ret = vxlan_encapsulation(skb2, &encap_info, output);
		if (ret)
			goto err;

	}
	consume_skb(skb);
	return ret;

err:
	kfree_skb(skb);
	return ret;
}

#ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 variant of ip_vs_ip_copy_metadata(): copy per-packet metadata
 * from @from onto the freshly built fragment @to (packet type, priority,
 * protocol, a cloned dst reference, device, mark, tc index, netfilter
 * state and secmark).
 */
static void ip_vs_ip_copy_metadata_v6(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	/* Drop any stale route, then share the original's dst reference. */
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}

/*
 * Fast-path IPv6 fragment preparation (mirrors ip6_fraglist_prepare()):
 * push a fragment header plus a copy of the saved unfragmentable part
 * onto iter->frag, fill in offset/MF/identification, fix payload_len
 * and copy the packet metadata.
 */
static void ip_vs_ip6_fraglist_prepare(struct sk_buff *skb,
             struct ip6_fraglist_iter *iter)
{
	struct sk_buff *frag = iter->frag;
	unsigned int hlen = iter->hlen;
	struct frag_hdr *fh;

	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	/* Fragment header first, then the copied unfragmentable part. */
	fh = __skb_push(frag, sizeof(struct frag_hdr));
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
	/* Advance by the previous fragment's payload length. */
	iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
	fh->nexthdr = iter->nexthdr;
	fh->reserved = 0;
	fh->frag_off = htons(iter->offset);
	if (frag->next)
		fh->frag_off |= htons(IP6_MF);
	fh->identification = iter->frag_id;
	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
	ip_vs_ip_copy_metadata_v6(frag, skb);
}

int ip_vs_ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh = NULL;
	int ret;

	EnterFunction(10);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_is_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr_handle(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb != NULL)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					&init_net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);
		IP_VS_DBG(10, "%s: send mcast packet. \n", __func__);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(dst->dev,
			rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr));
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl,
				rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr), dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	IP_VS_DBG(10, "%s: no route to send packet. \n", __func__);
	kfree_skb(skb);
	return -EINVAL;
}

int ip_vs_ipv6_frag_over_vxlan(struct netns_ipvs *ipvs,
		struct sock *sk, struct sk_buff *skb,
		int (*output)(struct sock *, struct sk_buff *),
		struct ipvs_vxlan_encapsulation_info *encap_info)
{
	struct sk_buff *frag;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ip6_frag_state state;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, len, nexthdr_offset;
	int hroom;
	__be32 frag_id;
	int err = 0;
	u8 *fragnexthdr_offset = NULL;
	u8 *prevhdr = NULL;
	u8 nexthdr = 0;

	EnterFunction(10);

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;
	nexthdr_offset = prevhdr - skb_network_header(skb);

	mtu = (sysctl_dev_mtu(ipvs) > (dst_mtu(dst) - IP_VS_VXLAN_ENCAP_MAX_LEN)) ?
			(dst_mtu(dst) - IP_VS_VXLAN_ENCAP_MAX_LEN) : sysctl_dev_mtu(ipvs);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu)) {
		IP_VS_INC_ESTATS(ip_vs_esmib, IPV6_FRAG_UNEXPECTED_MTU);
		goto fail_toobig;
	}
	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu) {
			IP_VS_INC_ESTATS(ip_vs_esmib, IPV6_FRAG_UNEXPECTED_MAXSIZE);
			goto fail_toobig;
		}
		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if ((np != NULL) && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}

	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(ipvs->net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	prevhdr = skb_network_header(skb) + nexthdr_offset;
	hroom = LL_RESERVED_SPACE(dst->dev);
	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			if (frag->sk != NULL) {
				pr_err("frag->sk is not NULL!\n");
				kfree_skb(skb);
				return -EINVAL;
			}

			if (skb->sk != NULL) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0) {
			goto fail;
		}

		dst_hold(dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag)
				ip_vs_ip6_fraglist_prepare(skb, &iter);

			err = vxlan_encapsulation(skb, encap_info, output);
			if (err || (iter.frag == NULL))
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(tmp_hdr);

		if (err == 0) {
			dst_release(dst);
			return 0;
		}

		kfree_skb_list(frag);

		IP_VS_DBG(0, "IPv6: fragment failed!\n");
		IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_IPV6_FRAG_FAILED);
		dst_release(dst);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	ip6_frag_init(skb, hlen, mtu, dst->dev->needed_tailroom,
				LL_RESERVED_SPACE(dst->dev), prevhdr, nexthdr, frag_id,
				&state);

	/*
	 *	Keep copying data until we run out.
	 */
	while (state.left > 0)	{
		len = state.left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > state.mtu)
			len = state.mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < state.left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */
		frag = alloc_skb(len + state.hlen + sizeof(struct frag_hdr)
						+ state.hroom + state.troom, GFP_ATOMIC);
		if (frag == NULL) {
			IP_VS_DBG(0, "IPv6: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_vs_ip_copy_metadata_v6(frag, skb);
		skb_reserve(frag, state.hroom);
		skb_put(frag, len + state.hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + state.hlen);
		frag->transport_header = (frag->network_header + state.hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk != NULL)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), state.hlen);

		fragnexthdr_offset = skb_network_header(frag);
		fragnexthdr_offset += state.prevhdr - skb_network_header(skb);
		*fragnexthdr_offset = NEXTHDR_FRAGMENT;

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = state.nexthdr;
		fh->reserved = 0;
		fh->identification = state.frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, state.ptr, skb_transport_header(frag), len)) {
			IP_VS_DBG(0, "%s: fragment error at skb_copy_bits!\n", __func__);
			err = -EINVAL;
			goto fail;
		}

		state.left -= len;

		fh->frag_off = htons(state.offset);
		if (state.left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		state.ptr += len;
		state.offset += len;

		err = vxlan_encapsulation(frag, encap_info, output);
		if (err)
			goto fail;

		IP_VS_DBG(10, "%s: success to send ipv6 fragment over vxlan!\n",
			__func__);
	}

	consume_skb(skb);
	return err;

fail_toobig:
	if ((skb->sk != NULL) && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_IPV6_FRAG_FAILED);
	kfree_skb(skb);
	return err;
}

/* Map the packet's ECN bits (low two bits of the traffic class) to the
 * one-hot mask used to index ip_frag_ecn_table.
 */
static inline u8 ip_vs_frag_ecn_v6(const struct ipv6hdr *ipv6h)
{
	u8 ecn = ipv6_get_dsfield(ipv6h) & INET_ECN_MASK;

	return 1 << ecn;
}

/*
 * Reassemble a complete IPv6 fragment queue into @skb (the first
 * fragment), stripping the fragment header out of the packet in place.
 * Mirrors nf_ct_frag6_reasm().  Returns 0 on success, -1 on failure
 * (conflicting ECN, prepare failure, or oversized payload).
 */
static int ip_frag_reasm_v6(struct frag_queue *fqueue, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev)
{
	void *reasm_data;
	int payload;
	unsigned int nhoff;
	u8 ecn;

	inet_frag_kill(&fqueue->q);

	/* 0xff marks an inconsistent ECN mix across fragments. */
	ecn = ip_frag_ecn_table[fqueue->ecn];
	if (unlikely(ecn == 0xff))
		goto fail;

	reasm_data = inet_frag_reasm_prepare(&fqueue->q, skb, prev_tail);
	if (!reasm_data)
		goto fail;

	/* Unfragmented part is taken from the first segment. */
	payload = ((skb->data - skb_network_header(skb)) -
		       sizeof(struct ipv6hdr) + fqueue->q.len -
		       sizeof(struct frag_hdr));
	if (payload > IPV6_MAXPLEN)
		goto oversize;

	/* Splice the fragment header out: patch the previous next-header
	 * byte, then slide everything before it forward by its size.
	 */
	nhoff = fqueue->nhoffset;
	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	if (skb_mac_header_was_set(skb))
		skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);
	skb_push(skb, skb->data - skb_network_header(skb));

	inet_frag_reasm_finish(&fqueue->q, skb, reasm_data, true);

	skb->next = NULL;
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->nhoff = nhoff;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
	IP6CB(skb)->frag_max_size = fqueue->q.max_size;

	/* Yes, and fold redundant checksum back. 8) */
	skb_postpush_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));

	fqueue->q.fragments_tail = NULL;
	return 0;

oversize:
	IP_VS_DBG(1, "ip_frag_reasm_v6: payload len = %d\n", payload);
	goto fail;
fail:
	IP_VS_INC_ESTATS(ip_vs_esmib, IPV6_DEFRAG_REASMERR);
	return -1;
}


/*
 * Insert one IPv6 fragment into its reassembly queue (mirrors
 * nf_ct_frag6_queue()).  Returns 0 when the queue became complete and
 * was reassembled into @skb, -EINPROGRESS for a duplicate, and -1
 * otherwise (queued pending more fragments, or dropped on error).
 * The skb is consumed on every path except successful reassembly.
 */
static int ip_frag_queue_v6(struct frag_queue *fqueue, struct sk_buff *skb,
			   struct frag_hdr *fhead, int nhoff)
{
	unsigned int payload_len;
	struct sk_buff *prev_skb;
	struct net_device *dev;
	int off, end, err;

	if (fqueue->q.flags & INET_FRAG_COMPLETE)
		goto err;

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);
	/* Fragment offset field is in units of 8 bytes, low 3 bits flags. */
	off = ntohs(fhead->frag_off) & ~0x7;

	/* End of this fragment's data within the reassembled payload. */
	end = off + (payload_len -
			((u8 *)(fhead + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_IPV6_FRAG_FAILED);
		return -1;
	}

	/* Headers we are about to pull must leave the checksum consistent. */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
					csum_partial(nh, (u8 *)(fhead + 1) - nh, 0)
					);
	}

	if (!(fhead->frag_off & htons(IP6_MF))) {
		/* Last fragment: pins the total length; reject if it
		 * shrinks the queue or contradicts a previous last frag.
		 */
		if (end < fqueue->q.len ||
			((fqueue->q.flags & INET_FRAG_LAST_IN) && end != fqueue->q.len))
			goto err;
		fqueue->q.flags |= INET_FRAG_LAST_IN;
		fqueue->q.len = end;
	} else {
		/* Non-last fragments must end on an 8-byte boundary. */
		if (end & 0x7) {
			IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_IPV6_FRAG_FAILED);
			return -1;
		}
		if (end > fqueue->q.len) {
			if (fqueue->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fqueue->q.len = end;
		}
	}

	if (end == off)
		goto err;

	/* Strip everything up to and including the fragment header. */
	if (!pskb_pull(skb, (u8 *) (fhead + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - off))
		goto err;

	dev = skb->dev;

	barrier();

	prev_skb = fqueue->q.fragments_tail;
	err = inet_frag_queue_insert(&fqueue->q, skb, off, end);
	if (err) {
		if (err == IPFRAG_DUP) {
			/* No error for duplicates, pretend they got queued. */
			kfree_skb(skb);
			return -EINPROGRESS;
		}
		goto drop;
	}

	if (dev)
		fqueue->iif = dev->ifindex;

	skb->ip_defrag_offset = off;

	fqueue->q.stamp = skb->tstamp;
	fqueue->q.meat += skb->len;
	fqueue->ecn |= ip_vs_frag_ecn_v6(ipv6_hdr(skb));
	if (payload_len > fqueue->q.max_size)
		fqueue->q.max_size = payload_len;
	add_frag_mem_limit(fqueue->q.fqdir, skb->truesize);

	if (off == 0) {
		fqueue->nhoffset = nhoff;
		fqueue->q.flags |= INET_FRAG_FIRST_IN;
	}

	/* First and last fragments seen and no holes: reassemble. */
	if (fqueue->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
		fqueue->q.meat == fqueue->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		/* Park the dst reference around reassembly. */
		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm_v6(fqueue, skb, prev_skb, dev);
		skb->_skb_refdst = orefdst;

		return err ? -1 : 0;
	}

	skb_dst_drop(skb);
	return -1;
drop:
	inet_frag_kill(&fqueue->q);
err:
	IP_VS_INC_ESTATS(ip_vs_esmib, DEFENCE_IPV6_FRAG_FAILED);
	kfree_skb(skb);
	return -1;
}


/*
 * Look up (or create) the reassembly queue matching this fragment's
 * {id, saddr, daddr, user, iif} key.  The interface index only scopes
 * link-local/multicast destinations.  Returns NULL when the fragment
 * hash refuses to grow.
 */
static inline struct frag_queue *
ip_vs_fq_find_v6(struct net *net, __be32 id, const struct ipv6hdr *hdr6,
	int iif,  __u32 user)
{
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr6->saddr,
		.daddr = hdr6->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr6->daddr) &
	      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(net->ipvs->frags, &key);
	if (!q) {
		IP_VS_DBG(1, "Fragment hash bucket grew over limit");
		return NULL;
	}

	return container_of(q, struct frag_queue, q);
}

/*
 * Feed an IPv6 fragment into the private reassembly machinery.
 *
 * Returns 0 when reassembly completed or the packet needs no tracking,
 * -1 while the fragment was queued/dropped (skb consumed), and 1 on a
 * header that cannot be pulled (NOTE(review): in that path the skb is
 * not freed here — confirm the caller handles rc==1 by freeing it).
 *
 * Fix: 'fhoff' was previously passed to ipv6frag_thdr_truncated()
 * uninitialized (undefined behavior).  The fragment header begins at
 * the transport offset — see the pskb_may_pull()/skb_transport_header()
 * uses below — so initialize it from there.
 */
int ip_defrag_v6(struct netns_ipvs *ipvs,
		struct sk_buff *skb, u_int32_t user)
{
	u8 nexthdr = NEXTHDR_FRAGMENT;
	int fhoff, ret;
	struct frag_hdr *fhead;
	struct frag_queue *fqueue;
	struct ipv6hdr *hdr6;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		IP_VS_DBG(1, "payload len = 0");
		return 0;
	}

	/* The fragment header sits at the transport offset. */
	fhoff = skb_transport_offset(skb);
	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
		pr_debug("Drop incomplete fragment\n");
		return 0;
	}

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) + sizeof(struct frag_hdr))))
		goto failed;

	hdr6 = ipv6_hdr(skb);
	fhead = (struct frag_hdr *)skb_transport_header(skb);

	/* Non-final fragments smaller than the IPv6 minimum MTU are
	 * suspicious (RFC 8021 territory) — refuse them.
	 */
	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
	    fhead->frag_off & htons(IP6_MF))
		goto failed;

	fqueue = ip_vs_fq_find_v6(ipvs->net, fhead->identification, hdr6,
		skb->dev ? skb->dev->ifindex : 0, user);

	if (fqueue == NULL) {
		IP_VS_INC_ESTATS(ip_vs_esmib, IPV6_DEFRAG_QUEUE_FAILED);
		goto kfree;
	}

	spin_lock(&fqueue->q.lock);
	ret = ip_frag_queue_v6(fqueue, skb, fhead, IP6CB(skb)->nhoff);
	spin_unlock(&fqueue->q.lock);

	inet_frag_put(&fqueue->q);

	return ret ? -1 : 0;

kfree:
	kfree_skb(skb);
failed:
	IP_VS_DBG(1, "ip_defrag_v6: pskb_may_pull failed");
	return 1;
}

/* Timer callback: a reassembly queue timed out before completion. */
static void ip_vs_frag_expire_v6(struct timer_list *t)
{
	struct inet_frag_queue *q = from_timer(q, t, timer);
	struct frag_queue *fq = container_of(q, struct frag_queue, q);

	IP_VS_INC_ESTATS(ip_vs_esmib, IPV6_FRAG_EXPIRE);
	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

/* Per-netns init: create the fragment directory and set its limits. */
static int ip_vs_frags_init_net_v6(struct net *net)
{
	int err = fqdir_init(&net->ipvs->frags, &ip_vs_frags_v6, net);

	if (err < 0) {
		IP_VS_DBG(0, "ip_vs ipv6 inet_frags_init_net failed!");
		return err;
	}

	net->ipvs->frags->high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipvs->frags->low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipvs->frags->timeout = IPV6_FRAG_TIMEOUT;

	return err;
}

/* Per-netns pre-exit: stop new queue creation before teardown. */
static void __net_exit ip_vs_frags_pre_exit_net_v6(struct net *net)
{
	if (net == NULL || net->ipvs == NULL)
		return;

	fqdir_pre_exit(net->ipvs->frags);
}

/* Per-netns exit: release the fragment directory and its queues. */
static void __net_exit ip_vs_frags_exit_net_v6(struct net *net)
{
	if (net == NULL || net->ipvs == NULL)
		return;

	fqdir_exit(net->ipvs->frags);
}

/* Per-netns lifecycle hooks for the private IPv6 fragment directories. */
static struct pernet_operations ip_vs_frags_ops_v6 = {
	.init = ip_vs_frags_init_net_v6,
	.pre_exit	= ip_vs_frags_pre_exit_net_v6,
	.exit = ip_vs_frags_exit_net_v6,
};

/* rhashtable geometry for the reassembly queues; the key/hash/compare
 * helpers come from <net/ipv6_frag.h>.
 */
static const struct rhashtable_params ip_vs_frag_rhash_params = {
	.head_offset = offsetof(struct inet_frag_queue, node),
	.hashfn = ip6frag_key_hashfn,
	.obj_hashfn = ip6frag_obj_hashfn,
	.obj_cmpfn = ip6frag_obj_cmpfn,
	.automatic_shrinking = true,
};

/*
 * Module init for the IPv6 defrag machinery: wire up the inet_frags
 * descriptor, register it, then register the per-netns ops (undoing the
 * former if the latter fails).  Returns 0 on success, negative errno
 * otherwise.
 */
int ip_vs_frag_init_v6(void)
{
	int err;

	ip_vs_frags_v6.constructor = ip6frag_init;
	ip_vs_frags_v6.destructor = NULL;
	ip_vs_frags_v6.qsize = sizeof(struct frag_queue);
	ip_vs_frags_v6.frag_expire = ip_vs_frag_expire_v6;
	ip_vs_frags_v6.frags_cache_name = ip_vs_frags_cache_name;
	ip_vs_frags_v6.rhash_params = ip_vs_frag_rhash_params;

	err = inet_frags_init(&ip_vs_frags_v6);
	if (err) {
		IP_VS_DBG(0, "inet_frags_init failed!");
		return err;
	}

	err = register_pernet_subsys(&ip_vs_frags_ops_v6);
	if (err) {
		IP_VS_DBG(0, "register_pernet_subsys failed!");
		inet_frags_fini(&ip_vs_frags_v6);
	}

	return err;
}

/* Module teardown: unregister the per-netns ops first so no new queues
 * appear, then release the inet_frags infrastructure.
 */
void ip_vs_frag_cleanup_v6(void)
{
	unregister_pernet_subsys(&ip_vs_frags_ops_v6);
	inet_frags_fini(&ip_vs_frags_v6);
}

#endif
