// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Huawei Technologies Co., Ltd */

#define pr_fmt(fmt) KBUILD_MODNAME ": [XSA]" fmt

#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <crypto/aead.h>

#include "ossl_knl.h"

#ifdef CONFIG_XFRM_OFFLOAD

#ifndef _LLT_TEST_
#include <linux/hashtable.h>
#endif

#include "hinic3_crm.h"
#include "hinic3_nic_dev.h"
#include "hinic3_xfrm.h"
#include "hisec_cfg.h"
#include "hisec_alg.h"
#include "hisec_common.h"
#include "hinic3_ipsec.h"
#include "hinic3_xsa.h"

/* True when the xfrm state is an inbound (RX) offload SA. */
#define HINIC3_IPSEC_SA_INBOUND(xs) \
			((xs)->xso.flags & XFRM_OFFLOAD_INBOUND)

/*
 * Match a lookup key against a cached SA entry: SPI, destination address
 * (IPv4 scalar compare or IPv6 memcmp, selected by @ipv4) and IP protocol
 * must all be equal. All fields are in network byte order on both sides,
 * so no conversion is needed for the comparisons.
 */
#define HINIC3_SA_KEY_MATCH(sa_key, tmp_sa, ipv4) \
	((sa_key)->spi == (tmp_sa)->xs->id.spi && \
	 (((ipv4) && (sa_key)->daddr.a4 == (tmp_sa)->xs->id.daddr.a4) || \
	  (!(ipv4) && !memcmp(&(sa_key)->daddr.a6, &(tmp_sa)->xs->id.daddr.a6, \
			      sizeof((tmp_sa)->xs->id.daddr.a6)))) && \
	 (sa_key)->proto == (tmp_sa)->xs->id.proto)

/* Only AES-CBC and AES-CTR encryption algorithms can be offloaded. */
#define XS_IS_EALG_INVALID(xs) \
	(((xs)->props.ealgo != SADB_X_EALG_AESCBC) && \
	 ((xs)->props.ealgo != SADB_X_EALG_AESCTR))

/* Only HMAC-SHA1 and HMAC-SHA2-256/384/512 authentication can be offloaded. */
#define XS_IS_AALG_INVALID(xs) \
	(((xs)->props.aalgo != SADB_AALG_SHA1HMAC) && \
	 ((xs)->props.aalgo != SADB_X_AALG_SHA2_256HMAC) && \
	 ((xs)->props.aalgo != SADB_X_AALG_SHA2_384HMAC) && \
	 ((xs)->props.aalgo != SADB_X_AALG_SHA2_512HMAC))

/*
 * Check that the crypto algorithms configured on @xs are offloadable:
 * AEAD must be AES-GCM, cipher must be AES-CBC/CTR, auth must be an
 * HMAC-SHA variant, and no compression may be requested.
 * Returns 0 when acceptable, -EINVAL otherwise (with a warning logged).
 */
static int hinic3_verify_xfrm_algo(struct xfrm_state *xs)
{
	struct net_device *netdev = xs->xso.dev;
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	if (xs->aead &&
	    strcmp(xs->aead->alg_name,
		   crypt_alg_name[HISEC_CRYPTO_ALG_AES_GCM]) != 0) {
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm aead (%s)\n",
			   xs->aead->alg_name);
		return -EINVAL;
	}

	if (xs->ealg && XS_IS_EALG_INVALID(xs)) {
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm aes (%s)\n",
			   xs->ealg->alg_name);
		return -EINVAL;
	}

	if (xs->aalg && XS_IS_AALG_INVALID(xs)) {
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm auth hmac (%s)\n",
			   xs->aalg->alg_name);
		return -EINVAL;
	}

	if (xs->props.calgo != SADB_X_CALG_NONE) {
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm compressed(%u)\n",
			   xs->props.calgo);
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate a whole xfrm state for offload: algorithms first, then
 * address family (IPv4/IPv6), mode (transport/tunnel) and protocol
 * (ESP/AH). Returns 0 when offloadable, -EINVAL otherwise.
 */
static int hinic3_ipsec_verify_xfrm_state(struct xfrm_state *xs)
{
	struct net_device *netdev = xs->xso.dev;
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	int err;

	err = hinic3_verify_xfrm_algo(xs);
	if (err)
		return err;

	switch (xs->props.family) {
	case AF_INET:
	case AF_INET6:
		break;
	default:
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm family(%u)\n",
			   xs->props.family);
		return -EINVAL;
	}

	switch (xs->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
		break;
	default:
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm mode(%u)\n",
			   xs->props.mode);
		return -EINVAL;
	}

	switch (xs->id.proto) {
	case IPPROTO_ESP:
	case IPPROTO_AH:
		break;
	default:
		nicif_warn(nic_dev, drv, netdev, "Unsupport offload xfrm proto(%u)\n",
			   xs->id.proto);
		return -EINVAL;
	}

	return 0;
}

/*
 * Recover the driver SA descriptor stashed in xs->xso.offload_handle.
 * Returns NULL when @xs is NULL or no offload handle was installed.
 * Warns if the stored SA no longer points back at @xs.
 */
static struct hinic3_ipsec_sa *hinic3_ipsec_get_sa_from_xso(struct xfrm_state *xs)
{
	struct hinic3_ipsec_sa *sa;

	if (!xs)
		return NULL;

	sa = (struct hinic3_ipsec_sa *)xs->xso.offload_handle;
	if (sa)
		WARN_ON(sa->xs != xs);

	return sa;
}

/*
 * Allocate a zeroed driver SA descriptor bound to @xs and the device's
 * ipsec context. Returns NULL on allocation failure (error logged).
 */
static struct hinic3_ipsec_sa *hinic3_ipsec_alloc_sa(struct hinic3_nic_dev *nic_dev,
						     struct xfrm_state *xs)
{
	struct hinic3_ipsec_sa *new_sa = kzalloc(sizeof(*new_sa), GFP_KERNEL);

	if (!new_sa) {
		nic_err(&nic_dev->pdev->dev, "Alloc SA mem failed\n");
		return NULL;
	}

	new_sa->xs = xs;
	new_sa->ipsec = nic_dev->ipsec;

	return new_sa;
}

/* Release a SA descriptor allocated by hinic3_ipsec_alloc_sa(). */
static void hinic3_ipsec_free_sa(struct hinic3_ipsec_sa *sa)
{
	kfree(sa);
}

/*
 * Allocate a hardware SA context (QPC) from CQM and publish the SA in
 * the per-device xid -> SA map (under sadb_lock) so completions can be
 * resolved back to the xfrm state by xid.
 * Returns 0 on success, -ENOMEM when CQM allocation fails, -EINVAL when
 * CQM hands back an out-of-range xid.
 */
static int hinic3_ipsec_alloc_sa_ctx(struct hinic3_nic_dev *nic_dev,
				     struct hinic3_ipsec_sa *sa)
{
	cqm_qpc_mpt_s *ctx;
	struct hinic3_ipsec *ipsec = nic_dev->ipsec;
	unsigned long flags;

	ctx = cqm_object_qpc_mpt_create(nic_dev->hwdev,
					SERVICE_T_IPSEC,
					CQM_OBJECT_SERVICE_CTX,
					HINIC3_IPSEC_SA_CTX_SIZE,
					sa, CQM_INDEX_INVALID);
	if (!ctx) {
		nic_err(&nic_dev->pdev->dev, "Fail to alloc SA context from CQM\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): '>' accepts xid == NUM + RESERVE; confirm xmap[]
	 * really has NUM + RESERVE + 1 entries, otherwise this bound (and
	 * the matching one in hinic3_ipsec_get_xs_by_xid) is off by one.
	 */
	if (ctx->xid > (HINIC3_IPSEC_PER_PF_SA_CTX_NUM +
			HINIC3_IPSEC_XID_RESERVE_NUM)) {
		nic_err(&nic_dev->pdev->dev, "Get bad xid(%u) from CQM\n",
			ctx->xid);
		hiudk_cqm_object_delete(nic_dev->hwdev, &ctx->object);
		return -EINVAL;
	}

	sa->ctx = (void *)ctx;
	sa->xid = ctx->xid;
	sa->qpc_gpa = ctx->paddr;
	/* Publish only after the SA fields above are fully set up. */
	spin_lock_irqsave(&ipsec->sadb_lock, flags);
	ipsec->xmap[sa->xid].sa_ctx = (void *)sa;
	spin_unlock_irqrestore(&ipsec->sadb_lock, flags);

	nic_info(&nic_dev->pdev->dev,
		 "Alloc SA QPC xid(0x%x) vaddr(%p) gpa(0x%llx) from CQM success\n",
		 sa->xid, ctx->vaddr, sa->qpc_gpa);

	return 0;
}

/*
 * Return the SA's QPC context to CQM and remove the xid -> SA mapping.
 * Safe to call when no context was ever allocated (sa->ctx == NULL);
 * in that case nothing is done and nothing is logged.
 *
 * Fixes: the success message used to be printed even when sa->ctx was
 * NULL and nothing had been freed, and used %d for the u32 xid.
 */
static void hinic3_ipsec_free_sa_ctx(struct hinic3_nic_dev *nic_dev,
				     struct hinic3_ipsec_sa *sa)
{
	cqm_qpc_mpt_s *ctx;
	struct hinic3_ipsec *ipsec = nic_dev->ipsec;
	unsigned long flags;

	ctx = (cqm_qpc_mpt_s *)sa->ctx;
	if (!ctx)
		return;

	hiudk_cqm_object_delete(nic_dev->hwdev, &ctx->object);
	spin_lock_irqsave(&ipsec->sadb_lock, flags);
	ipsec->xmap[sa->xid].sa_ctx = NULL;
	spin_unlock_irqrestore(&ipsec->sadb_lock, flags);
	sa->ctx = NULL;

	nic_info(&nic_dev->pdev->dev,
		 "Free SA context xid(%u) to CQM success\n", sa->xid);
}

/*
 * Build the software view of the SA context in @sa_ctx (mode, protocol,
 * ESN flag, anti-replay window, algorithm selection and key material)
 * from sa->xs, and zero the CQM-backed copy of the context.
 * Returns 0 on success or the error from the hisec_get_* key helpers.
 */
static int hinic3_ipsec_init_sa_ctx(struct hinic3_ipsec_sa *sa,
				    struct hisec_ipsec_sa_pctx_info *sa_ctx)
{
	struct hisec_ipsec_sa_pctx_sws *ctx_sws = &sa_ctx->ctx_sws;
	struct hinic3_nic_dev *nic_dev = sa->ipsec->nic_dev;
	struct hisec_ipsec_alg_info aes_alg;
	struct xfrm_state *xs = sa->xs;
	struct xfrm_replay_state_esn *replay_esn = xs->replay_esn;
	cqm_qpc_mpt_s *cqm_qpc;
	u8 work_mode = 0, ipsec_proto = 0, ca_en = 0;
	int ret = 0;
	u32 esn_flag = 0, replay_win = HISEC_MAX_REPLAY_WIN_SIZE;

	/* Clear both the hardware-visible QPC copy and the local scratch. */
	cqm_qpc = (cqm_qpc_mpt_s *)sa->ctx;
	memset((struct hisec_ipsec_sa_pctx_info *)cqm_qpc->vaddr,
	       0, sizeof(*sa_ctx));
	memset(sa_ctx, 0, sizeof(*sa_ctx));
	memset(&aes_alg, 0, sizeof(struct hisec_ipsec_alg_info));

	work_mode = (xs->props.mode == XFRM_MODE_TRANSPORT) ?
			HISEC_IPSEC_MODE_TRANSPORT : HISEC_IPSEC_MODE_TUNNEL;

	if (xs->props.flags & XFRM_STATE_ESN) {
		esn_flag = HISEC_IPSEC_ESN_FLAG_ENABLE;
	} else { /* non-esn repwin <= 32 */
		replay_win = xs->props.replay_window;
	}

	/* An ESN replay state, when present, supplies the window instead. */
	if (replay_esn)
		replay_win = replay_esn->replay_window;

	switch (xs->id.proto) {
	case IPPROTO_ESP:
		ipsec_proto = HISEC_IPSEC_PROTO_ESP;
		break;
	case IPPROTO_AH:
		ipsec_proto = HISEC_IPSEC_PROTO_AH;
		break;
	default:
		ipsec_proto = HISEC_IPSEC_PROTO_MAX;
		break;
	}

	/*
	 * Pick the crypto setup. ca_en reflects what is configured:
	 * AEAD (GCM) -> auth+cipher; ealg alone -> cipher only;
	 * aalg alone -> auth only; ealg + aalg -> auth+cipher (fixed up
	 * after the individual branches below).
	 */
	if (xs->aead) {
		ret = hisec_get_aead_gcm_info(nic_dev->hwdev, xs, &aes_alg);
		if (ret) {
			nic_err(&nic_dev->pdev->dev, "Get aead gcm info failed:%d\n",
				ret);
			return ret;
		}
		ca_en = HISEC_IPSEC_AUTH_CIPHER_ENABLE;
	}

	if (xs->ealg) {
		ret = hisec_get_aes_enc_info(nic_dev->hwdev, xs, &aes_alg);
		if (ret) {
			nic_err(&nic_dev->pdev->dev, "Get aes enc info failed:%d\n",
				ret);
			return ret;
		}
		ca_en = HISEC_IPSEC_ONLY_CIPHER_ENABLE;
	}

	if (xs->aalg) {
		ret = hisec_get_auth_hmac_info(nic_dev->hwdev, xs, &aes_alg);
		if (ret) {
			nic_err(&nic_dev->pdev->dev, "Get auth hmac info failed:%d\n",
				ret);
			return ret;
		}
		ca_en = HISEC_IPSEC_ONLY_AUTH_ENABLE;
	}

	if (xs->ealg && xs->aalg)
		ca_en = HISEC_IPSEC_AUTH_CIPHER_ENABLE;

	ctx_sws->dw0_val =
		HISEC_IPSEC_SA_CTX_SW_DW0(ca_en, work_mode, ipsec_proto, aes_alg.auth_key_len,
					  aes_alg.auth_type, aes_alg.cipher_type,
					  aes_alg.cipher_key_len_sel, aes_alg.cipher_alg_sel,
					  aes_alg.esp_iv_size);
	ctx_sws->dw1_val = HISEC_IPSEC_SA_CTX_SW_DW1(0, esn_flag, aes_alg.icv_mac_len, 0, 0,
						     aes_alg.sha2_alg_sel);

	/* Log in host byte order, then convert the dwords for hardware. */
	nic_info(&nic_dev->pdev->dev, "Init SA ctx. dw0:0x%x, dw1:0x%x\n",
		 ctx_sws->dw0_val, ctx_sws->dw1_val);

	ctx_sws->dw0_val = cpu_to_be32(ctx_sws->dw0_val);
	ctx_sws->dw1_val = cpu_to_be32(ctx_sws->dw1_val);

	memcpy(ctx_sws->cipher_key, aes_alg.cipher_key,
	       aes_alg.cipher_key_len_byte);
	memcpy(ctx_sws->auth_key, aes_alg.auth_key,
	       aes_alg.auth_key_len);
	memcpy(&ctx_sws->salt, &aes_alg.salt, sizeof(ctx_sws->salt));
	/* Clamp the replay window to what the hardware supports. */
	ctx_sws->replay_window = replay_win > HISEC_MAX_REPLAY_WIN_SIZE ?
		 htonl(HISEC_MAX_REPLAY_WIN_SIZE) : htonl(replay_win);
	nic_info(&nic_dev->pdev->dev, "Init SA ctx. salt:0x%x, esn_flag:0x%x, replay_win:0x%x\n",
		 ctx_sws->salt, esn_flag, replay_win);

	return 0;
}

/*
 * Write one SA context into the SMF via a cmdq message: xid, key
 * (spi/proto/daddr), QPC guest physical address and the prepared
 * context body. Returns the status of hisec_set_ipsec_sa_ctx().
 */
static int hinic3_ipsec_hw_add_sa_ctx(struct hinic3_nic_dev *nic_dev,
				      struct hinic3_ipsec_sa_key *sa_key,
				      u32 xid, u64 gpa_addr, bool ipv4,
				      struct hisec_ipsec_sa_pctx_info *sa_ctx)
{
	struct hisec_ipsec_sa_pctx_msg msg = {0};
	int err;

	msg.add = htonl(HISEC_IPSEC_OP_ADD_SA_CTX);
	msg.xid = htonl(xid);
	msg.proto = htonl(sa_key->proto);
	msg.spi = sa_key->spi;
	msg.ctx_gpa_hi = cpu_to_be32(upper_32_bits(gpa_addr));
	msg.ctx_gpa_lo = cpu_to_be32(lower_32_bits(gpa_addr));
	memcpy(&msg.pctx_info, sa_ctx, sizeof(*sa_ctx));

	if (ipv4) {
		msg.ipv4 = htonl(true);
		msg.daddr[0] = sa_key->daddr.a4;
	} else {
		memcpy(msg.daddr, &sa_key->daddr.a6,
		       sizeof(sa_key->daddr.a6));
	}

	err = hisec_set_ipsec_sa_ctx(nic_dev->hwdev, &msg);
	if (err)
		nic_err(&nic_dev->pdev->dev,
			"Fail to add hw sa ctx: %d\n", err);

	return err;
}

/*
 * Remove one SA context from the SMF via a cmdq message, identified by
 * xid plus the key (spi/proto/daddr). Returns the status of
 * hisec_set_ipsec_sa_ctx().
 */
static int hinic3_ipsec_hw_del_sa_ctx(struct hinic3_nic_dev *nic_dev,
				      struct hinic3_ipsec_sa_key *sa_key,
				      u32 xid, bool ipv4)
{
	struct hisec_ipsec_sa_pctx_msg msg = {0};
	int err;

	msg.add = htonl(HISEC_IPSEC_OP_DEL_SA_CTX);
	msg.xid = htonl(xid);
	msg.proto = htonl(sa_key->proto);
	msg.spi = sa_key->spi;

	if (ipv4) {
		msg.ipv4 = htonl(true);
		msg.daddr[0] = sa_key->daddr.a4;
	} else {
		memcpy(msg.daddr, &sa_key->daddr.a6,
		       sizeof(sa_key->daddr.a6));
	}

	err = hisec_set_ipsec_sa_ctx(nic_dev->hwdev, &msg);
	if (err)
		nic_err(&nic_dev->pdev->dev,
			"Fail to del hw sa ctx: %d\n", err);

	return err;
}

/*
 * Hash the whole SA key with jhash. Relies on the caller having zeroed
 * the struct first (hinic3_ipsec_make_sa_key memsets it) so that any
 * padding bytes do not perturb the hash value.
 */
static inline u32 hinic3_ipsec_gen_hash_key(struct hinic3_ipsec_sa_key *sa_key)
{
	return jhash(sa_key, sizeof(*sa_key), 0);
}

/*
 * Build the SA lookup key (destination address, SPI and protocol) from
 * @xs into @sa_key. The key is zeroed first so padding is deterministic
 * for hashing. All copied fields stay in network byte order.
 */
static void hinic3_ipsec_make_sa_key(struct hinic3_nic_dev *nic_dev,
				     struct xfrm_state *xs,
				     struct hinic3_ipsec_sa_key *sa_key)
{
	const char *dir = HINIC3_IPSEC_SA_INBOUND(xs) ? "inbound" : "outbound";

	memset(sa_key, 0, sizeof(*sa_key));

	/* note: xfrm state use network byte order is big endian */
	if (xs->props.family != AF_INET6) {
		sa_key->daddr.a4 = xs->id.daddr.a4;
		nic_info(&nic_dev->pdev->dev, "SA %s key, IPv4 daddr:0x%x\n",
			 dir, ntohl(sa_key->daddr.a4));
	} else {
		memcpy(&sa_key->daddr.a6, &xs->id.daddr.a6,
		       sizeof(xs->id.daddr.a6));
		nic_info(&nic_dev->pdev->dev, "SA %s key, IPv6 daddr:0x%x-0x%x-0x%x-0x%x\n",
			 dir,
			 ntohl(sa_key->daddr.a6[0]),
			 ntohl(sa_key->daddr.a6[1]),
			 ntohl(sa_key->daddr.a6[2]),
			 ntohl(sa_key->daddr.a6[3]));
	}

	sa_key->spi = xs->id.spi;
	sa_key->proto = xs->id.proto;

	nic_info(&nic_dev->pdev->dev, "SA %s key, spi:0x%x, proto:%d\n",
		 dir, ntohl(sa_key->spi), sa_key->proto);
}

/*
 * Look up @sa_key in the RCU-protected SA hash table.
 * Returns true if an SA with the same spi/daddr/proto already exists.
 * Under _LLT_TEST_ the table is compiled out and this always returns
 * false.
 */
static bool hinic3_ipsec_check_sa_is_exist(struct hinic3_ipsec *ipsec,
					   struct hinic3_ipsec_sa_key *sa_key,
					   bool ipv4)
{
	struct hinic3_ipsec_sa *tmp_sa = NULL;
	u32 hash_key = hinic3_ipsec_gen_hash_key(sa_key);
	bool found = false;

#ifndef _LLT_TEST_
	/* hash key always based on inbound direction */
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb, tmp_sa, hlist, hash_key) {
		/* Hash collision is possible, so confirm the full key. */
		if (HINIC3_SA_KEY_MATCH(sa_key, tmp_sa, ipv4)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
#endif

	return found;
}

/*
 * Resolve a hardware xid back to its xfrm state via the xmap table.
 * On success a reference is taken on the xfrm state (caller must drop it
 * with xfrm_state_put). Returns NULL for an invalid xid, an empty slot,
 * or a slot whose SA does not carry the requested xid.
 */
struct xfrm_state *hinic3_ipsec_get_xs_by_xid(struct hinic3_ipsec *ipsec, u32 xid)
{
	struct hinic3_ipsec_sa *sa;
	struct xfrm_state *xs = NULL;
	unsigned long irq_flags;

	if (!ipsec || xid > (HINIC3_IPSEC_PER_PF_SA_CTX_NUM +
			     HINIC3_IPSEC_XID_RESERVE_NUM))
		return NULL;

	spin_lock_irqsave(&ipsec->sadb_lock, irq_flags);
	sa = (struct hinic3_ipsec_sa *)ipsec->xmap[xid].sa_ctx;
	if (sa && sa->xid == xid) {
		xs = sa->xs;
		xfrm_state_hold(xs);
	}
	spin_unlock_irqrestore(&ipsec->sadb_lock, irq_flags);

	return xs;
}

/* Insert @sa into the RCU hash table, keyed by the jhash of @sa_key. */
static void hinic3_ipsec_sadb_add_sa(struct hinic3_ipsec_sa *sa,
				     struct hinic3_ipsec_sa_key *sa_key)
{
	struct hinic3_ipsec *ipsec = sa->ipsec;
	unsigned long irq_flags;

	spin_lock_irqsave(&ipsec->sadb_lock, irq_flags);
	hash_add_rcu(ipsec->sadb, &sa->hlist,
		     hinic3_ipsec_gen_hash_key(sa_key));
	spin_unlock_irqrestore(&ipsec->sadb_lock, irq_flags);
}

/* Unlink @sa from the RCU hash table under the sadb lock. */
static void hinic3_ipsec_sadb_del_sa(struct hinic3_ipsec_sa *sa)
{
	struct hinic3_ipsec *ipsec = sa->ipsec;
	unsigned long irq_flags;

	spin_lock_irqsave(&ipsec->sadb_lock, irq_flags);
	hash_del_rcu(&sa->hlist);
	spin_unlock_irqrestore(&ipsec->sadb_lock, irq_flags);
}

/*
 * xdo_dev_state_add path: offload @xs to hardware.
 * Validates the state, rejects duplicates, allocates a driver SA and a
 * CQM context, programs the SA into the SMF, then publishes the SA via
 * xs->xso.offload_handle. Uses goto-based unwind on failure.
 * Returns 0 on success or a negative errno.
 */
int hinic3_add_xfrm_dev_sa(struct xfrm_state *xs)
{
	struct net_device *netdev = xs->xso.dev;
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_ipsec_sa *new_sa = NULL;
	struct hisec_ipsec_sa_pctx_info sa_ctx;
	struct hinic3_ipsec_sa_key sa_key;
	bool found = false;
	int ret = 0;

	ret = hinic3_ipsec_verify_xfrm_state(xs);
	if (ret)
		return ret;

	hinic3_ipsec_make_sa_key(nic_dev, xs, &sa_key);
	found = hinic3_ipsec_check_sa_is_exist(nic_dev->ipsec, &sa_key,
					       (xs->props.family == AF_INET) ?
					       true : false);
	/*
	 * NOTE(review): a duplicate returns 0 (ret is still 0 here, so the
	 * logged "%d" is always 0) WITHOUT setting xs->xso.offload_handle —
	 * the stack then believes the add succeeded but later lookups via
	 * the handle find nothing. Confirm whether -EEXIST was intended.
	 */
	if (found) {
		nic_warn(&nic_dev->pdev->dev,
			 "SA item existed: %d\n", ret);
		return ret;
	}

	new_sa = hinic3_ipsec_alloc_sa(nic_dev, xs);
	if (!new_sa) {
		ret = -ENOMEM;
		goto alloc_sa_fail;
	}

	ret = hinic3_ipsec_alloc_sa_ctx(nic_dev, new_sa);
	if (ret) {
		nic_err(&nic_dev->pdev->dev,
			"Fail to alloc sa ctx: %d\n", ret);
		goto alloc_sa_ctx_fail;
	}

	/* Visible in the SADB before the hardware is programmed. */
	hinic3_ipsec_sadb_add_sa(new_sa, &sa_key);

	ret = hinic3_ipsec_init_sa_ctx(new_sa, &sa_ctx);
	if (ret) {
		nic_err(&nic_dev->pdev->dev, "Fail to add hw sa: %d\n", ret);
		goto init_sa_fail;
	}

	ret = hinic3_ipsec_hw_add_sa_ctx(nic_dev, &sa_key,
					 new_sa->xid, new_sa->qpc_gpa,
					 (xs->props.family == AF_INET) ?
					 true : false, &sa_ctx);
	if (ret) {
		nic_err(&nic_dev->pdev->dev, "Fail to add hw sa: %d\n", ret);
		goto add_hwsa_fail;
	}

	/* Hand the driver SA to the xfrm layer for later del/free calls. */
	xs->xso.offload_handle = (unsigned long)new_sa;

	return ret;

add_hwsa_fail:
init_sa_fail:
	hinic3_ipsec_sadb_del_sa(new_sa);
	hinic3_ipsec_free_sa_ctx(nic_dev, new_sa);

alloc_sa_ctx_fail:
	hinic3_ipsec_free_sa(new_sa);

alloc_sa_fail:
	return ret;
}
EXPORT_SYMBOL(hinic3_add_xfrm_dev_sa);

/*
 * xdo_dev_state_delete path: unlink the SA from the SADB hash table.
 * The hardware context and the SA memory itself are released later in
 * hinic3_free_xfrm_dev_sa().
 */
void hinic3_del_xfrm_dev_sa(struct xfrm_state *xs)
{
	struct hinic3_ipsec_sa *sa = hinic3_ipsec_get_sa_from_xso(xs);

	if (sa)
		hinic3_ipsec_sadb_del_sa(sa);
}
EXPORT_SYMBOL(hinic3_del_xfrm_dev_sa);

/*
 * xdo_dev_state_free path: tear down the hardware SA context (if one was
 * allocated), drop the netdev reference taken for HWACC mode, and free
 * the driver SA.
 *
 * Fixes: xs->xso.offload_handle is now cleared before the SA is freed —
 * previously it kept pointing at freed memory, so a later
 * hinic3_ipsec_get_sa_from_xso() would hand back a dangling pointer.
 * Also uses hinic3_ipsec_free_sa() to pair with hinic3_ipsec_alloc_sa()
 * instead of a raw kfree.
 */
void hinic3_free_xfrm_dev_sa(struct xfrm_state *xs)
{
	struct hinic3_ipsec_sa *sa;
	struct hinic3_ipsec_sa_key sa_key;

	sa = hinic3_ipsec_get_sa_from_xso(xs);
	if (!sa)
		return;

	hinic3_ipsec_make_sa_key(sa->ipsec->nic_dev, xs, &sa_key);
	if (sa->ctx) {
		(void)hinic3_ipsec_hw_del_sa_ctx(sa->ipsec->nic_dev,
						 &sa_key, sa->xid,
						 (xs->props.family == AF_INET) ?
							true : false);
		hinic3_ipsec_free_sa_ctx(sa->ipsec->nic_dev, sa);
	}

	/* todo:: avoid resolve hwacc mode could not rmmod hinic3 ko */
	if (ipsec_work_mode == HISEC_IPSEC_OFFLOAD_MODE_HWACC)
		dev_put(sa->ipsec->nic_dev->netdev);

	/* Clear the handle so nothing can reach the SA once it is freed. */
	xs->xso.offload_handle = 0;
	hinic3_ipsec_free_sa(sa);
}
EXPORT_SYMBOL(hinic3_free_xfrm_dev_sa);
#endif

