// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm64/include/asm/module.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Thu Aug 15 16:14:23 2023
 * plt entry and got support for arm64 relocation.
 */

#include <linux/moduleloader.h>
#include <trace/hooks/liblinux.h>
#include <asm/override.h>
#include <asm/insn.h>

struct plt_entry __override get_plt_entry(u64 val, void *pc)
{
	/*
	 * Build a PLT entry that materialises the target address in x16
	 * and branches to it:
	 *
	 *   MOVZ x16, #lo16                (bits [15:0])
	 *   MOVK x16, #mid16, lsl #16      (bits [31:16])
	 *   MOVK x16, #hi16,  lsl #32      (bits [47:32])
	 *   BR   x16
	 *
	 * MOVK/MOVN/MOVZ opcode layout:
	 * +--------+------------+--------+-----------+-------------+---------+
	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
	 * +--------+------------+--------+-----------+-------------+---------+
	 *
	 * Rd     := 0x10 (x16)
	 * hw     := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
	 * opc    := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
	 * sf     := 1 (64-bit variant)
	 *
	 * For liblinux, bits 63-48 of the target are always `0`, so a MOVZ
	 * for the low halfword plus two MOVKs is sufficient (no fourth mov).
	 *
	 * NOTE(review): @pc is unused in this body — presumably kept to
	 * match the generic get_plt_entry() signature; confirm at callers.
	 */
	u64 lo16  = val & 0xffff;
	u64 mid16 = (val >> 16) & 0xffff;
	u64 hi16  = (val >> 32) & 0xffff;

	return (struct plt_entry){
		cpu_to_le32(0xd2800010 | (lo16 << 5)),	/* movz x16, #lo16 */
		cpu_to_le32(0xf2a00010 | (mid16 << 5)),	/* movk x16, #mid16, lsl #16 */
		cpu_to_le32(0xf2c00010 | (hi16 << 5)),	/* movk x16, #hi16, lsl #32 */
		cpu_to_le32(0xd61f0200)			/* br   x16 */
	};
}

bool __override plt_entries_equal(const struct plt_entry *a,
		       const struct plt_entry *b)
{
	/*
	 * Two PLT entries target the same address iff their three mov
	 * instructions match; the trailing `br x16` is the same constant
	 * in every entry (see get_plt_entry) and need not be compared.
	 */
	if (a->mov0 != b->mov0)
		return false;
	if (a->mov1 != b->mov1)
		return false;

	return a->mov2 == b->mov2;
}

static bool got_in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

/*
 * Return the address of a GOT slot holding S + A for this relocation,
 * reusing an existing slot when one already carries the same value.
 * Returns 0 when the GOT is full.
 *
 * The GOT shares a section with the PLT (see the layout comment in
 * hook_module_frob_sections): the slot counter sits directly after the
 * PLT entries, followed by the GOT slots themselves.
 */
static u64 module_emit_got_entry(struct module *mod, Elf64_Shdr *sechdrs,
				 void *loc, const Elf64_Rela *rela,
				 Elf64_Sym *sym)
{
	/* Relocations against init code use the init-section PLT/GOT. */
	struct mod_plt_sec *pltsec = !got_in_init(mod, loc) ? &mod->arch.core :
							      &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	unsigned long plt_size =
			((unsigned int)(pltsec->plt_max_entries + 1)) * sizeof(struct plt_entry);

	unsigned long *got_num_entries = (unsigned long *)((char *)plt + plt_size);
	struct got_entry *got =
			(struct got_entry *)((char *)got_num_entries + sizeof(unsigned long));

	unsigned long got_size =
			sechdrs[pltsec->plt_shndx].sh_size - plt_size - sizeof(unsigned long);
	unsigned long got_max_entries = got_size / sizeof(struct got_entry) - 1;
	unsigned long i;
	u64 val = sym->st_value + rela->r_addend;

	/* Reuse a slot that already holds this value. */
	for (i = 0; i < *got_num_entries; i++)
		if (val == got[i].slot)
			return (u64)(&got[i]);

	/*
	 * Check capacity BEFORE writing. The previous write-then-check
	 * order bumped the counter past got_max_entries on failure, so a
	 * later call would have written one slot past the end of the GOT.
	 * The successful capacity (got_max_entries slots) is unchanged.
	 */
	if (WARN_ON(*got_num_entries >= got_max_entries))
		return 0;

	got[i].slot = val;
	(*got_num_entries)++;

	return (u64)&got[i];
}

/* Count the relocations in @rela[0..num) that need a GOT slot. */
static unsigned int count_gots(Elf64_Rela *rela, int num)
{
	unsigned int gots = 0;
	int n;

	for (n = 0; n < num; n++) {
		unsigned long type = ELF64_R_TYPE(rela[n].r_info);

		if (type == R_AARCH64_ADR_GOT_PAGE ||
		    type == R_AARCH64_LD64_GOT_LO12_NC)
			gots++;
	}

	return gots;
}

/*
 * Vendor hook: enlarge the module's PLT section(s) to make room for a GOT.
 *
 * Runs while the module's section headers are being laid out. For every
 * RELA section targeting an executable section, count the GOT-type
 * relocations, then grow the matching PLT section (core or init) by a
 * slot counter (one unsigned long) plus (count + 1) GOT slots.
 */
static void hook_module_frob_sections(void *args, Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_gots = 0;
	unsigned long init_gots = 0;
	Elf_Shdr *pltsec = NULL;
	int i;

	/*
	 * The PLT section has already been found in count_plts.
	 * If there are GOT relocation types, the PLT section layout is:
	 * +-----------------+
	 * | plt             |
	 * |                 |
	 * +-----------------+
	 * |(gap)            |
	 * +-----------------+
	 * | got_num_entries |
	 * +-----------------+
	 * | got             |
	 * |                 |
	 * +-----------------+
	 * |(gap)            |
	 * +-----------------+
	 *
	 * If there are no GOT relocation types, the PLT section layout is:
	 * +-----------------+
	 * | plt             |
	 * |                 |
	 * +-----------------+
	 * |(gap)            |
	 * +-----------------+
	 */

	for (i = 0; i < ehdr->e_shnum; i++) {
		/*
		 * rels/numrels are only meaningful for SHT_RELA sections;
		 * computing them before the type check below is harmless.
		 */
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int numrels = (int)(sechdrs[i].sh_size / sizeof(Elf64_Rela));
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* Relocations against ".init*" sections use the init GOT. */
		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
			core_gots += count_gots(rels, numrels);
		else
			init_gots += count_gots(rels, numrels);
	}

	/* Counter word + one spare slot on top of the counted entries. */
	if (core_gots > 0) {
		pltsec = sechdrs + mod->arch.core.plt_shndx;
		pltsec->sh_size += (core_gots  + 1) * sizeof(struct got_entry) +
					sizeof(unsigned long);
	}

	if (init_gots > 0) {
		pltsec = sechdrs + mod->arch.init.plt_shndx;
		pltsec->sh_size += (init_gots + 1) * sizeof(struct got_entry) +
					sizeof(unsigned long);
	}
}
INIT_VENDOR_HOOK(ldk_rvh_module_frob_sections, hook_module_frob_sections);

/*
 * Compute X, the value the immediate field is derived from, for the
 * supported GOT relocation types (AArch64 ELF ABI):
 *   LD64_GOT_LO12_NC: G(GDAT(S+A))
 *   ADR_GOT_PAGE:     PAGE(G(GDAT(S+A))) - PAGE(P)
 * Logs an error and returns 0 for any other type.
 */
static u64 do_got_reloc(int rela_type, __le32 *place, u64 val)
{
	if (rela_type == R_AARCH64_LD64_GOT_LO12_NC)
		return val;

	if (rela_type == R_AARCH64_ADR_GOT_PAGE)
		return (val & ~0xfff) - ((u64)place & ~0xfff);

	pr_err("do_reloc: unknown relocation operation %d\n", rela_type);
	return 0;
}

/*
 * Patch the immediate field of the instruction at @place.
 *
 * The relocation value X is computed by do_got_reloc(); bits
 * [@lsb, @lsb + @len) of X are then encoded into the instruction's
 * @imm_type immediate field. Returns -ERANGE when the bits discarded
 * above the field are not a plain sign extension of the encoded value
 * (i.e. X does not fit in @len bits after the @lsb shift), 0 otherwise.
 * Note: the instruction is patched even when -ERANGE is returned; the
 * caller decides whether that aborts the load.
 */
static int reloc_got_insn_imm(int rela_type, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = (s64)do_got_reloc(rela_type, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;	/* len low-order ones */
	imm = ((u64)sval) & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

/*
 * Vendor hook: resolve the GOT-type relocations in one RELA section.
 *
 * R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC entries are
 * resolved here against GOT slots emitted by module_emit_got_entry();
 * each handled entry is then rewritten to R_AARCH64_NONE so that the
 * generic apply_relocate_add() skips it. All other relocation types are
 * left untouched for the generic path.
 *
 * *ret is left at -ENOEXEC on any failure (GOT full, ADR immediate
 * overflow) and set to 0 on success or when the section is not SHT_RELA.
 */
static void hook_apply_relocations(void *args, Elf64_Shdr *sechdrs, unsigned int symindex,
			    unsigned int relsec, struct module *me, int *ret)
{
	unsigned int i;
	int ovf;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	/* Pessimistic default: any early return below reports failure. */
	*ret = -ENOEXEC;

	if (sechdrs[relsec].sh_type != SHT_RELA) {
		*ret = 0;
		return;
	}

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + (u64)rel[i].r_addend;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_AARCH64_ADR_GOT_PAGE:
			/*
			 * 311: PAGE(G(GDAT(S+A))) - PAGE(P)
			 * GDAT(S+A): a pointer-sized entry in the GOT for address S+A
			 * G(expr): is the address of the GOT entry for the expression expr
			 * PAGE(expr): is the page address of the expression expr,
			 *		defined as (expr & ~0xFFF)
			 */
			val = module_emit_got_entry(me, sechdrs, loc, &rel[i], sym);
			if (!val)
				return;
			ovf = reloc_got_insn_imm(R_AARCH64_ADR_GOT_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			if (ovf == -ERANGE) {
				pr_err("module %s: overflow in relocation type %d val %llx\n",
				       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
				return;
			}

			/* Handled: neutralize it for apply_relocate_add(). */
			rel[i].r_info = ELF64_R_INFO(ELF64_R_SYM(rel[i].r_info),  R_AARCH64_NONE);
			break;
		case R_AARCH64_LD64_GOT_LO12_NC:
			/*
			 * 312: G(GDAT(S+A)): Set the LD/ST immediate field to bits [11:3] of X.
			 * No overflow check, check that X&7 = 0
			 */
			val = module_emit_got_entry(me, sechdrs, loc, &rel[i], sym);
			if (!val)
				return;
			/* "_NC" type: the overflow result is deliberately ignored. */
			(void)reloc_got_insn_imm(R_AARCH64_LD64_GOT_LO12_NC, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			rel[i].r_info = ELF64_R_INFO(ELF64_R_SYM(rel[i].r_info),  R_AARCH64_NONE);
			break;

		default:
			/* do relocate in apply_relocate_add */
			break;
		}
	}

	*ret = 0;
}
INIT_VENDOR_HOOK(ldk_rvh_apply_relocations, hook_apply_relocations);
