// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * Description: Euler Hybrid Memory Management for Persistent Memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/bitmap.h>
#include <linux/crc16.h>
#include <linux/mm.h>

#include <asm/types.h>

#include "hpmm_common.h"
#include "hpmm_mgr.h"

void clear_uce_records(struct hpmm_mgr_context *hpmm_mgr, unsigned long uuid, bool flag);

/* check the hpmm_mgr_context from addr is valid */
/*
 * Validate the hpmm_mgr_context located at @addr.
 *
 * A scratch copy of the header is taken so the CRC16 can be
 * recomputed with the checksum field zeroed without modifying the
 * persistent original.  On success the embedded pointers are rebased
 * via reconstruct_persistent_mgr() in case the region was remapped.
 *
 * Returns true when both the CRC16 and the magic match.
 */
bool check_hpmm_mgr_context_valid(unsigned long addr)
{
	struct hpmm_mgr_context *mgr_context = (struct hpmm_mgr_context *)addr;
	struct hpmm_mgr_context *mgr_check;
	unsigned long mgr_len;
	bool valid = false;
	u16 save_crc;
	u16 calc_crc;

	mgr_len = struct_size(mgr_context, hpmm_mgr_info, MGR_INFO_NUM);

	mgr_check = kzalloc(mgr_len, GFP_KERNEL);
	if (!mgr_check)
		return false;

	memcpy(mgr_check, mgr_context, mgr_len);
	save_crc = mgr_check->mgr_sum;
	/* the checksum covers the header with mgr_sum itself zeroed */
	mgr_check->mgr_sum = 0;

	calc_crc = crc16(~0, (__u8 *)mgr_check, mgr_len);
	if (save_crc != calc_crc) {
		pr_err("%s is wrong, save_crc: %d, calc_crc: %d\n",
		       __func__, save_crc, calc_crc);
		goto out;
	}

	if (mgr_check->magic != HPMM_MGR_MAGIC) {
		pr_err("%s, magic : %d is wrong\n", __func__, mgr_check->magic);
		goto out;
	}

	reconstruct_persistent_mgr(addr);
	valid = true;
out:
	kfree(mgr_check);
	return valid;
}

/*
 * Check whether @page currently lies inside a free buddy block.
 *
 * Scans every order for a buddy block head covering @page.  The zone
 * lock is held so the buddy state cannot change during the scan.
 *
 * Returns true when @page belongs to a free buddy block.
 */
bool hpmm_is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		/*
		 * The buddy order is stored in page_private() of the
		 * block head, not of the member page (mirrors the
		 * core is_free_buddy_page() logic).
		 */
		if (PageBuddy(page_head) && page_private(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}

/* Atomically fetch the PTE at @ptep and clear it (relaxed ordering). */
static inline pte_t hpmm_ptep_get_and_clear(pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

static void hpmm_zap_pte_range(struct page *pte_page, struct hpmm_persist_device *dev)
{
	pte_t *start_pte;
	pte_t *pte;
	unsigned int count = 0;
	/* get the pte total count from the pte page */
	int nr = PAGE_SIZE >> 3;

	start_pte = page_to_virt(pte_page);
	pte = start_pte;

	do {
		pte_t ptent = *pte;

		if (pte_none(ptent))
			goto next;

		if (pte_present(ptent)) {
			struct page *page;

			page = pfn_to_page(pte_pfn(ptent));
			ptent = hpmm_ptep_get_and_clear(pte);
			if (!hpmm_is_free_buddy_page(page))
				put_page(page);
		}
next:
		count++;
	} while (pte++, count < nr);
}

/*
 * clear the pid bbu Memory, we need to clear the pgtables,
 * so, should make sure the process is not running
 */
int persist_memory_clear(struct hpmm_persist_device *dev, int flags, unsigned long uuid)
{
	struct hpmm_mgr_context *hpmm_mgr;
	struct pgtable_head *pgtable_head;
	struct pgtable_node *pgtable_node;
	struct page *page;
	unsigned long obj_index;
	unsigned long pageaddr;

	if (!dev)
		return -1;

	hpmm_mgr = (struct hpmm_mgr_context *)dev->mgr_addr;

	if (!check_hpmm_mgr_context_valid((unsigned long)hpmm_mgr)) {
		pr_err("the dev is wrong , node is not init. please do init first.\n");
		return -1;
	}

	pgtable_head = (struct pgtable_head *)
		(hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head);
	pgtable_node = (struct pgtable_node *)pgtable_head->node;

	for_each_pgtable(pgtable_head, pgtable_node, obj_index) {
		if (test_bit(obj_index, pgtable_head->bitmap)) {
			if ((flags & (HPMM_CLEAR_ALL | HPMM_CLEAR_METADATA)) ||
			    uuid == pgtable_node->uuid) {
				pageaddr = (unsigned long)pgtable_head->page_base +
							(obj_index << PAGE_SHIFT);

				page = virt_to_page((struct page *)pageaddr);
				hpmm_zap_pte_range(page, dev);
				spin_lock(&dev->dev_lock);
				clear_bit(obj_index, pgtable_head->bitmap);
				pgtable_node->addr = 0;
				pgtable_node->uuid = 0;
				pgtable_node->index = 0;
				pgtable_head->last = min(pgtable_head->last, obj_index);
				spin_unlock(&dev->dev_lock);
			}
		}
		cond_resched();
	}
	if (flags & HPMM_CLEAR_ALL)
		clear_uce_records(hpmm_mgr, uuid, 1);
	else
		clear_uce_records(hpmm_mgr, uuid, 0);

	return 0;
}

/*
 * Carve the UCE record area out of the manager region.
 *
 * Layout, contiguous from @start:
 *   [uce_record_head][bitmap][records array]
 * @len is the total size of the manager region beginning at @mgr and
 * is used only for the bounds check.  On success the manager slot
 * HPMM_UCE_RECORD_HEAD is populated and *end is set to the first
 * byte past the UCE area.
 *
 * Returns 0 on success, -ENOMEM when the area does not fit in @len.
 */
static int uce_mgr_init(struct hpmm_mgr_context *mgr, unsigned long len,
			unsigned long start, unsigned long *end)
{
	struct uce_record_head *head;
	size_t size;

	/* head + record array + allocation bitmap */
	size = sizeof(struct uce_record_head) + MAX_UCE_RECORD_NR * sizeof(struct uce_record) +
		sizeof(long) * BITS_TO_LONGS(MAX_UCE_RECORD_NR);
	if ((start + size - (unsigned long)mgr) > len)
		return -ENOMEM;

	head = (struct uce_record_head *)start;
	head->magic = HPMM_UCE_RECORD_HEAD_MAGIC;
	head->max_index = MAX_UCE_RECORD_NR;
	head->last = 0;
	/* bitmap sits directly behind the head ... */
	head->bitmap = (unsigned long *)((unsigned long)head + sizeof(struct uce_record_head));
	/* ... and the record array directly behind the bitmap */
	head->records = (struct uce_record *)((unsigned long)head + sizeof(struct uce_record_head) +
			sizeof(long) * BITS_TO_LONGS(MAX_UCE_RECORD_NR));
	mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].offset = (unsigned long)head - (unsigned long)mgr;
	mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].type = HPMM_MGR_UCE_RECORD;
	mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].uce_record_head = head;
	mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].size = size;
	*end = start + size;
	return 0;
}

/*
 * Lay out the persistent manager region:
 *   [mgr header (mgr_len)][pgtable_head][bitmap][node array]
 *   [page_base .. max_index pages][uce record area]
 *
 * @mgr_len is the size of the hpmm_mgr_context header; @size is the
 * total region size.  Returns 0 on success, -1 when the computed
 * layout exceeds the region, -ENOMEM when the UCE area does not fit.
 */
static int mgr_layout_init(struct hpmm_mgr_context *hpmm_mgr,
			   unsigned long mgr_len,
			   unsigned long size)
{
	struct pgtable_head *pgtable_head = (struct pgtable_head *)((void *)hpmm_mgr + mgr_len);
	unsigned long offset;
	unsigned long left;
	unsigned long max_index;
	unsigned long start, end;
	unsigned long uce_record_size;

	hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].offset = mgr_len;
	hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].type = HPMM_MGR_PGTABLE;
	hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head = pgtable_head;

	/* fixed-size tail reserved for the UCE record area */
	uce_record_size = sizeof(struct uce_record_head) +
			  sizeof(long) * BITS_TO_LONGS(MAX_UCE_RECORD_NR) +
			  MAX_UCE_RECORD_NR * sizeof(struct uce_record);

	hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].size = size - mgr_len - uce_record_size;

	/*
	 * Capacity estimate: each slot costs one pgtable_node plus one
	 * page; a second pass subtracts the bitmap's share (plus one
	 * slot of slack for the PAGE_ALIGN of page_base below).
	 */
	offset = mgr_len + sizeof(struct pgtable_head);
	left = size - offset;
	max_index = left / (sizeof(struct pgtable_node) + PAGE_SIZE);
	max_index -= (BITS_TO_LONGS(max_index) * sizeof(long)) /
		     (sizeof(struct pgtable_node) + PAGE_SIZE) + 1;

	pgtable_head->magic = HPMM_PGTABLE_HEAD_MAGIC;
	pgtable_head->max_index = max_index;
	pgtable_head->last = 0;
	pgtable_head->next = NULL;
	pgtable_head->linear_mapping_pfn = 0;
	pgtable_head->linear_mapping_nrpage = 0;
	pgtable_head->bitmap = (unsigned long *)((void *)pgtable_head +
						 sizeof(struct pgtable_head));
	pgtable_head->node = (struct pgtable_node *)((void *)pgtable_head +
					sizeof(struct pgtable_head) +
					BITS_TO_LONGS(max_index) * sizeof(long));
	/* pages begin at the first page boundary after the node array */
	pgtable_head->page_base = (void *)PAGE_ALIGN((unsigned long)((void *)pgtable_head->node +
					max_index * sizeof(struct pgtable_node)));

	/*
	 * NOTE(review): this bound only checks against the end of the
	 * whole region; it does not subtract uce_record_size, so the
	 * page area could in principle overlap the UCE tail — confirm
	 * the capacity estimate above always leaves enough slack.
	 */
	if (((unsigned long)pgtable_head->page_base + max_index * PAGE_SIZE) >
	    ((unsigned long)hpmm_mgr + size)) {
		pr_warn("the size cac is wrong");
		return -1;
	}

	/* UCE area starts right after the pgtable area */
	start = hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].size +
		hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].offset + (unsigned long)hpmm_mgr;

	if (uce_mgr_init(hpmm_mgr, size, start, &end)) {
		pr_err("uce mgr init failed\n");
		return -ENOMEM;
	}

	return 0;
}

/* hpmm_mgr_context init */
int persistent_mgr_init(void *start_addr, unsigned long size)
{
	struct hpmm_mgr_context *hpmm_mgr;
	unsigned long mgr_len = struct_size(hpmm_mgr, hpmm_mgr_info, MGR_INFO_NUM);
	unsigned long min;
	u16 crc = 0;

	min = mgr_len + sizeof(struct pgtable_head) + sizeof(struct uce_record_head);

	if (size <= min) {
		pr_warn("size reserved for hpmm_mgr is too small\n");
		return -1;
	}

	memset(start_addr, 0, size);
	hpmm_mgr = (struct hpmm_mgr_context *)start_addr;

	hpmm_mgr->mgr_sum = 0;
	hpmm_mgr->magic = cpu_to_le16(HPMM_MGR_MAGIC);
	hpmm_mgr->seq = 1;
	hpmm_mgr->version = 1;
	hpmm_mgr->flags = 0;
	hpmm_mgr->num = MGR_INFO_NUM;

	if (mgr_layout_init(hpmm_mgr, mgr_len, size) != 0)
		return -1;

	crc = crc16(~0, (__u8 *)hpmm_mgr, mgr_len);
	hpmm_mgr->mgr_sum = crc;
	pr_info("hpmm_mgr init: sum: %d, magic: %d,seq:%ld, version: %d,flags:%d",
			hpmm_mgr->mgr_sum,
			hpmm_mgr->magic,
			hpmm_mgr->seq,
			hpmm_mgr->version,
			hpmm_mgr->flags);

	return 0;
}

/*
 * Rebase the manager's embedded pointers after the persistent region
 * has been remapped to virtual address @addr.
 *
 * The stored offsets are authoritative; the absolute pointers in the
 * heads are fixed up by the delta between the new and old locations.
 * Finally the header CRC is recomputed so later validation passes.
 */
void reconstruct_persistent_mgr(unsigned long addr)
{
	struct hpmm_mgr_context *hpmm_mgr = (struct hpmm_mgr_context *)addr;
	unsigned long old_addr;
	unsigned long mgr_len = struct_size(hpmm_mgr, hpmm_mgr_info, MGR_INFO_NUM);
	struct pgtable_head *pgtable_head;
	struct uce_record_head *uce_records;
	long offset;
	unsigned long startaddr;

	/* remember where the region used to live before overwriting */
	old_addr = (unsigned long)hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head;
	startaddr = hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].offset + addr;
	hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head = (struct pgtable_head *)startaddr;
	hpmm_mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].uce_record_head = (struct uce_record_head *)
		(hpmm_mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].offset + addr);
	pgtable_head = hpmm_mgr->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head;
	uce_records = hpmm_mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].uce_record_head;
	offset = (unsigned long)pgtable_head - old_addr;
	/*
	 * Region did not move: the pointers written above equal the old
	 * values, so the stored CRC is still valid and nothing else
	 * needs fixing.
	 */
	if (!offset)
		return;

	/* every interior pointer moved by the same delta */
	if (pgtable_head->next)
		pgtable_head->next = (struct pgtable_head *)((unsigned long)pgtable_head->next
							     + offset);
	pgtable_head->bitmap = (unsigned long *)((unsigned long)pgtable_head->bitmap + offset);
	pgtable_head->node = (struct pgtable_node *)((unsigned long)pgtable_head->node + offset);
	pgtable_head->page_base = (void *)((unsigned long)pgtable_head->page_base + offset);

	uce_records->records = (struct uce_record *)((unsigned long)uce_records->records + offset);
	uce_records->bitmap = (unsigned long *)((unsigned long)uce_records->bitmap + offset);

	/* reseal the header: CRC is computed with mgr_sum zeroed */
	hpmm_mgr->mgr_sum = 0;
	hpmm_mgr->mgr_sum = crc16(~0, (__u8 *)hpmm_mgr, mgr_len);
}

/* Reset all fields of a UCE record before its slot is recycled. */
void clear_uce_record(struct uce_record *record)
{
	record->pfn = 0;
	record->uuid = 0;
	record->va = 0;
	/*
	 * NOTE(review): the original cleared ->pfn twice; the second,
	 * redundant store has been dropped.  Verify no other field
	 * (e.g. an index/length member) was meant instead.
	 */
}

/*
 * Drop UCE records from @hpmm_mgr.  When @flag is set, every live
 * record is cleared; otherwise only records whose uuid matches
 * @uuid are removed.
 */
void clear_uce_records(struct hpmm_mgr_context *hpmm_mgr, unsigned long uuid, bool flag)
{
	struct uce_record_head *head;
	struct uce_record *record;
	unsigned long idx;

	head = hpmm_mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].uce_record_head;
	for_each_uce_record(head, record, idx) {
		if (!test_bit(idx, head->bitmap))
			continue;
		if (flag || record->uuid == uuid) {
			clear_uce_record(record);
			clear_bit(idx, head->bitmap);
			/* keep the lowest free slot cached for reuse */
			head->last = min(idx, head->last);
		}
	}
}
