// SPDX-License-Identifier: GPL-2.0
/*
 * VPMEM Main Entry
 *
 * Copyright (C) 2020 HUAWEI, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/vfio.h>
#include <linux/init.h>
#include <linux/mempolicy.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/kallsyms.h>
#include <linux/kernel_hotupgrade.h>
#include <linux/vpmem_metadata.h>
#include <linux/vpmem_page.h>
#include <linux/iommu.h>

#if defined(__aarch64__)
#include <asm/pgtable-hwdef.h>
#include <asm/vpmem/api.h>
#else
#include <asm/e820/api.h>
#endif

/* Single module-wide vpmem instance; access is serialized via its .lock. */
static struct vpmem vpmem_state;

/*
 * Acquire the global vpmem state under its mutex.
 *
 * Return: the module-wide &vpmem_state with its lock held.  The caller
 * must release it with vpmem_info_put_unlocked().
 */
struct vpmem *vpmem_info_get_locked(void)
{
	mutex_lock(&vpmem_state.lock);
	return &vpmem_state;
}
EXPORT_SYMBOL_GPL(vpmem_info_get_locked);

/*
 * Release the global vpmem state lock taken by vpmem_info_get_locked().
 */
void vpmem_info_put_unlocked(void)
{
	mutex_unlock(&vpmem_state.lock);
}
EXPORT_SYMBOL_GPL(vpmem_info_put_unlocked);

/*
 * Test whether @blk is currently on @p's open-block list.
 *
 * Walks the vpmem_blk_struct entries hanging off @p->vpmem_blk_header and
 * compares each tracked metadata block pointer against @blk.
 *
 * NOTE(review): presumably the caller holds p->lock while walking the
 * list — confirm against call sites.
 *
 * Return: 1 when the block is open, 0 otherwise.
 */
int vpmem_blk_is_opened(struct vpmem *p, metadata_block_ptr blk)
{
	struct vpmem_blk_struct *cur;

	list_for_each_entry(cur, &p->vpmem_blk_header, entry) {
		if (cur->blk == blk)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vpmem_blk_is_opened);

/*
 * Pick the NUMA node with the least reserved-pool usage ("richest" node).
 *
 * Scans resved_used[0..nr_node] (inclusive upper bound — this file
 * consistently iterates node indices with <= nr_node) and returns the
 * index of the smallest counter; ties resolve to the lowest index.
 */
static u32 vpmem_stat_find_richer_node(struct vpmem *p)
{
	u32 best = 0;
	u32 node;

	for (node = 1; node <= p->stat.nr_node; ++node) {
		if (p->stat.resved_used[node] < p->stat.resved_used[best])
			best = node;
	}
	return best;
}

/*
 * Account an allocated block's ranges in the per-node statistics.
 *
 * VM-RAM blocks are charged to the regular used counters; every other
 * block is charged to the reserved-used counters.  In both cases the
 * same ranges are subtracted from the free counters.
 */
void vpmem_stat_alloc(struct vpmem *p, metadata_block_ptr blk)
{
	if (metadata_block_is_vm_ram(blk->name))
		metadata_ranges_stat(&blk->ranges, p->stat.used,
				     p->stat.nr_node, true);
	else
		metadata_ranges_stat(&blk->ranges, p->stat.resved_used,
				     p->stat.nr_node, true);

	/* The allocated ranges leave the free pool. */
	metadata_ranges_stat(&blk->ranges, p->stat.free,
			     p->stat.nr_node, false);
}

/*
 * Undo vpmem_stat_alloc()'s accounting for a released block.
 *
 * Mirrors vpmem_stat_alloc(): VM-RAM blocks are uncharged from the
 * regular used counters, everything else from the reserved-used
 * counters, and the ranges are returned to the free counters.
 */
void vpmem_stat_release(struct vpmem *p, metadata_block_ptr blk)
{
	if (metadata_block_is_vm_ram(blk->name))
		metadata_ranges_stat(&blk->ranges, p->stat.used,
				     p->stat.nr_node, false);
	else
		metadata_ranges_stat(&blk->ranges, p->stat.resved_used,
				     p->stat.nr_node, false);

	/* The released ranges rejoin the free pool. */
	metadata_ranges_stat(&blk->ranges, p->stat.free,
			     p->stat.nr_node, true);
}

/*
 * Find or allocate a metadata block matching @info.
 *
 * An existing block may be reused only when it is marked persistent, is
 * not currently opened, and its size matches the request exactly.  For a
 * fresh allocation of a non-VM-RAM block with no NUMA preference, the
 * node with the least reserved usage is chosen.
 *
 * Return: the block on success, or an ERR_PTR:
 *   -EEXIST  a non-persistent block with this uuid/name already exists
 *   -EBUSY   the persistent block is currently opened
 *   -ENOMEM  size mismatch on a persistent block, or allocation failed
 *            (NOTE(review): -EINVAL may fit the mismatch case better,
 *            but callers may rely on -ENOMEM — left unchanged)
 */
metadata_block_ptr vpmem_blk_alloc(struct vpmem *p, struct vpmem_block_info *info)
{
	metadata_block_ptr blk;

	blk = metadata_block_find(p->metadata_header, info);
	if (blk) {
		if (!VPMEM_BLOCK_IS_PERSISTEN(blk)) {
			VPMEM_ERR("%pUb/%s already exist\n", info->uuid, info->name);
			return ERR_PTR(-EEXIST);
		}

		if (vpmem_blk_is_opened(p, blk)) {
			VPMEM_ERR("%pUb/%s being used\n", info->uuid, info->name);
			return ERR_PTR(-EBUSY);
		}

		if (metadata_block_size(blk) != info->size) {
			/* fixed typo: "persisten" -> "persistent" */
			VPMEM_ERR("%pUb/%s persistent size mismatch\n", info->uuid, info->name);
			return ERR_PTR(-ENOMEM);
		}
		VPMEM_INFO("%pUb/%s 0x%pK reused\n", info->uuid, info->name, (void *)blk);
	} else {
		/* Spread unscoped reserved allocations onto the least-used node. */
		if (!metadata_block_is_vm_ram(info->name) && info->numa == VPMEM_NO_NUMA)
			info->numa = vpmem_stat_find_richer_node(p);

		blk = metadata_block_alloc(p->metadata_header, info);
	}

	if (!blk)
		return ERR_PTR(-ENOMEM);

	return blk;
}
EXPORT_SYMBOL_GPL(vpmem_blk_alloc);
EXPORT_SYMBOL_GPL(vpmem_blk_alloc);

/*
 * Build one MPOL_BIND mempolicy per NUMA node covered by the free pool.
 *
 * Scans header->free_pool to find the highest node id present, then
 * initializes pols[0..max_numa], each bound to exactly its own node.
 *
 * NOTE(review): the caller must provide at least (max_numa + 1) entries
 * in @pols — capacity is not validated here; confirm against callers.
 * NOTE(review): the error message below lacks a trailing '\n' unlike the
 * other log lines in this file.
 *
 * Return: 0 on success, -1 when no valid node id was found in the pool.
 */
static int vpmem_init_policy(metadata_header_ptr header, struct mempolicy *pols)
{
	struct list_head *list;
	struct list_head *tmp;
	metadata_range_ptr pool = NULL;
	int max_numa = -1;
	int i;

	/* Track the largest node id seen across all free ranges. */
	list_for_each_safe(list, tmp, &header->free_pool) {
		pool = list_entry(list, struct metadata_range, entry);
		if (max_numa < pool->numa)
			max_numa = pool->numa;
	}

	if (max_numa < 0 || max_numa >= MAX_NUMNODES) {
		VPMEM_ERR("invalid numa %d, valid range %d-%d", max_numa, 0, MAX_NUMNODES - 1);
		return -1;
	}

	/* Hand-initialize each policy: refcount 1, bound to its own node. */
	memset(pols, 0, (max_numa + 1) * sizeof(struct mempolicy));
	for (i = 0; i <= max_numa; ++i) {
		atomic_set(&pols[i].refcnt, 1);
		pols[i].mode = MPOL_BIND;
		pols[i].flags = MPOL_F_SHARED;
		node_set(i, pols[i].v.nodes);
	}

	return 0;
}

static int force_init = -1;
module_param(force_init, int, 0444);

/*
 * Reserve 64M for each node for future use
 */
#define VPAGE_MEM_RESERVED_PER_NODE 64 /* MB */

/*
 * Compute the per-node vpage reservation size in bytes.
 *
 * Takes the VPAGE_MEM_RESERVED_PER_NODE megabyte budget and rounds it up
 * to a PMD boundary (presumably so the reservation can be mapped with
 * PMD-sized entries — confirm).  Logs the resulting size in MB.
 */
unsigned long vpmem_early_prepare_vpage_mem_per_node(void)
{
	unsigned long nbytes = (unsigned long)VPAGE_MEM_RESERVED_PER_NODE << 20;

	nbytes = ALIGN(nbytes, PMD_SIZE);

	VPMEM_INFO("Memory reserved for vpage per node: %luMB", nbytes >> 20);

	return nbytes;
}

/*
 * Carve out a fresh "nodevpage<N>" block per NUMA node and hand its
 * ranges to the vpage allocator.
 *
 * If a block of that name already exists (left over from a previous
 * kernel), it is released and re-allocated so its size always matches
 * the current per-node reservation.  Nodes where allocation fails are
 * skipped with a warning rather than aborting early init.
 */
void __init vpmem_early_prepare_vpage(struct vpmem *p)
{
	u32 i;
	struct list_head *list;
	metadata_block_ptr blk;
	metadata_range_ptr range;
	struct vpmem_block_info info = { 0 };
	unsigned long vpage_mem_per_node = vpmem_early_prepare_vpage_mem_per_node();

	for (i = 0; i <= p->stat.nr_node; ++i) {
		/* %u: i is u32 (was %d, a printk format mismatch) */
		snprintf(info.name, sizeof(info.name), "nodevpage%u", i);
		info.numa = (u8)i;
		info.size = vpage_mem_per_node;

		/* Drop any stale block so the new one matches the current size. */
		blk = metadata_block_find(p->metadata_header, &info);
		if (blk) {
			vpmem_stat_release(p, blk);
			metadata_block_free(p->metadata_header, blk);
		}
		blk = vpmem_blk_alloc(p, &info);
		if (IS_ERR(blk)) {
			VPMEM_WARN("Can not find block for vpage, skip.\n");
			continue;
		}
		vpmem_stat_alloc(p, blk);

		/* Register every range of the block as a fresh vpage zone. */
		list_for_each(list, &blk->ranges) {
			range = list_entry(list, struct metadata_range, entry);
			vpage_init_zone(range->start << VPMEM_PAGE_SHIFT,
					range->size << VPMEM_PAGE_SHIFT,
					(u32)range->numa, 1);
		}
	}
}

/*
 * Re-register vpage zones inherited across a hot upgrade.
 *
 * Walks the persisted used-block list, picks out every block whose name
 * starts with "nodevpage", and re-initializes its ranges as vpage zones
 * without wiping them (last vpage_init_zone argument is 0, vs 1 on the
 * fresh-prepare path).
 */
void __init vpmem_early_reused_vpage(struct vpmem *p)
{
	metadata_block_ptr blk;
	metadata_range_ptr range;

	list_for_each_entry(blk, &p->metadata_header->used_blocks, entry) {
		if (strncmp(blk->name, "nodevpage", strlen("nodevpage")) != 0)
			continue;

		list_for_each_entry(range, &blk->ranges, entry)
			vpage_init_zone(range->start << VPMEM_PAGE_SHIFT,
					range->size << VPMEM_PAGE_SHIFT,
					(u32)range->numa, 0);
	}
}

/*
 * Early initialization of the vpmem subsystem.
 *
 * Decides whether to reinitialize or persist metadata across a hot
 * upgrade, discovers vpmem memory ranges from firmware, builds the
 * metadata header from the first usable range and folds the remaining
 * ranges in, initializes per-node mempolicies, and finally prepares (or
 * reuses) the per-node vpage zones.
 *
 * Return: 0 on success, negative errno on failure.
 */
int __init vpmem_early_init(void)
{
	bool reused = false;
	struct vpmem_table *table;
	struct vpmem_entry *entry;
	u32 i;
	int r = 0;

	/*
	 * force_init < 0 means "not set via module param": persist the
	 * metadata only when the hot-upgrade flag requests it.
	 */
	if (force_init < 0)
		force_init = (kernel_hotupgrade & UPGRADE_F_VPMEM_PERSIST) ? 0 : 1;
	else
		force_init = force_init > 0 ? 1 : 0;

	mutex_init(&vpmem_state.lock);

	table = vpmem_numa_init();
	if (IS_ERR(table))
		return PTR_ERR(table);

	if (!table->nr_entry) {
		VPMEM_ERR("not found vpmem memory block\n");
		r = -ENOENT;
		goto clean_table;
	}

	vpmem_numa_get_stat(table, &vpmem_state.stat);
	for (i = 0; i < table->nr_entry; i++) {
		entry = &table->entry[i];
		/* Each usable range must at least fit metadata + reserve. */
		if ((entry->end - entry->start) < METADATA_SIZE + RESERVED_SIZE) {
			VPMEM_WARN("bypass small memory block\n");
			continue;
		}

		if (entry->numa >= VPMEM_NO_NUMA) {
			VPMEM_WARN("bypass node %d(>= %d) memory block\n",
				entry->numa, VPMEM_NO_NUMA);
			continue;
		}

		/* First usable entry hosts (or restores) the metadata header. */
		if (!vpmem_state.metadata_header) {
			vpmem_state.metadata_header = metadata_init((u8)entry->numa, entry->start,
				entry->end - entry->start, force_init, &reused);
			if (IS_ERR(vpmem_state.metadata_header)) {
				r = PTR_ERR(vpmem_state.metadata_header);
				goto clean_table;
			}
			continue;
		}

		/*
		 * Later entries only need registering on a fresh init;
		 * reused metadata already records them.
		 */
		if (!reused) {
			r = metadata_add_range(vpmem_state.metadata_header, entry->start,
				entry->end - entry->start, (u8)entry->numa);
			if (r)
				goto clean_metadata;
		}
	}

	if (!vpmem_state.metadata_header) {
		VPMEM_ERR("not found usable memory block\n");
		r = -ENOENT;
		goto clean_table;
	}

	/* Update vpmem stat info */
	metadata_info_hugepages(vpmem_state.metadata_header, &vpmem_state.stat);

	if (vpmem_init_policy(vpmem_state.metadata_header, vpmem_state.pols) < 0) {
		VPMEM_ERR("get policy failed\n");
		/*
		 * Bug fix: r was still 0 here, so the function reported
		 * success while tearing the metadata down.
		 */
		r = -EINVAL;
		goto clean_metadata;
	}

	INIT_LIST_HEAD(&vpmem_state.vpmem_blk_header);
	kfree(table);
	VPMEM_INFO("vpmem early success\n");

	/* Vpage zones: reuse across IOMMU-live upgrade, otherwise rebuild. */
	if (kernel_hotupgrade & UPGRADE_F_VPAGE_ALLOC) {
		if (reused && (kernel_hotupgrade & UPGRADE_F_IOMMU_LIVE))
			vpmem_early_reused_vpage(&vpmem_state);
		else
			vpmem_early_prepare_vpage(&vpmem_state);
	}

	return 0;

clean_metadata:
	metadata_fini(vpmem_state.metadata_header);
	vpmem_state.metadata_header = NULL;
clean_table:
	kfree(table);
	return r;
}

/*
 * Module exit: tear down the vpmem metadata.
 */
static void __exit vpmem_late_exit(void)
{
	metadata_fini(vpmem_state.metadata_header);
}

/*
 * Find the first E820 VPMEM range large enough for metadata + reserve.
 *
 * On entry *start is the search origin; on success it holds the base of
 * the first sufficiently large E820_TYPE_VPMEM range (undersized ranges
 * are skipped over).
 *
 * NOTE(review): e820__mapped_find is an x86 interface; presumably the
 * aarch64 build satisfies it through asm/vpmem/api.h — confirm.
 *
 * Return: 0 on success, -1 when no suitable range exists.
 */
int first_valid_vpmem_start(u64 *start)
{
	for (;;) {
		u64 size = e820__mapped_find(start, E820_TYPE_VPMEM);

		if (size >= METADATA_SIZE + RESERVED_SIZE)
			return 0;
		if (!size)
			return -1;
		/* Too small: advance past it and keep searching. */
		*start += size;
	}
}
