// SPDX-License-Identifier: GPL-2.0
/*
 * VPMEM Metadata
 *
 * Copyright (C) 2020 HUAWEI, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/io.h>
#if defined(__aarch64__)
#include <asm/pgtable-hwdef.h>
#endif
#include <linux/vpmem_metadata.h>

static void metadata_ranges_free(metadata_header_ptr header,
		struct list_head *ranges);

static void metadata_print_range(struct list_head *head, struct seq_file *m)
{
	int index = 0;
	struct list_head *list;
	metadata_range_ptr pool;

	list_for_each(list, head) {
		pool = list_entry(list, struct metadata_range, entry);
		seq_printf(m, "  Node %d [ 0x%llx - 0x%llx ] 0x%llx\n", pool->numa,
			   pool->start, pool->start + pool->size, pool->size);
		index += 1;
	}
}

/*
 * metadata_alloc_unit - hand out one zeroed metadata unit from @header
 *
 * Recycled units on the free_units list are preferred; otherwise the
 * next never-used slot of the units[] array is consumed.  The last two
 * array slots are held in reserve: when free_unit drops to 2 the
 * request is refused.
 *
 * Returns the unit (cleared with memset_io, as it lives in remapped
 * device memory), or NULL when the reserve threshold is reached.
 */
static metadata_unit_ptr metadata_alloc_unit(metadata_header_ptr header)
{
	metadata_unit_ptr slot;

	if (!list_empty(&header->free_units)) {
		/* Recycle a previously freed unit. */
		slot = list_first_entry(&header->free_units,
				struct metadata_unit, entry);
		list_del(&slot->entry);
	} else {
		if (header->free_unit <= 2) {
			VPMEM_ERR("metadata header without enough free unit\n");
			return NULL;
		}
		/* Consume the next fresh slot from the units[] array. */
		slot = &header->units[METADATA_UNIT_COUNT - header->free_unit];
		header->free_unit--;
	}

	memset_io(slot, 0, sizeof(*slot));
	return slot;
}

/*
 * metadata_free_unit - return @unit to the header's recycle list
 *
 * The unit is not cleared here; metadata_alloc_unit() zeroes it again
 * when it is handed out next time.
 */
static void metadata_free_unit(metadata_header_ptr header,
		metadata_unit_ptr unit)
{
	list_add(&unit->entry, &header->free_units);
}

/*
 * metadata_range_alloc - carve one contiguous range of @size pages
 * @header: metadata header owning the free pool
 * @numa:   required node, or VPMEM_NO_NUMA to accept any node
 * @size:   requested size in pages (same unit as pool->size)
 * @free:   optional out-parameter; on failure, receives the total free
 *          pages seen on matching nodes (pools that were too small)
 *
 * First-fit walk of the free pool: the first entry on a matching node
 * that is large enough donates @size pages from its front.
 *
 * Returns the new range, or NULL when no unit is available or no single
 * pool entry can satisfy the request.
 */
static metadata_range_ptr metadata_range_alloc(metadata_header_ptr header,
		u8 numa, size_t size, size_t *free)
{
	size_t free_size = 0;
	struct list_head *list;
	struct list_head *tmp;
	metadata_range_ptr pool;
	metadata_range_ptr range;

	range = (metadata_range_ptr)metadata_alloc_unit(header);
	if (!range)
		return NULL;

	range->size = size;

	list_for_each_safe(list, tmp, &header->free_pool) {
		pool = list_entry(list, struct metadata_range, entry);
		/* Skip pools on the wrong node (not counted in free_size). */
		if (numa != VPMEM_NO_NUMA && pool->numa != numa)
			continue;
		if (pool->size >= range->size) {
			/* First fit: shrink the donor pool from its front. */
			range->start = pool->start;
			range->numa = pool->numa;
			pool->start += range->size;
			pool->size -= range->size;
			if (pool->size == 0) {
				/* Donor fully consumed — recycle its unit. */
				list_del_init(&pool->entry);
				metadata_free_unit(header, (metadata_unit_ptr)pool);
			}
			return range;
		}
		/* Too small: remember it for the caller's defrag decision. */
		free_size += pool->size;
	}

	if (free)
		*free = free_size;

	metadata_free_unit(header, (metadata_unit_ptr)range);
	return NULL;
}

/*
 * metadata_range_defrag - satisfy @size pages from multiple fragments
 * @header: metadata header owning the free pool
 * @numa:   required node, or VPMEM_NO_NUMA to accept any node
 * @size:   number of pages still to collect
 * @ranges: destination list receiving the collected ranges
 *
 * Moves matching free-pool entries onto @ranges — whole entries while
 * they fit, then a front-split of the last entry for the remainder.
 *
 * NOTE(review): if the pool runs out before @size is satisfied the loop
 * still returns 0; callers (metadata_block_truncate) are expected to
 * have verified enough matching free space first — confirm this holds
 * for every caller.
 *
 * Returns 0 on success; -ENOMEM if a unit for the final split cannot be
 * allocated, in which case everything already moved to @ranges is
 * returned to the free pool.
 */
static int metadata_range_defrag(metadata_header_ptr header,
		u8 numa, size_t size, struct list_head *ranges)
{
	struct list_head *list;
	struct list_head *tmp;
	metadata_range_ptr pool;
	metadata_range_ptr range;

	list_for_each_safe(list, tmp, &header->free_pool) {
		if (!size)
			break;

		pool = list_entry(list, struct metadata_range, entry);
		if (numa != VPMEM_NO_NUMA && pool->numa != numa)
			continue;
		if (pool->size <= size) {
			/* Entry fits entirely: move it wholesale. */
			list_del_init(&pool->entry);
			list_add(&pool->entry, ranges);
			size -= pool->size;
		} else {
			/* Entry is larger: split off just the remainder. */
			range = (metadata_range_ptr)metadata_alloc_unit(header);
			if (!range)
				goto cleanup;

			range->size = size;
			range->numa = pool->numa;
			range->start = pool->start;
			pool->start += range->size;
			pool->size -= range->size;
			size = 0;
			list_add(&range->entry, ranges);
			break;
		}
	}
	return 0;

cleanup:
	/* Undo: merge everything collected so far back into the pool. */
	metadata_ranges_free(header, ranges);
	return -ENOMEM;
}

/*
 * metadata_range_free - return @range to the free pool, merging
 * with physically adjacent neighbors on the same node
 *
 * The free pool is kept sorted by start address: @range is inserted
 * before the first entry with a larger start (or at the tail).  After
 * insertion, @range is coalesced with its predecessor and/or successor
 * when they belong to the same node and are contiguous; absorbed
 * entries have their units recycled.
 */
static void metadata_range_free(metadata_header_ptr header,
		metadata_range_ptr range)
{
	struct list_head *list;
	struct list_head *tmp;
	metadata_range_ptr pool = NULL;
	metadata_range_ptr prev = NULL;
	metadata_range_ptr next = NULL;

	/* Sorted insert: place @range before the first larger start. */
	list_for_each_safe(list, tmp, &header->free_pool) {
		pool = list_entry(list, struct metadata_range, entry);
		if (pool->start > range->start) {
			list_add_tail(&range->entry, list);
			goto merge;
		}
	}

	/* Largest start so far: append at the tail. */
	list_add_tail(&range->entry, &header->free_pool);

merge:
	/* Try to merge with the predecessor (skip if we're the head). */
	if (range->entry.prev != (&header->free_pool)) {
		prev = list_entry(range->entry.prev, struct metadata_range, entry);
		if (range->numa == prev->numa &&
		    (prev->start + prev->size) == range->start) {
			prev->size += range->size;
			list_del_init(&range->entry);
			metadata_free_unit(header, (metadata_unit_ptr)range);
			/* Continue merging from the combined entry. */
			range = prev;
		}
	}

	/* Try to merge with the successor (skip if we're the tail). */
	if (range->entry.next != (&header->free_pool)) {
		next = list_entry(range->entry.next, struct metadata_range, entry);
		if (range->numa == next->numa &&
		    (range->start + range->size) == next->start) {
			range->size += next->size;
			list_del_init(&next->entry);
			metadata_free_unit(header, (metadata_unit_ptr)next);
		}
	}
}

/*
 * metadata_ranges_free - drain @ranges, merging every entry back into
 * the header's free pool via metadata_range_free().
 */
static void metadata_ranges_free(metadata_header_ptr header,
		struct list_head *ranges)
{
	metadata_range_ptr cur;
	metadata_range_ptr tmp;

	list_for_each_entry_safe(cur, tmp, ranges, entry) {
		list_del_init(&cur->entry);
		metadata_range_free(header, cur);
	}
}

/*
 * metadata_init_new - format a brand-new metadata header in place
 * @header: remapped header memory to initialize
 * @numa:   node the initial free pool belongs to
 * @start:  physical start of the vpmem region
 * @size:   total size of the vpmem region in bytes
 *
 * Clears the header, sets up the bookkeeping lists, and seeds the free
 * pool with one range covering everything past the metadata and
 * reserved areas (stored in page frames).  The magic string is written
 * last so a partially initialized header is never mistaken for valid.
 */
static void metadata_init_new(metadata_header_ptr header,
		u8 numa, uint64_t start, uint64_t size)
{
	metadata_range_ptr pool_range;

	memset_io(header, 0, sizeof(struct metadata_header));

	/* Recorded so a later boot can remap to the same virtual base. */
	header->base_addr = (u64)header;
	header->version = 0x1;

	header->free_unit = METADATA_UNIT_COUNT;

	header->start = start;
	header->size = size;

	INIT_LIST_HEAD(&header->free_units);
	INIT_LIST_HEAD(&header->free_pool);
	INIT_LIST_HEAD(&header->used_blocks);
	INIT_LIST_HEAD(&header->used_ranges);

	/*
	 * Alloc cannot fail here: free_unit was just set to the full
	 * METADATA_UNIT_COUNT (assumed > 2 — the allocator's reserve).
	 */
	pool_range = (metadata_range_ptr)metadata_alloc_unit(header);
	pool_range->start = (start + METADATA_SIZE + RESERVED_SIZE) >> VPMEM_PAGE_SHIFT;
	pool_range->size = (size - METADATA_SIZE - RESERVED_SIZE) >> VPMEM_PAGE_SHIFT;
	pool_range->numa = numa;
	pool_range->parent = NULL;

	list_add(&pool_range->entry, &header->free_pool);
	memcpy_toio(header->magic, METADATA_MAGIC, strlen(METADATA_MAGIC));
}

/*
 * metadata_random_header - pick a randomized virtual base for the header
 *
 * Draws a random offset, restricts it to [0, 2^38) and aligns it down
 * to a PMD boundary, then places it above VPMEM_BASE_START.
 * NOTE(review): the original comment said "random in 64GB" but
 * 1UL << 38 is a 256GB window — confirm the intended range.
 */
static unsigned long metadata_random_header(void)
{
	unsigned long offset;

	get_random_bytes(&offset, sizeof(offset));
	offset &= (1UL << 38) - 1;	/* power-of-two modulo */
	offset &= PMD_MASK;		/* PMD-align for huge mappings */
	return VPMEM_BASE_START + offset;
}

/*
 * metadata_init - map the metadata header, creating or reusing it
 * @header region at @start
 * @numa:   node for a freshly created free pool
 * @start:  physical start of the vpmem region
 * @size:   total region size in bytes
 * @force:  true to discard any existing header and reinitialize
 * @reused: optional out-flag, set true when a previous header is reused
 *
 * The header is first mapped at a random virtual base.  If @force is
 * set or no valid magic is found, a new header is initialized there.
 * Otherwise the previous header is validated and remapped at its
 * recorded base_addr so stale internal pointers stay valid.
 *
 * Returns the mapped header, or an ERR_PTR on failure.
 */
metadata_header_ptr metadata_init(u8 numa, u64 start, u64 size, bool force,
		bool *reused)
{
	int ret = 0;
	unsigned long base_addr;
	unsigned long haddr;
	metadata_header_ptr header;

	haddr = metadata_random_header();
	header = ioremap_to_vaddr(haddr, start, METADATA_SIZE);
	if ((unsigned long)header != haddr) {
		VPMEM_ERR("failed to remap 0x%pK to 0x%pK\n", (void *)start, (void *)haddr);
		return ERR_PTR(-EFAULT);
	}
	VPMEM_INFO("remap 0x%pK to 0x%pK\n", (void *)start, (void *)haddr);

	if (force || strcmp(header->magic, METADATA_MAGIC)) {
		memset_io(header, 0, METADATA_SIZE);
		metadata_init_new(header, numa, start, size);
		VPMEM_INFO("%sinit new metadata header 0x%pK\n",
			force ? "force " : "", (void *)header);
		return header;
	}

	if (header->start != start || header->size != size) {
		pr_warn("vpmem: header info does not match previous version\n");
		ret = -EINVAL;
		goto clean;
	}

	base_addr = header->base_addr;
	if (base_addr < VPMEM_BASE_START || base_addr >= VPMEM_BASE_END) {
		VPMEM_ERR("invalid header base addr 0x%pK\n", (void *)base_addr);
		ret = -EINVAL;
		goto clean;
	}

	iounmap_from_vaddr(header, start, METADATA_SIZE);
	VPMEM_INFO("unmap temporary metadata header 0x%pK\n", (void *)header);

	VPMEM_INFO("remap previous metadata header 0x%pK\n", (void *)base_addr);
	header = ioremap_to_vaddr(base_addr, start, METADATA_SIZE);
	/*
	 * Validate the second mapping the same way as the first one;
	 * previously its result was used unchecked.
	 */
	if ((unsigned long)header != base_addr) {
		VPMEM_ERR("failed to remap 0x%pK to 0x%pK\n",
			(void *)start, (void *)base_addr);
		return ERR_PTR(-EFAULT);
	}
	VPMEM_INFO("reuse previous metadata header 0x%pK 0x%pK\n",
		(void *)base_addr, (void *)header);
	if (reused)
		*reused = true;

	return header;

clean:
	iounmap_from_vaddr(header, start, METADATA_SIZE);
	return ERR_PTR(ret);
}

/*
 * metadata_add_range - donate the region [@start, @start + @size) to
 * the header's free pool
 * @header: metadata header owning the pool
 * @start:  physical start of the region, in bytes
 * @size:   region size in bytes
 * @numa:   node the region belongs to
 *
 * The region is converted to page frames and inserted through
 * metadata_range_free(), which keeps the pool sorted and merges
 * adjacent ranges.  Returns 0, or -ENOMEM if no unit is available.
 */
int metadata_add_range(metadata_header_ptr header, u64 start, u64 size, u8 numa)
{
	metadata_range_ptr new_range;

	new_range = (metadata_range_ptr)metadata_alloc_unit(header);
	if (!new_range)
		return -ENOMEM;

	new_range->numa = numa;
	new_range->start = start >> VPMEM_PAGE_SHIFT;
	new_range->size = size >> VPMEM_PAGE_SHIFT;

	metadata_range_free(header, new_range);
	return 0;
}

/*
 * metadata_fini - unmap the metadata header region
 * @header: header returned by metadata_init(), or NULL (no-op)
 *
 * Reads the physical start from the header before tearing down the
 * mapping, since the header itself becomes inaccessible afterwards.
 */
void metadata_fini(metadata_header_ptr header)
{
	resource_size_t base;
	resource_size_t limit;

	if (!header)
		return;

	base = header->start;
	limit = base + METADATA_SIZE;
	iounmap_from_vaddr(header, base, METADATA_SIZE);
	VPMEM_INFO("unmap [ 0x%pK - 0x%pK ] from 0x%pK\n",
		(void *)(base >> VPMEM_PAGE_SHIFT), (void *)(limit >> VPMEM_PAGE_SHIFT),
		(void *)header);
}

/*
 * metadata_info - dump header state to a seq_file
 * @header: metadata header to describe
 * @m:      destination seq_file
 *
 * Prints version and struct-layout info, the free ranges, then every
 * used block with its ranges, and finally delegates to vpage_info().
 */
void metadata_info(metadata_header_ptr header, struct seq_file *m)
{
	int idx = 0;
	struct list_head *list;
	metadata_block_ptr block;

	seq_printf(m, "Version: %d\n", header->version);
	/* sizeof yields size_t: %zu is the correct specifier (was %ld). */
	seq_printf(m, "HeaderStructSize: %zu\n", sizeof(struct metadata_header));
	seq_printf(m, "UnitStructSize: %zu\n", sizeof(struct metadata_unit));
	seq_printf(m, "BlockStructSize: %zu\n", sizeof(struct metadata_block));
	seq_printf(m, "RangeStructSize: %zu\n", sizeof(struct metadata_range));
	seq_printf(m, "FreeUnit: %d\n", header->free_unit);
	seq_printf(m, "BaseAddr: 0x%llx\n", header->base_addr);

	seq_puts(m, "FreeRanges:\n");
	metadata_print_range(&header->free_pool, m);

	list_for_each(list, &header->used_blocks) {
		block = list_entry(list, struct metadata_block, entry);

		seq_printf(m, "Block%d: %pUb/%s [%c]\n", idx,
			block->uuid, block->name,
			VPMEM_BLOCK_IS_PERSISTEN(block) ? 'P' : '-');
		metadata_print_range(&block->ranges, m);
		idx++;
	}
	vpage_info(m);
}
EXPORT_SYMBOL_GPL(metadata_info);

/*
 * metadata_ranges_stat - accumulate per-node huge-page counts
 * @ranges: list of metadata_range entries to account
 * @stat:   per-node counters, indexed by range->numa
 * @len:    highest accepted node index (bound is inclusive, matching
 *          the "i <= nr_node" loop in metadata_info_hugepages() —
 *          NOTE(review): confirm stat[] really has @len + 1 slots)
 * @add:    true to add the pages, false to subtract them
 *
 * Each range's size (in pages) is converted to huge pages before being
 * added to or subtracted from the counter for its node.
 */
void metadata_ranges_stat(struct list_head *ranges,
		u64 *stat, u32 len, bool add)
{
	u64 pages;
	struct list_head *list;
	metadata_range_ptr range;

	list_for_each(list, ranges) {
		range = list_entry(list, struct metadata_range, entry);
		/* range->numa is u8, so the former ">= 0" test was always true. */
		if (range->numa <= len) {
			pages = range->size >> (VPMEM_HUGE_PAGE_SHIFT - VPMEM_PAGE_SHIFT);
			if (add)
				stat[range->numa] += pages;
			else
				stat[range->numa] -= pages;
		}
	}
}
EXPORT_SYMBOL_GPL(metadata_ranges_stat);

/*
 * metadata_info_hugepages - recompute per-node huge-page statistics
 * @header: metadata header to account
 * @stat:   result structure; used/free/resved_used are rewritten,
 *          stat->vpmem (per-node totals) is read as input
 *
 * Blocks not recognized as VM RAM count toward resved_used; VM RAM
 * blocks count toward used; the free pool counts toward free.  Pages
 * unaccounted for by any of those (metadata/header overhead) are
 * derived by subtraction and folded into resved_used.
 *
 * NOTE(review): node loops here use an inclusive bound (i <= nr_node),
 * implying the arrays hold nr_node + 1 entries — confirm against the
 * struct vpmem_stat definition.
 */
void metadata_info_hugepages(metadata_header_ptr header,
		struct vpmem_stat *stat)
{
	u64 header_pages;
	u32 i;
	struct list_head *list;
	metadata_block_ptr block = NULL;

	memset(stat->used, 0, sizeof(stat->used));
	memset(stat->free, 0, sizeof(stat->free));
	memset(stat->resved_used, 0, sizeof(stat->resved_used));

	list_for_each(list, &header->used_blocks) {
		block = list_entry(list, struct metadata_block, entry);
		if (!metadata_block_is_vm_ram(block->name))
			metadata_ranges_stat(&block->ranges, stat->resved_used,
					stat->nr_node, true);
		else
			metadata_ranges_stat(&block->ranges, stat->used,
					stat->nr_node, true);
	}

	metadata_ranges_stat(&header->free_pool, stat->free, stat->nr_node, true);

	/* Add header pages stat to used and resved_used */
	for (i = 0; i <= stat->nr_node; ++i) {
		/* Whatever isn't used, free, or reserved is header overhead. */
		header_pages = stat->vpmem[i] - stat->used[i] - stat->free[i];
		header_pages -= stat->resved_used[i];
		stat->resved_used[i] += header_pages;
	}
}

/*
 * metadata_block_find - look up a used block by UUID and name
 * @header: metadata header whose used_blocks list is searched
 * @info:   identity to match (uuid and name must both be equal)
 *
 * Returns the matching block, or NULL when none exists.
 */
metadata_block_ptr metadata_block_find(metadata_header_ptr header,
			struct vpmem_block_info *info)
{
	metadata_block_ptr blk;

	list_for_each_entry(blk, &header->used_blocks, entry) {
		if (memcmp(blk->uuid, info->uuid, sizeof(blk->uuid)) != 0)
			continue;
		if (memcmp(blk->name, info->name, sizeof(blk->name)) != 0)
			continue;
		return blk;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(metadata_block_find);

/*
 * metadata_block_alloc - create a new block and back it with memory
 * @header: metadata header to allocate from
 * @info:   identity (uuid, name), node, and byte size of the new block
 *
 * The block is linked onto used_blocks *before* truncation so that
 * metadata_block_free() can unlink it cleanly on failure.
 *
 * Returns the block with info->size bytes of ranges attached, or NULL
 * when no unit is free or the free pool cannot cover the request.
 */
metadata_block_ptr metadata_block_alloc(metadata_header_ptr header,
			struct vpmem_block_info *info)
{
	metadata_block_ptr blk;

	blk = (metadata_block_ptr)metadata_alloc_unit(header);
	if (!blk)
		return NULL;

	INIT_LIST_HEAD(&blk->ranges);
	list_add(&blk->entry, &header->used_blocks);
	/* memcpy_toio: the block lives in remapped device memory. */
	memcpy_toio(blk->uuid, info->uuid, sizeof(info->uuid));
	memcpy_toio(blk->name, info->name, sizeof(info->name));

	if (metadata_block_truncate(header, blk, info->numa,
			info->size >> VPMEM_PAGE_SHIFT)) {
		VPMEM_ERR("%pUb/%s truncate 0x%llx fail\n",
				blk->uuid, blk->name, info->size);
		metadata_block_free(header, blk);
		blk = NULL;
	}

	return blk;
}
EXPORT_SYMBOL_GPL(metadata_block_alloc);

/*
 * metadata_block_free - release @blk and everything it owns
 *
 * Returns the block's ranges to the free pool, unlinks the block from
 * used_blocks, and recycles its unit.
 */
void metadata_block_free(metadata_header_ptr header, metadata_block_ptr blk)
{
	metadata_ranges_free(header, &blk->ranges);
	list_del(&blk->entry);
	metadata_free_unit(header, (metadata_unit_ptr)blk);
}
EXPORT_SYMBOL_GPL(metadata_block_free);

/*
 * metadata_block_truncate - attach @size pages of memory to @blk
 * @header: metadata header to allocate from
 * @numa:   required node, or VPMEM_NO_NUMA for any node
 * @size:   requested size in pages
 *
 * Only valid on a block with no ranges yet (-EINVAL otherwise).  A
 * single contiguous range is preferred; if none is large enough but
 * the pool's total matching free space suffices, the request is
 * stitched together from fragments.  Returns 0 or -ENOMEM.
 */
long metadata_block_truncate(metadata_header_ptr header,
			metadata_block_ptr blk, u8 numa, size_t size)
{
	metadata_range_ptr contig;
	size_t pool_free = 0;

	if (!list_empty(&blk->ranges))
		return -EINVAL;

	/* First choice: one contiguous range for the whole request. */
	contig = metadata_range_alloc(header, numa, size, &pool_free);
	if (contig) {
		list_add(&contig->entry, &blk->ranges);
		return 0;
	}

	/* Not enough total free space: no point defragmenting. */
	if (pool_free < size)
		return -ENOMEM;

	/* Fall back to collecting smaller fragments. */
	return metadata_range_defrag(header, numa, size, &blk->ranges);
}

u64 metadata_ranges_size(struct list_head *ranges)
{
	struct list_head *list;
	metadata_range_ptr range;
	u64 size = 0;

	list_for_each(list, ranges) {
		range = list_entry(list, struct metadata_range, entry);
		size += range->size;
	}
	return size << VPMEM_PAGE_SHIFT;
}

/*
 * metadata_block_size - byte size of all memory attached to @blk
 */
u64 metadata_block_size(metadata_block_ptr blk)
{
	return metadata_ranges_size(&blk->ranges);
}
EXPORT_SYMBOL_GPL(metadata_block_size);

/*
 * metadata_remap_ranges - map a list of physical ranges contiguously
 * @ranges: ranges to map (start/size stored in page frames)
 * @start:  virtual base address for the combined mapping
 *
 * Maps each range back-to-back starting at @start.  If any mapping
 * lands at an unexpected address, every range mapped so far is undone
 * and NULL is returned; on success the base is returned as an
 * __iomem pointer.
 */
void *metadata_remap_ranges(struct list_head *ranges, u64 start)
{
	struct list_head *list;
	metadata_range_ptr range;
	metadata_range_ptr err_range;
	u64 offset = 0;
	u64 addr;

	list_for_each(list, ranges) {
		range = list_entry(list, struct metadata_range, entry);

		addr = start + offset;
		VPMEM_INFO("remap [ 0x%pK - 0x%pK ] to 0x%pK\n", (void *)range->start,
			(void *)(range->start + range->size), (void *)addr);
		addr = (u64)ioremap_to_vaddr(addr, range->start << VPMEM_PAGE_SHIFT,
				range->size << VPMEM_PAGE_SHIFT);
		/* The mapping must land exactly where requested. */
		if (addr != (start + offset)) {
			err_range = range;
			goto error;
		}
		offset += (range->size << VPMEM_PAGE_SHIFT);
	}
	return (void __iomem *)start;
error:
	/* Roll back: unmap everything before the failed range. */
	offset = 0;
	list_for_each(list, ranges) {
		range = list_entry(list, struct metadata_range, entry);
		if (range == err_range)
			break;

		addr = start + offset;
		VPMEM_INFO("unmap [ 0x%pK - 0x%pK ] from 0x%pK\n", (void *)range->start,
			(void *)(range->start + range->size), (void *)addr);
		iounmap_from_vaddr((void __iomem *)addr,
			range->start << VPMEM_PAGE_SHIFT, range->size << VPMEM_PAGE_SHIFT);
		offset += (range->size << VPMEM_PAGE_SHIFT);
	}
	return NULL;
}

/*
 * metadata_unmap_ranges - tear down a mapping made by
 * metadata_remap_ranges()
 * @ranges: the same range list that was mapped
 * @start:  the virtual base the ranges were mapped at
 *
 * Walks the ranges in order, unmapping each from its back-to-back
 * position in the combined virtual region.
 */
void metadata_unmap_ranges(struct list_head *ranges, u64 start)
{
	u64 vaddr = start;
	metadata_range_ptr cur;

	list_for_each_entry(cur, ranges, entry) {
		VPMEM_INFO("unmap [ 0x%pK - 0x%pK ] from 0x%pK\n", (void *)cur->start,
			(void *)(cur->start + cur->size), (void *)vaddr);
		iounmap_from_vaddr((void __iomem *)vaddr,
			cur->start << VPMEM_PAGE_SHIFT, cur->size << VPMEM_PAGE_SHIFT);
		vaddr += (cur->size << VPMEM_PAGE_SHIFT);
	}
}
