// SPDX-License-Identifier: GPL-2.0
/*
 * VPMEM PAGE (Aka VPAGE) for page allocation
 *
 * Copyright (C) 2022 HUAWEI, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/vpmem_page.h>
#include <linux/io.h>
#include <linux/seq_file.h>

/* Magic string written into vpage_zone.magic to mark an initialized zone */
#define VPAGE_MAGIC  "HWVPAGE"
/* Prefix for all vpage log messages */
#define TITLE_FMT    "vpage: "

/* Randomized virtual base for zone headers; set on first zone_random_header() */
static unsigned long vaddr_start;
/* Count of zone header addresses handed out so far */
static unsigned long vaddr_idx;
/* Virtual-address spacing between consecutive zone mappings (512MB) */
#define ZONE_INTERVAL 0x20000000

#define VPAGE_DEBUG(fmt, arg...) pr_debug(TITLE_FMT fmt, ## arg)
#define VPAGE_INFO(fmt, arg...) pr_info(TITLE_FMT fmt, ## arg)
#define VPAGE_WARN(fmt, arg...) pr_warn(TITLE_FMT fmt, ## arg)
#define VPAGE_ERR(fmt, arg...) pr_err(TITLE_FMT fmt, ## arg)

/*
 * Per-zone metadata stored at the start of each remapped vpmem region.
 * The header is followed in-place by the struct vpage descriptor array;
 * the page data area sits at the end of the region (see vpage_init_zone()).
 */
struct vpage_zone {
	char			magic[8];	/* VPAGE_MAGIC marks an initialized zone */
	u16			zone;		/* index into vpage_zones[] */
	u16			node;		/* NUMA node id this zone serves */
	u64			nr_total_pages;	/* descriptors in pages[] */
	u64			nr_free_pages;	/* entries currently on free_pages */
	u64			nr_used_pages;	/* pages handed out by vpage_alloc() */
	u64			base_addr;	/* virtual base the region was remapped to */
	u64			base_size;	/* size of the whole remapped region */
	u64			pages_base;	/* virtual start of the pages[] array */
	u64			pages_size;	/* bytes occupied by pages[] */
	u64			data_base;	/* virtual start of the page data area */
	u64			data_size;	/* bytes of page data (pages << VPAGE_SHIFT) */
	struct list_head	free_pages;	/* free list linked through vpage.lru */
	struct vpage		pages[];	/* flexible array of page descriptors */
};

typedef struct vpage_zone __iomem *vpage_zone_ptr;
/* Registered zones, indexed by vpage_zone.zone; NULL entries are free slots */
static vpage_zone_ptr vpage_zones[VPAGE_MAX_ZONE];
/* Protects vpage_zones[] and per-zone free lists / counters */
static spinlock_t vpage_zone_spinlock;
/* Protects vpage_ranges[] */
static spinlock_t vpage_range_spinlock;

/* One virt<->phys translation window per zone, used by the *_to_* helpers */
struct vpage_range {
	u64 virt_start;
	u64 virt_end;
	u64 phys_start;
	u64 phys_end;
};
static struct vpage_range vpage_ranges[VPAGE_MAX_ZONE];

/* Record the virt<->phys translation window for @zone (out-of-range ids are ignored). */
static void vpage_add_range(u64 virt, u64 phys, u64 size, u16 zone)
{
	unsigned long flags;

	spin_lock_irqsave(&vpage_range_spinlock, flags);
	if (zone < VPAGE_MAX_ZONE) {
		struct vpage_range *r = &vpage_ranges[zone];

		r->virt_start = virt;
		r->virt_end = virt + size;
		r->phys_start = phys;
		r->phys_end = phys + size;
	}
	spin_unlock_irqrestore(&vpage_range_spinlock, flags);
}

/* Dump per-zone page counters into @m (procfs/debugfs seq_file). */
void vpage_info(struct seq_file *m)
{
	unsigned long flags;
	unsigned long i;

	spin_lock_irqsave(&vpage_zone_spinlock, flags);
	seq_puts(m, "VPAGE INFO:\n");
	for (i = 0; i < VPAGE_MAX_ZONE; i++) {
		vpage_zone_ptr z = vpage_zones[i];

		if (!z)
			continue;
		seq_printf(m, "NodeID: %d\n", z->node);
		seq_printf(m, "Total Pages: %llu\n", z->nr_total_pages);
		seq_printf(m, "Free  Pages: %llu\n", z->nr_free_pages);
		seq_printf(m, "Used  Pages: %llu\n", z->nr_used_pages);
	}
	spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
}

/* Return 1 if virtual address @addr lies inside @zone's data area, else 0. */
static inline int in_zone_range(u64 addr, vpage_zone_ptr zone)
{
	return addr >= zone->data_base &&
	       addr < zone->data_base + zone->data_size;
}

/*
 * Pick a virtual address for the next zone header.
 *
 * On the first call, a PMD-aligned random offset within 64GB (plus a fixed
 * 2^38 displacement) above VPMEM_BASE_START seeds vaddr_start; subsequent
 * zones are placed ZONE_INTERVAL apart from that base.  Not locked -
 * NOTE(review): assumes callers serialize zone creation; confirm.
 */
static unsigned long zone_random_header(void)
{
	unsigned long random;

	get_random_bytes(&random, sizeof(random));
	/* random in 64GB */
	random = (random % (1UL << 38)) & PMD_MASK;
	if (vaddr_idx == 0)
		vaddr_start = VPMEM_BASE_START + random + (1UL << 38);
	vaddr_idx++;
	return vaddr_start + vaddr_idx * ZONE_INTERVAL;
}

/*
 * Number of usable data pages in a region of @size bytes, after reserving
 * room for the zone header and one struct vpage per page.  Slightly
 * conservative: the metadata estimate is based on the unreduced page count.
 */
static unsigned long zone_max_pages(vpage_zone_ptr header, u64 size)
{
	unsigned long total = size / VPAGE_SIZE;
	unsigned long meta_bytes = total * sizeof(struct vpage) + sizeof(*header);

	return total - DIV_ROUND_UP(meta_bytes, VPAGE_SIZE);
}

/**
 * vpage_init_zone - map a vpmem region and register it as a vpage zone
 * @start: physical start address of the region
 * @size:  size of the region in bytes
 * @nid:   NUMA node id the zone serves
 * @force: non-zero to reformat the zone even if a valid header exists
 *
 * Probe-maps the region to inspect any existing header.  If the header is
 * valid (magic, size and node match) and @force is clear, the region is
 * remapped back to its recorded virtual base and reused; otherwise a fresh
 * header is written at a randomized virtual address and the free list is
 * rebuilt.  Finally the zone is installed in vpage_zones[].
 *
 * Return: 0 on success, -EFAULT on mapping failure or when no zone slot
 * (or the recorded slot) is available.
 */
int vpage_init_zone(u64 start, u64 size, u32 nid, int force)
{
	vpage_ptr page = NULL;
	vpage_zone_ptr header = NULL;
	unsigned long haddr = 0;
	unsigned long reuse_addr = 0;
	unsigned long pages = 0;
	unsigned long i;
	u64 header_size;
	unsigned long flags;

	/*
	 * NOTE(review): locks are re-initialized on every call; assumes zones
	 * are registered sequentially before any allocation - TODO confirm.
	 */
	spin_lock_init(&vpage_zone_spinlock);
	spin_lock_init(&vpage_range_spinlock);

	/* Probe mapping (haddr == 0: let the mapper pick) to read the header */
	header = ioremap_to_vaddr(haddr, start, size);
	if (!header) {
		VPAGE_ERR("failed to map vpage region %#016Lx\n", start);
		return -EFAULT;
	}

	pages = zone_max_pages(header, size);
	VPAGE_INFO("NodeID: %d, remap vpage (size: %#016Lx) to 0x%pK page count %lu\n",
			nid, size, (void *)header, pages);

	if (force || strcmp(header->magic, VPAGE_MAGIC)
		|| header->base_size != size || header->node != nid) {
		/* No reusable header: format the zone at a fresh random base */
		haddr = zone_random_header();
		iounmap_from_vaddr(header, start, size);
		header = ioremap_to_vaddr(haddr, start, size);
		if ((unsigned long)header != haddr) {
			VPAGE_ERR("failed to remap to 0x%pK\n", (void *)haddr);
			/* don't leak a mapping that landed at the wrong address */
			if (header)
				iounmap_from_vaddr(header, start, size);
			return -EFAULT;
		}
		VPAGE_INFO("NodeID: %d, remap to (size: %#016Lx) to 0x%pK page count %lu\n",
			nid, size, (void *)haddr, pages);
		memset_io(header, 0, sizeof(*header));
		header->node = nid;
		header->nr_total_pages = pages;
		header->nr_free_pages = pages;
		header->nr_used_pages = 0;
		header->base_addr = haddr;
		header->base_size = size;
		/* descriptor array directly follows the header ... */
		header->pages_base = haddr + sizeof(*header);
		header->pages_size = pages * sizeof(struct vpage);
		header_size = sizeof(*header) + header->pages_size;
		/* ... while the data area is packed at the end of the region */
		header->data_size = pages << VPAGE_SHIFT;
		header->data_base = (u64)header + header->base_size - header->data_size;

		VPAGE_DEBUG("init page_base[%#016Lx + %#016Lx] data_base[%#016Lx + %#016Lx]\n",
			header->pages_base, header->pages_size, header->data_base,
			header->data_size);

		INIT_LIST_HEAD(&header->free_pages);
		for (i = 0; i < pages; i++) {
			page = (vpage_ptr)&header->pages[i];
			list_add(&page->lru, &header->free_pages);
		}
		/* write the magic last so a torn init is not mistaken for valid */
		memcpy_toio(header->magic, VPAGE_MAGIC, strlen(VPAGE_MAGIC));
		VPAGE_INFO("init vpage page count %lu success\n", pages);
		goto finish;
	}

	if (header->zone >= VPAGE_MAX_ZONE) {
		VPAGE_ERR("vpage 0x%pK zone id(%d) too large\n", (void *)header, header->zone);
		iounmap_from_vaddr(header, start, size);
		return -EFAULT;
	}

	/* Valid header: remap at the virtual base recorded on media */
	reuse_addr = header->base_addr;
	iounmap_from_vaddr(header, start, size);

	header = ioremap_to_vaddr(reuse_addr, start, size);
	if ((unsigned long)header != reuse_addr) {
		VPAGE_ERR("failed to remap to reuse addr 0x%pK\n", (void *)reuse_addr);
		if (header)
			iounmap_from_vaddr(header, start, size);
		return -EFAULT;
	}
	VPAGE_DEBUG("reuse page_base[%#016Lx + %#016Lx] data_base[%#016Lx + %#016Lx]\n",
		header->pages_base, header->pages_size, header->data_base, header->data_size);

finish:
	spin_lock_irqsave(&vpage_zone_spinlock, flags);
	if (reuse_addr && vpage_zones[header->zone]) {
		spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
		VPAGE_ERR("vpage 0x%pK zone id(%d) already exist\n", (void *)header, header->zone);
		goto unmap;
	}

	if (reuse_addr) {
		/* reuse path: slot id comes from the on-media header */
		vpage_zones[header->zone] = header;
		vpage_add_range((u64)header, start, size, header->zone);
		spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
		return 0;
	}

	/* fresh-init path: claim the first free slot */
	for (i = 0; i < VPAGE_MAX_ZONE; i++) {
		if (!vpage_zones[i]) {
			vpage_zones[i] = header;
			header->zone = i;
			vpage_add_range((u64)header, start, size, header->zone);
			spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&vpage_zone_spinlock, flags);

	VPAGE_ERR("vpage 0x%pK zone id alloc fail\n", (void *)header);
unmap:
	iounmap_from_vaddr(header, start, size);
	return -EFAULT;
}

/* Emit a debug line with a page's descriptor, virtual and physical addresses. */
static inline void vpage_debug_log(vpage_ptr page, const char *tag)
{
	void *va = vpage_to_virt(page);
	unsigned long pa = vpage_virt_to_phys(va);

	VPAGE_DEBUG("%s page:%#016Lx virt:%#016Lx phys:%#016Lx\n",
		tag, (u64)page, (u64)va, (u64)pa);
}

/**
 * vpage_alloc - allocate one vpage, preferring the zone on node @nid
 * @nid:      preferred NUMA node; falls back to zone 0 if no match
 * @gfp_mask: only __GFP_ZERO is honored (zero-fill the page data)
 * @order:    currently ignored - only single-page allocations are supported
 *
 * Return: a page descriptor, or NULL when no zone exists or all zones
 * matching the fallback are exhausted.
 */
vpage_ptr vpage_alloc(u32 nid, gfp_t gfp_mask, unsigned int order)
{
	vpage_ptr page = NULL;
	vpage_zone_ptr zone = NULL;
	unsigned long zoneid;
	void *data;
	unsigned long flags;

	spin_lock_irqsave(&vpage_zone_spinlock, flags);
	for (zoneid = 0; zoneid < VPAGE_MAX_ZONE; zoneid++) {
		if (!vpage_zones[zoneid])
			break;

		if (vpage_zones[zoneid]->node == nid) {
			zone = vpage_zones[zoneid];
			break;
		}
	}

	if (!zone) {
		zone = vpage_zones[0];
		VPAGE_DEBUG("alloc page on default zone %u\n", nid);
	}

	/* no zone registered at all: fail instead of dereferencing NULL */
	if (!zone) {
		spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
		VPAGE_WARN("alloc with no vpage zone registered (nid %u)\n", nid);
		return NULL;
	}

	if (!list_empty(&zone->free_pages)) {
		page = list_first_entry(&zone->free_pages, struct vpage, lru);
		list_del(&page->lru);
		zone->nr_free_pages -= 1;
		zone->nr_used_pages += 1;
		memset_io(page, 0, sizeof(*page));
		page->zone = zone->zone;
	}
	spin_unlock_irqrestore(&vpage_zone_spinlock, flags);

	/* guard: the free list may have been empty, leaving page == NULL */
	if (page && (gfp_mask & __GFP_ZERO)) {
		data = vpage_to_virt(page);
		memset(data, 0, VPAGE_SIZE);
	}
	if (page)
		vpage_debug_log(page, "alloc");
	return page;
}

/**
 * vpage_release - return a page descriptor to its zone's free list
 * @page: descriptor previously returned by vpage_alloc(); must not be NULL
 *
 * A page whose zone id is out of range or whose zone is unregistered is
 * logged and dropped rather than corrupting memory.
 */
void vpage_release(vpage_ptr page)
{
	vpage_zone_ptr zone;
	unsigned long flags;

	BUG_ON(page == NULL);

	/* bounds-check before indexing vpage_zones[] with on-media data */
	if (page->zone >= VPAGE_MAX_ZONE) {
		VPAGE_ERR("release page 0x%pK with bad zone id %d\n",
			(void *)page, page->zone);
		return;
	}

	vpage_debug_log(page, "free");

	spin_lock_irqsave(&vpage_zone_spinlock, flags);
	zone = vpage_zones[page->zone];
	if (zone) {
		zone->nr_free_pages += 1;
		zone->nr_used_pages -= 1;
		list_add(&page->lru, &zone->free_pages);
	}
	spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
}

/* Free a page by its data virtual address (@order is unused, single page only). */
void vpage_free(const void *b, int order)
{
	vpage_release(virt_to_vpage(b));
}

/*
 * Look up the page descriptor for a data virtual address.
 * Returns NULL for a NULL @addr or an address outside every zone's data area.
 * The scan stops at the first empty slot (zones are registered contiguously).
 */
vpage_ptr virt_to_vpage(const void *addr)
{
	vpage_ptr page = NULL;
	u64 idx = 0;
	unsigned long flags;
	unsigned long i;

	if (!addr)
		return NULL;

	spin_lock_irqsave(&vpage_zone_spinlock, flags);
	for (i = 0; i < VPAGE_MAX_ZONE; i++) {
		vpage_zone_ptr z = vpage_zones[i];

		if (!z)
			break;
		if (in_zone_range((u64)addr, z)) {
			idx = ((u64)addr - z->data_base) >> VPAGE_SHIFT;
			page = &z->pages[idx];
			break;
		}
	}
	spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
	VPAGE_DEBUG("virt %#016Lx to page %#016Lx idx:%#016Lx\n", (u64)addr, (u64)page, idx);
	return page;
}

/* Look up the page descriptor for a physical address. */
vpage_ptr phys_to_vpage(unsigned long phys)
{
	return virt_to_vpage(vpage_phys_to_virt(phys));
}

/**
 * vpage_to_virt - data virtual address backing a page descriptor
 * @page: descriptor located inside a zone's pages[] array
 *
 * Return: the page's data address, or NULL for a NULL/invalid descriptor.
 */
void *vpage_to_virt(vpage_ptr page)
{
	void *addr = NULL;
	vpage_zone_ptr zone;
	u64 idx = 0;
	unsigned long flags;

	if (!page || page->zone >= VPAGE_MAX_ZONE)
		return NULL;

	spin_lock_irqsave(&vpage_zone_spinlock, flags);
	zone = vpage_zones[page->zone];
	if (zone) {
		BUG_ON((u64)page < zone->pages_base);
		/* >=: pages_base + pages_size is one past the last descriptor */
		BUG_ON((u64)page >= (zone->pages_base + zone->pages_size));

		idx = ((u64)page - zone->pages_base) / sizeof(struct vpage);
		/* >=: idx == count would address past the end of the data area */
		BUG_ON(idx >= (zone->pages_size / sizeof(struct vpage)));
		addr = (void *)(zone->data_base + (idx << VPAGE_SHIFT));
	}
	spin_unlock_irqrestore(&vpage_zone_spinlock, flags);
	VPAGE_DEBUG("page %#016Lx [%#016Lx] to addr %#016Lx\n", (u64)page, idx, (u64)addr);
	return addr;
}

unsigned long vpage_virt_to_phys(volatile void *addr)
{
	u32 i;
	u64 virt = (u64)addr;
	u64 phys;
	struct vpage_range *range;
	unsigned long flags;

	spin_lock_irqsave(&vpage_range_spinlock, flags);
	for (i = 0; i < VPAGE_MAX_ZONE; i++) {
		range = &vpage_ranges[i];
		if (!range->phys_start)
			break;

		if (virt >= range->virt_start && virt < range->virt_end) {
			phys = range->phys_start + (virt - range->virt_start);
			spin_unlock_irqrestore(&vpage_range_spinlock, flags);
			VPAGE_DEBUG("virt %#016Lx to phys %#016Lx\n", (u64)addr, (u64)phys);
			return phys;
		}
	}

	spin_unlock_irqrestore(&vpage_range_spinlock, flags);
	return virt_to_phys(addr);
}

/**
 * vpage_phys_to_virt - translate a physical address via the vpage range table
 * @addr: physical address
 *
 * Falls back to the kernel's phys_to_virt() for addresses outside every
 * registered vpage range.  The scan stops at the first unpopulated entry.
 */
void *vpage_phys_to_virt(unsigned long addr)
{
	u32 i;
	u64 virt;
	u64 phys = (u64)addr;
	struct vpage_range *range;
	unsigned long flags;

	spin_lock_irqsave(&vpage_range_spinlock, flags);
	for (i = 0; i < VPAGE_MAX_ZONE; i++) {
		range = &vpage_ranges[i];
		if (!range->virt_start)
			break;

		if (phys >= range->phys_start && phys < range->phys_end) {
			virt = range->virt_start + (phys - range->phys_start);
			spin_unlock_irqrestore(&vpage_range_spinlock, flags);
			/* fixed: this converts phys->virt, log it that way */
			VPAGE_DEBUG("phys %#016Lx to virt %#016Lx\n", phys, virt);
			return (void *)virt;
		}
	}
	spin_unlock_irqrestore(&vpage_range_spinlock, flags);
	return phys_to_virt(addr);
}
