// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * Description: Euler Hybrid Memory Management for Persistent Memory.
 */

#include <linux/mm.h>
#include <linux/hpmm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/oom.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/memcontrol.h>
#include <linux/swapops.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/pagevec.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <linux/pgtable.h>

#include "internal.h"

/*
 * Shared file_operations instance for hpmm.  Defined empty here; the
 * handlers are presumably installed by the hpmm driver module at init
 * time -- TODO confirm against the module that imports this symbol.
 */
struct file_operations hpmm_fops;
EXPORT_SYMBOL_GPL(hpmm_fops);

/* Boot-time switch for BBU (battery-backed-up) memory support. */
static int bbu_memory_enable __read_mostly;

/*
 * Parse the "bbu_memory=" kernel command-line parameter.  Accepts "0" or
 * "1"; any other value leaves the current setting untouched.  The final
 * state is always logged.
 */
static int __init parse_bbu_memory(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcasecmp(arg, "1") == 0)
		bbu_memory_enable = 1;
	else if (strcasecmp(arg, "0") == 0)
		bbu_memory_enable = 0;

	pr_info("bbu_memory_enable: %d\n", bbu_memory_enable);

	return 0;
}
early_param("bbu_memory", parse_bbu_memory);

/*
 * bbu_memory_on - report whether BBU memory support is enabled.
 *
 * Returns the value set by the "bbu_memory=" early parameter.  READ_ONCE()
 * pairs with the early-boot write in parse_bbu_memory(); within this file
 * the flag is never written after boot.
 */
bool bbu_memory_on(void)
{
	return READ_ONCE(bbu_memory_enable);
}
EXPORT_SYMBOL_GPL(bbu_memory_on);

#if defined(CONFIG_DEBUG_FS)
/* Runtime debug switch exposed at <debugfs>/etmem2_debug; non-zero enables it. */
static u64 etmem2_debug;

/* Create the etmem2_debug knob at the debugfs root, reset to 0 (disabled). */
void hpmm_create_debugfs(void)
{
	etmem2_debug = 0;
	debugfs_create_u64("etmem2_debug", 0644, NULL, &etmem2_debug);
}
EXPORT_SYMBOL_GPL(hpmm_create_debugfs);

/* Remove the etmem2_debug knob and reset the flag so the state is clean. */
void hpmm_remove_debugfs(void)
{
	etmem2_debug = 0;
	debugfs_lookup_and_remove("etmem2_debug", NULL);
}
EXPORT_SYMBOL_GPL(hpmm_remove_debugfs);

/* True when the user wrote a non-zero value into etmem2_debug. */
static bool hpmm_etmem2_debug_enabled(void)
{
	return READ_ONCE(etmem2_debug) != 0;
}

#else
/* Without CONFIG_DEBUG_FS the debug switch does not exist: always off. */
static bool hpmm_etmem2_debug_enabled(void)
{
	return false;
}
#endif

int check_should_skip_page(struct page *page, int ret)
{
	if (ret != 1)
		return 0;

	if (bbu_memory_on() && page && is_node_pmem(page_to_nid(page))) {
		if (!hpmm_etmem2_debug_enabled()) {
			put_page(page);
			return 0;
		}
	}

	return ret;
}

/*
 * check_file_hpmm - validate an hpmm mmap request.
 * @file:  file being mapped; must carry hpmm state in ->private_data.
 * @flags: mmap flags; MAP_FIXED is mandatory for hpmm mappings.
 * @len:   requested length; must be non-zero and a PAGE_SIZE multiple.
 * @addr:  requested address; must be page aligned.
 *
 * Returns true when the request is acceptable, false otherwise.
 */
bool check_file_hpmm(struct file *file, unsigned long flags,
		     unsigned long len, unsigned long addr)
{
	/* Guard against a NULL file before dereferencing ->private_data. */
	if (!file || !file->private_data || !(flags & MAP_FIXED)) {
		pr_err("file is not right. please check. should have the MAP_FIXED flag\n");
		return false;
	}

	/* Was missing the trailing newline, which glues log lines together. */
	if (!len || offset_in_page(addr) || (len % PAGE_SIZE)) {
		pr_err("the addr or len is not right. should be page align\n");
		return false;
	}

	return true;
}

/*
 * get_usable_zone - return the first managed zone of @node.
 *
 * Returns NULL (after a warning) when the node has no managed zone,
 * i.e. it is unpopulated.
 */
static struct zone *get_usable_zone(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	struct zone *zone;
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		zone = &pgdat->node_zones[zoneid];
		if (managed_zone(zone))
			return zone;
	}

	pr_warn("Unpopulated node %d\n", node);
	return NULL;
}

/*
 * set_hpmm_node_info - fill @hpmm_node_info with the usable physical range
 * of @zone.
 *
 * The range starts BBU_THP_MAPPING_OFFSET pages past the zone start and by
 * default spans the zone's managed pages.  If a memblock-reserved region
 * begins inside the zone, the usable size is clipped so it ends where that
 * reservation starts.
 *
 * NOTE(review): zone_end is computed as zone_start_pfn + managed pages,
 * which assumes the managed pages run contiguously from the zone start --
 * TODO confirm this holds for the persistent-memory zones involved.
 */
static void set_hpmm_node_info(struct hpmm_node_info *hpmm_node_info,
			       struct zone *zone)
{
	unsigned long start_pfn;
	phys_addr_t zone_base, zone_end;
	int memblock_index;
	struct memblock_region *rgn;
	struct memblock_type *type = &memblock.reserved;

	start_pfn = zone->zone_start_pfn + BBU_THP_MAPPING_OFFSET;

	/* Default: everything from the offset to the end of managed memory. */
	hpmm_node_info->phy_addr[0] = __pfn_to_phys(start_pfn);
	hpmm_node_info->phy_size[0] = (zone_managed_pages(zone) -
				       BBU_THP_MAPPING_OFFSET) << PAGE_SHIFT;

	zone_base = __pfn_to_phys(zone->zone_start_pfn);
	zone_end = __pfn_to_phys(zone->zone_start_pfn + zone_managed_pages(zone));

	/* Clip at the first reserved region whose base falls inside the zone. */
	for (memblock_index = 0, rgn = &type->regions[0];
			memblock_index < type->cnt;
			memblock_index++, rgn = &type->regions[memblock_index]) {
		if (rgn->base >= zone_base && rgn->base <= zone_end) {
			hpmm_node_info->phy_size[0] = rgn->base -
						hpmm_node_info->phy_addr[0];
			break;
		}
	}
}

/*
 * hpmm_get_node_info - describe the usable physical memory of a persistent
 * node.
 * @node: node id of the persistent-memory node.
 *
 * Returns a kzalloc'd hpmm_node_info (caller frees) holding the node's
 * physical start address and available size, or NULL when @node is out of
 * range, offline, not persistent memory, unpopulated, or on allocation
 * failure.
 */
struct hpmm_node_info *hpmm_get_node_info(int node)
{
	struct hpmm_node_info *info;
	struct zone *zone;

	if (node < 0 || node >= MAX_NUMNODES) {
		pr_err("node num: %d is invalid", node);
		return NULL;
	}

	/* Reject nodes that are offline or not persistent memory. */
	if (!node_online(node) || !is_node_pmem(node))
		return NULL;

	/* Look up the zone first so we do not allocate just to free again. */
	zone = get_usable_zone(node);
	if (!zone)
		return NULL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	info->max_index = 1;
	set_hpmm_node_info(info, zone);

	return info;
}
EXPORT_SYMBOL_GPL(hpmm_get_node_info);

/* This code is mainly borrowed from do_anonymous_page. */
/*
 * hpmm_fault - page-fault handler for hpmm VMAs.
 *
 * hpmm mappings are populated eagerly (see hpmm_fault_linear_mapping), so a
 * demand fault means an unsupported configuration (shared or dynamic
 * mapping) reached the fault path; kill the faulting access with SIGBUS.
 */
vm_fault_t hpmm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	WARN_ON(!(vma->vm_flags & VM_HYBRID_MM));
	pr_warn("hpmm fault: Shared and mmap for hpmm dynamic is not supported yet. should not reach here\n");
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL_GPL(hpmm_fault);

/*
 * hpmm_do_page_mapping - install @page into @vma at @addr as a single
 * anonymous PTE.
 * @page:    the (locked, referenced) page to map.
 * @vma:     target VMA.
 * @recover: when true, skip zeroing -- the page carries preserved contents.
 * @addr:    user virtual address to map at.
 * @pmd:     PMD covering @addr; the PTE table must already exist.
 *
 * Returns 0 on success, -EINVAL on anon_vma setup failure, -EAGAIN when the
 * PTE is already populated, the address space is unstable, or the page is
 * on the LRU but unmapped.  On error the caller owns the page cleanup.
 */
static int hpmm_do_page_mapping(struct page *page,
				struct vm_area_struct *vma,
				bool recover,
				unsigned long addr,
				pmd_t *pmd)
{
	pte_t entry;
	spinlock_t *ptl;
	pte_t *pte = NULL;
	int err;
	struct pagevec pvec;

	/* On the LRU but mapped nowhere: racing with reclaim, retry later. */
	if (PageLRU(page) && !page_mapped(page))
		return -EAGAIN;

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		return -EINVAL;

	/* Fresh, unmapped page in normal (non-recover) mode must be zeroed. */
	if (!recover && !page_mapped(page))
		clear_user_highpage(page, addr);

	__SetPageUptodate(page);

	entry = mk_pte(page, vma->vm_page_prot);
	entry = pte_sw_mkyoung(entry);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte_none(*pte)) {
		/* Someone else populated the PTE first; back off. */
		update_mmu_cache(vma, addr, pte);
		pte_unmap_unlock(pte, ptl);
		return -EAGAIN;
	}

	err = check_stable_address_space(vma->vm_mm);
	if (err) {
		pte_unmap_unlock(pte, ptl);
		return -EAGAIN;
	}

	/* Already mapped elsewhere -> additional rmap; otherwise first rmap. */
	if (page_mapped(page))
		page_add_anon_rmap(page, vma, addr, false);
	else
		page_add_new_anon_rmap(page, vma, addr, false);

	/* !PageLRU: page is new, should add to lru
	 *  PageLRU: page is already in lru, no need to get page because
	 *           we already get page from the start.
	 */
	if (!PageLRU(page)) {
		VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		pagevec_init(&pvec);
		get_page(page);
		pagevec_add(&pvec, page);
		__pagevec_lru_add(&pvec);
	}

	set_pte_at(vma->vm_mm, addr, pte, entry);
	add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
	update_mmu_cache(vma, addr, pte);

	pte_unmap_unlock(pte, ptl);

	return 0;
}

/*
 * hpmm_apply_to_pte_range - map [addr, end) of @vma onto the physical pages
 * starting at @start_pfn, one PTE at a time.
 * @vma:       target VMA; its vm_start anchors the pfn offset calculation.
 * @pmd:       PMD covering the range (PTE table is allocated if missing).
 * @addr:      start virtual address (page aligned).
 * @end:       end virtual address (exclusive).
 * @start_pfn: pfn mapped at vma->vm_start.
 * @recover:   when true, preserve page contents instead of zeroing.
 *
 * Returns 0 on success, -ENOMEM on PTE-table allocation failure, -EEXIST
 * when a PTE is already populated, -EAGAIN when a page cannot be isolated
 * or locked.  HWPoison pages are silently skipped.  NOTE(review): on
 * -EEXIST/-EAGAIN, pages already mapped earlier in the range are left
 * in place -- presumably the caller unwinds the whole VMA; verify.
 */
static int hpmm_apply_to_pte_range(struct vm_area_struct *vma,
				    pmd_t *pmd,
				    unsigned long addr,
				    unsigned long end,
				    unsigned long start_pfn,
				    bool recover)
{
	/*
	 * Fix: err must start at 0.  If every iteration takes the
	 * PageHWPoison `continue` path, the loop finishes without ever
	 * assigning err, and the old code returned an uninitialized value.
	 */
	int err = 0;
	pte_t *pte = NULL;
	struct page *page = NULL;
	struct zone *zone;
	unsigned long start_addr = vma->vm_start;
	unsigned long alloc_pfn;
	unsigned int retry_count = 0;

	if (pmd_none(*pmd)) {
		if (pte_alloc(vma->vm_mm, pmd))
			return -ENOMEM;
	}

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmd)))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pr_warn("the addr is already been used\n");
			return -EEXIST;
		}
		retry_count = 0;
retry:
		alloc_pfn = start_pfn + ((addr - start_addr) >> PAGE_SHIFT);
		page = pfn_to_page(alloc_pfn);
		/* Skip poisoned frames; continue still advances pte/addr. */
		if (PageHWPoison(page))
			continue;

		/* Zero refcount: page is in the buddy; pull it out, with a
		 * bounded retry after draining per-cpu page lists.
		 */
		if (!get_page_unless_zero(page)) {
			if (take_pfn_off_buddy(alloc_pfn, 0, GFP_USER)) {
				retry_count++;
				cond_resched();
				zone = page_zone(page);
				drain_all_pages(zone);
				if (retry_count <= MAX_TAKE_PFN_RETYR)
					goto retry;
				return -EAGAIN;
			}
		}

		if (unlikely(!trylock_page(page))) {
			put_page(page);
			return -EAGAIN;
		}

		err = hpmm_do_page_mapping(page, vma, recover, addr, pmd);
		if (err != 0)
			goto unlock_release;

		unlock_page(page);

		cond_resched();
	} while (pte++, addr += PAGE_SIZE, addr != end);

	return err;

unlock_release:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * hpmm_do_pmd_page_mapping - install @page into @vma at @addr as an
 * anonymous transparent huge page (PMD mapping).
 * @page:    the (locked) huge page to map; caller holds the page lock.
 * @vma:     target VMA.
 * @recover: when true, skip zeroing -- the page carries preserved contents.
 * @addr:    PMD-aligned user virtual address.
 * @pmd:     PMD slot to populate; must be pmd_none.
 *
 * Returns 0 on success (page unlocked), -ENOMEM when the deposit page
 * table cannot be allocated, -EAGAIN when the PMD was populated
 * concurrently or the address space is unstable (page unlocked and
 * released).
 */
static int hpmm_do_pmd_page_mapping(struct page *page,
				    struct vm_area_struct *vma,
				    bool recover,
				    unsigned long addr,
				    pmd_t *pmd)
{
	pmd_t entry;
	spinlock_t *ptl;
	int err;
	pgtable_t pgtable;

	/* Pre-allocate the page table deposited for a later THP split. */
	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		unlock_page(page);
		if (!page_mapped(page))
			put_page(page);
		return -ENOMEM;
	}

	if (!recover && !page_mapped(page))
		clear_huge_page(page, addr, HPAGE_PMD_NR);

	__SetPageUptodate(page);

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(!pmd_none(*pmd)))
		goto unlock_release;

	err = check_stable_address_space(vma->vm_mm);
	if (err) {
		pr_err("check_stable_address_space failed");
		goto unlock_release;
	}

	entry = mk_huge_pmd(page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	if (page_mapped(page))
		page_add_anon_rmap(page, vma, addr, true);
	else
		page_add_new_anon_rmap(page, vma, addr, true);

	/* page is already been added to lru list, should inc the ref count */
	if (PageLRU(page))
		get_page(page);
	else
		lru_cache_add_inactive_or_unevictable(page, vma);

	pgtable_trans_huge_deposit(vma->vm_mm, pmd, pgtable);
	set_pmd_at(vma->vm_mm, addr, pmd, entry);
	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	mm_inc_nr_ptes(vma->vm_mm);
	spin_unlock(ptl);
	count_vm_event(THP_FAULT_ALLOC);
	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);

	unlock_page(page);

	return 0;

unlock_release:
	spin_unlock(ptl);
	unlock_page(page);
	put_page(page);
	/*
	 * Fix: the deposit page table was leaked on this path -- it is only
	 * consumed by pgtable_trans_huge_deposit() on success.  Free it,
	 * mirroring the error path of __do_huge_pmd_anonymous_page().
	 */
	pte_free(vma->vm_mm, pgtable);

	return -EAGAIN;
}

/*
 * hpmm_create_thp_pmd_range - map [addr, end) of @vma as transparent huge
 * pages onto the physical pages starting at @start_pfn.
 * @vma:       target VMA (VM_HYBRID_MM | VM_HUGEPAGE).
 * @pud:       PUD covering the range.
 * @addr:      start virtual address (PMD aligned).
 * @end:       end virtual address (exclusive).
 * @start_pfn: pfn mapped at vma->vm_start.
 * @recover:   when true, preserve page contents instead of zeroing.
 *
 * Returns 0 on success, -EEXIST when a PMD is already populated, -EAGAIN
 * when a huge page cannot be isolated, locked, or mapped.
 *
 * NOTE(review): "reserved" here means the head page is already on the LRU,
 * which is taken as "previously mapped by hpmm"; in that case no extra
 * reference is acquired before hpmm_do_pmd_page_mapping() -- presumably the
 * existing mapping pins it.  TODO confirm this refcount protocol.
 */
static int hpmm_create_thp_pmd_range(struct vm_area_struct *vma,
				    pud_t *pud,
				    unsigned long addr,
				    unsigned long end,
				    unsigned long start_pfn,
				    bool recover)
{
	pmd_t *pmd = NULL;
	struct page *page;
	struct zone *zone;
	unsigned long next;
	unsigned long alloc_pfn;
	bool reserved;
	unsigned int retry_count = 0;

	pmd = pmd_offset(pud, addr);
	do {
		if (!pmd_none(*pmd))
			return -EEXIST;

		retry_count = 0;
retry:
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd) && pmd_present(*pmd))
			goto next;

		alloc_pfn = start_pfn + ((addr - vma->vm_start) >> PAGE_SHIFT);
		page = pfn_to_page(alloc_pfn);
		if (unlikely(!trylock_page(page)))
			return -EAGAIN;
		/* LRU head page => block already taken out of the buddy. */
		if (!PageLRU(page))
			reserved = false;
		else
			reserved = true;

		if (!reserved) {
			/* Pull the whole HPAGE_PMD_ORDER block off the buddy,
			 * retrying a bounded number of times after draining
			 * per-cpu page lists.
			 */
			if (take_pfn_off_buddy(alloc_pfn, HPAGE_PMD_ORDER,
					       GFP_TRANSHUGE_LIGHT | GFP_USER)) {
				unlock_page(page);
				retry_count++;
				cond_resched();
				zone = page_zone(page);
				drain_all_pages(zone);
				if (retry_count <= MAX_TAKE_PFN_RETYR)
					goto retry;
				return -EAGAIN;
			}
			page = pfn_to_page(alloc_pfn);
			prep_transhuge_page(page);
		} else {
			/* On the LRU but mapped nowhere: racing with reclaim. */
			if (!page_mapped(page)) {
				unlock_page(page);
				return -EAGAIN;
			}
		}

		/* On failure the callee has unlocked and released the page. */
		if (hpmm_do_pmd_page_mapping(page, vma, recover, addr, pmd) != 0)
			return -EAGAIN;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return 0;
}

/*
 * hpmm_apply_linear_pmd_range - walk/allocate the PMD level for [addr, end)
 * and populate each PMD either as THPs or via per-PTE mapping.
 *
 * VMAs flagged VM_HYBRID_MM|VM_HUGEPAGE are handed to
 * hpmm_create_thp_pmd_range() for the whole range; otherwise each PMD is
 * checked (under pmd_lock) not to hold a huge entry already, then filled
 * PTE by PTE.
 *
 * Returns 0 on success, -ENOMEM on PMD allocation failure, -EEXIST when a
 * huge PMD is already present, or the error from the lower level.
 */
static int hpmm_apply_linear_pmd_range(struct vm_area_struct *vma,
				    pud_t *pud,
				    unsigned long addr,
				    unsigned long end,
				    unsigned long start_pfn,
				    bool recover)
{
	int err = 0;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd = NULL;
	unsigned long next;
	spinlock_t *ptl;

	if (pud_none(*pud)) {
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return -ENOMEM;
	}

	pmd = pmd_offset(pud, addr);
	if ((vma->vm_flags & VM_HYBRID_MM) && (vma->vm_flags & VM_HUGEPAGE))
		return hpmm_create_thp_pmd_range(vma, pud, addr, end, start_pfn, recover);

	do {
		/* Check under the PMD lock that no huge entry raced in. */
		ptl = pmd_lock(mm, pmd);
		if (!pmd_none(*pmd) && pmd_huge(*pmd)) {
			spin_unlock(ptl);
			return -EEXIST;
		}
		spin_unlock(ptl);
		next = pmd_addr_end(addr, end);
		err = hpmm_apply_to_pte_range(vma, pmd, addr, next, start_pfn, recover);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

/*
 * hpmm_apply_linear_pud_range - walk/allocate the PUD level for [addr, end)
 * and apply the linear mapping to each covered PMD range.
 *
 * Returns 0 on success, -ENOMEM on PUD allocation failure, or the first
 * error reported by the PMD level.
 */
static int hpmm_apply_linear_pud_range(struct vm_area_struct *vma,
				    p4d_t *p4d,
				    unsigned long addr,
				    unsigned long end,
				    unsigned long start_pfn,
				    bool recover)
{
	unsigned long next;
	pud_t *pud;
	int err = 0;

	if (p4d_none(*p4d) && !pud_alloc(vma->vm_mm, p4d, addr))
		return -ENOMEM;

	for (pud = pud_offset(p4d, addr); addr != end; pud++, addr = next) {
		next = pud_addr_end(addr, end);
		err = hpmm_apply_linear_pmd_range(vma, pud, addr, next,
						  start_pfn, recover);
		if (err)
			break;
	}

	return err;
}

/*
 * hpmm_apply_linear_p4d_range - walk/allocate the P4D level for [addr, end)
 * and apply the linear mapping to each covered PUD range.
 *
 * Returns 0 on success, -ENOMEM on P4D allocation failure, or the first
 * error reported by the PUD level.
 */
static int hpmm_apply_linear_p4d_range(struct vm_area_struct *vma,
				    pgd_t *pgd,
				    unsigned long addr,
				    unsigned long end,
				    unsigned long start_pfn,
				    bool recover)
{
	struct mm_struct *mm = vma->vm_mm;
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	/*
	 * p4d_alloc() performs the pgd_none() check, the allocation, and the
	 * page_table_lock-protected pgd_populate()/p4d_free() race handling
	 * (see __p4d_alloc()) that used to be open-coded here.
	 */
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;

	do {
		next = p4d_addr_end(addr, end);
		err = hpmm_apply_linear_pud_range(vma, p4d, addr, next,
						  start_pfn, recover);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);
	return err;
}

/*
 * hpmm_apply_linear_range - top-level page-table walk mapping [addr, end)
 * of @vma linearly onto the pages starting at @start_pfn.
 *
 * Returns -EINVAL (with a warning) for an empty/inverted range, otherwise
 * the first error reported by the lower levels, or 0.
 */
static int hpmm_apply_linear_range(struct vm_area_struct *vma,
				unsigned long addr,
				unsigned long end,
				unsigned long start_pfn,
				bool recover)
{
	unsigned long next;
	pgd_t *pgd;
	int err = 0;

	if (WARN_ON(addr >= end))
		return -EINVAL;

	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
	     pgd++, addr = next) {
		next = pgd_addr_end(addr, end);
		err = hpmm_apply_linear_p4d_range(vma, pgd, addr, next,
						  start_pfn, recover);
		if (err)
			break;
	}

	return err;
}

/*
 * hpmm_fault_linear_mapping - eagerly populate an hpmm VMA with a linear
 * mapping onto physical pages.
 * @vma:       target VMA; must cover exactly [va_start, va_end) and carry
 *             VM_HYBRID_MM; VM_SHARED is rejected.
 * @va_start:  start virtual address (must equal vma->vm_start).
 * @va_end:    end virtual address (must equal vma->vm_end).
 * @start_pfn: pfn mapped at va_start.
 * @recover:   when true, preserve page contents instead of zeroing.
 *
 * Returns 0 on success or a negative errno.
 */
int hpmm_fault_linear_mapping(struct vm_area_struct *vma,
				 unsigned long va_start,
				 unsigned long va_end,
				 unsigned long start_pfn,
				 bool recover)
{
	int ret;

	if (!vma)
		return -EINVAL;

	WARN_ON(!(vma->vm_flags & VM_HYBRID_MM));
	if (vma->vm_flags & VM_SHARED) {
		pr_warn("Shared mmap for hpmm is not supported yet\n");
		return -EINVAL;
	}

	/* Only whole-VMA mappings are supported. */
	if (va_start != vma->vm_start || va_end != vma->vm_end)
		return -EINVAL;

	if (unlikely(anon_vma_prepare(vma)))
		return -EINVAL;

	ret = hpmm_apply_linear_range(vma, va_start, va_end, start_pfn, recover);
	if (ret)
		pr_err("hpmm fault linear mapping failed\n"); /* was missing \n */

	return ret;
}
EXPORT_SYMBOL_GPL(hpmm_fault_linear_mapping);

/*
 * hpmm_memblock_free_reserved_area - release [base, base + size) from the
 * memblock reserved list, but only if it lies entirely inside a single
 * reserved region; otherwise do nothing.
 */
static void hpmm_memblock_free_reserved_area(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.reserved;
	int i;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rgn_end = rgn->base + rgn->size;

		if (base >= rgn->base && base + size <= rgn_end) {
			memblock_free(base, size);
			return;
		}
	}
}

void hpmm_free_reserved_persistent_memory(void)
{
	struct memblock_region *mblk;

	/* Check that valid nid is set to memblks */
	for_each_mem_region(mblk)
		if (get_node_type(mblk->nid) == NODE_TYPE_PMEM)
			hpmm_memblock_free_reserved_area(mblk->base, mblk->size);

	memblock_dump_all();
}

/*
 * take_pages_off_node - grab a physically-contiguous run of pages from the
 * start of a node's first managed zone.
 * @nid:      node to take pages from.
 * @nr_pages: the number of pages to take.
 * @gfp_mask: constraints forwarded to alloc_contig_range().
 *
 * Returns the first page of the run, or NULL when the node is unpopulated,
 * the zone is too small, or the contiguous allocation fails.
 */
struct page *take_pages_off_node(int nid, unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long start_pfn;
	struct zone *zone;

	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));

	if (!nr_pages) {
		/* %lu: nr_pages is unsigned long (was mis-printed with %ld) */
		pr_warn("Unexpected nr_pages %lu\n", nr_pages);
		return NULL;
	}

	/* Reuse the shared helper instead of duplicating the zone scan;
	 * it emits the same "Unpopulated node" warning on failure.
	 */
	zone = get_usable_zone(nid);
	if (!zone)
		return NULL;

	start_pfn = zone->zone_start_pfn;
	if (zone_end_pfn(zone) - start_pfn < nr_pages) {
		pr_warn("Can't find suitable pfn range for %lu pages\n",
			nr_pages);
		return NULL;
	}

	/* should consider the hole in alloc aera */
	if (alloc_contig_range(start_pfn, start_pfn + nr_pages,
			       MIGRATE_MOVABLE, gfp_mask))
		return NULL;

	return pfn_to_page(start_pfn);
}
EXPORT_SYMBOL(take_pages_off_node);

/*
 * is_node_pmem - true when @nid is a valid node id whose node type is
 * persistent memory (NODE_TYPE_PMEM); false for out-of-range ids.
 */
bool is_node_pmem(int nid)
{
	bool valid_nid = (nid >= 0 && nid < MAX_NUMNODES);

	return valid_nid && get_node_type(nid) == NODE_TYPE_PMEM;
}
EXPORT_SYMBOL_GPL(is_node_pmem);

