/*
 * DIM-SUM操作系统 - MMU管理
 *
 * Copyright (C) 2023 国科础石(重庆)软件有限公司
 *
 * 作者: Dong Peng <w-pengdong@kernelsoft.com>
 *
 * License terms: GNU General Public License (GPL) version 3
 *
 */

#include <dim-sum/boot_allotter.h>
#include <dim-sum/cache.h>
#include <dim-sum/init.h>
#include <dim-sum/memory_regions.h>
#include <dim-sum/mm_types.h>
#include <dim-sum/beehive.h>
#include <dim-sum/mmu.h>
#include <dim-sum/string.h>
#include <asm/head64.h>
#include <asm/early_map.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/csr.h>

int arch_create_mmu(struct mem_desc *m)
{
	
	mmu_l1_t *kertbl;

	if (!m)
		return -EINVAL;

	kertbl = (mmu_l1_t *)kzalloc(PAGE_SIZE, PAF_KERNEL);
	if (!kertbl)
		return -ENOMEM;

	if ((u64)kertbl & (PAGE_SIZE - 1)) {
		kfree((void *)kertbl);
		return -EFAULT;
	}
	/* 设置内核态和用户态页表 */
	memcpy(kertbl, kernel_tbl_page, PAGE_SIZE);
	smp_lock(&m->lock);
	m->mem_arch.mmu.mmu_arch.kern_l1_tbl = kertbl; //(mmu_l1_t *)kern_memory_map.pt_l1;
	m->mem_arch.mmu.mmu_arch.user_l1_tbl = NULL;
	smp_unlock(&m->lock);
	return 0;
}

/**
 * Apply new protection bits to the page-table entry described by @info.
 * Thin wrapper around arch_mmu_walk_info_set_prot().
 */
int arch_mmu_set_info_prot(struct mmu_walk_info *info, unsigned long prot)
{
	return arch_mmu_walk_info_set_prot(info, prot);
}


/* True when the scause exception code denotes a store (write) fault. */
static inline bool esr_is_write_abort(unsigned long esr)
{
	unsigned long code = SCAUSE_TO_ECODE(esr);

	return code == EXC_STORE_ACCESS || code == EXC_STORE_PAGE_FAULT;
}

/* True when the scause exception code denotes an instruction or load (read) fault. */
static inline bool esr_is_read_abort(unsigned long esr)
{
	switch (SCAUSE_TO_ECODE(esr)) {
	case EXC_INST_MISALIGNED:
	case EXC_INST_ACCESS:
	case EXC_LOAD_ACCESS:
	case EXC_INST_PAGE_FAULT:
	case EXC_LOAD_PAGE_FAULT:
		return true;
	default:
		return false;
	}
}


/**
 * Check whether the VMA's access flags permit the faulting access.
 * @data carries the raw scause value; @addr is currently unused.
 * Returns 0 when the access is allowed, -EFAULT otherwise.
 */
int arch_mmu_vma_permission(struct vma_desc *v, virt_addr_t addr, unsigned long data)
{
	(void)addr;

	if (esr_is_write_abort(data) && (v->acces & VAC_WRITE))
		return 0;
	if (esr_is_read_abort(data) && (v->acces & VAC_READ))
		return 0;

	return -EFAULT;
}

/**
 * Map VMA access flags to a representative exception code.
 * When both read and write are set, the write code wins (the second
 * assignment overrides the first, as in the original ordering).
 */
unsigned long arch_mmu_vma_to_esr(struct vma_desc *v)
{
	unsigned long code = 0;

	if (v->acces & VAC_READ)
		code = EXC_LOAD_ACCESS;
	if (v->acces & VAC_WRITE)
		code = EXC_STORE_ACCESS;

	return code;
}

/**
 * Activate the address space described by @mmu: program satp with its
 * kernel L1 table and flush the local TLB.
 */
void arch_mmu_set_tlb(struct mmu_desc *mmu)
{
	volatile mmu_l1_t *l1_tbl;

	BUG_ON(!mmu);

	l1_tbl = mmu->mmu_arch.kern_l1_tbl;
	BUG_ON(!l1_tbl);

	/* Defensive re-check, mirroring the original's guarded write. */
	if (!l1_tbl)
		return;

	set_sapt((mmu_l1_t *)l1_tbl);
	local_flush_tlb_all();
}

/**
 * Read the user translation-table base register.
 * arm64 leftover (ttbr0_el1): not implemented on riscv64, always BUG()s.
 */
u64 arch_mmu_get_cpu_el1_ttbr0(void)
{
	// return mrs(ttbr0_el1);
	BUG();
	return 0;
}

/**
 * Read the kernel translation-table base register.
 * arm64 leftover (ttbr1_el1): not implemented on riscv64, always BUG()s.
 */
u64 arch_mmu_get_cpu_el1_ttbr1(void)
{
	// return mrs(ttbr1_el1);
	BUG();
	return 0;
}

/* User table base accessor; unimplemented on riscv64 (BUG()s via the wrapped call). */
u64 arch_mmu_get_utlb(void)
{
	return arch_mmu_get_cpu_el1_ttbr0();
}

/* Kernel table base accessor; unimplemented on riscv64 (BUG()s via the wrapped call). */
u64 arch_mmu_get_ktlb(void)
{
	return arch_mmu_get_cpu_el1_ttbr1();
}

/**
 * Return the L1 (root) page table of @mmu. @addr is unused because
 * kernel and user share the single kern_l1_tbl root on this port.
 */
mmu_l1_t *arch_mmu_get_l1(struct mmu_desc *mmu, virt_addr_t addr)
{
	volatile mmu_l1_t *root;

	(void)addr;

	if (!mmu)
		return NULL;

	root = mmu->mmu_arch.kern_l1_tbl;
	BUG_ON(!root);

	return (mmu_l1_t *)root;
}

/**
 * Flush TLB entries covering [addr, end).
 *
 * A non-zero @level selects the bulk kernel-range flush; otherwise each
 * user page is flushed individually, tagged with the user ASID.
 *
 * Fixes two defects of the previous version:
 *  - the per-page loop OR'ed every page number into the same variable,
 *    so from the second iteration on the flushed address contained
 *    stale bits from earlier pages;
 *  - `return void-expression;` in a void function is invalid ISO C.
 */
void arch_mmu_flush_tlb(struct mmu_walk_info *info, virt_addr_t addr, virt_addr_t end, int level)
{
	unsigned long asid_bits =
				info->in_mmu->mmu_arch.user_asid << 48;
	virt_addr_t va;

	if (level) {
		flush_tlb_kernel_range(addr, end);
		return;
	}

	/* Rebuild the ASID-tagged address from scratch for every page. */
	for (va = addr; va < end; va += PAGE_SIZE)
		flush_tlb_addr(asid_bits | (va >> PAGE_SHIFT));
}

/**
 * Allocate one zeroed page for a page-table level at runtime.
 * Only PAGE_SIZE requests are legal; the result is page aligned by
 * construction and never NULL (failure is fatal via BUG_ON).
 */
static void *arch_alloc_mmu_lx(unsigned long sz)
{
	void *tbl;

	BUG_ON(sz != PAGE_SIZE);

	tbl = (void *)alloc_zeroed_page_memory(PAF_KERNEL);
	BUG_ON(!tbl);

	return tbl;
}

/**
 * Boot-time variant: carve a zeroed, page-aligned page-table page out
 * of the permanent boot allocator. Failure is fatal.
 */
static void __init *arch_boot_alloc_mmu_lx(unsigned long sz)
{
	void *tbl;

	BUG_ON(sz != PAGE_SIZE);

	tbl = alloc_boot_mem_permanent(sz, sz);
	BUG_ON(!tbl);
	memset(tbl, 0, sz);

	return tbl;
}

/**
 * Write one page-table entry of the given @level at @write, pointing
 * at physical address @p with attribute bits @attr.
 * Level 1 is never written here; the current configuration only uses
 * levels 2, 3 and 4.
 */
static void mmu_lx_set_entry(void *write, phys_addr_t p,
							 u64 attr, int level)
{
	phys_addr_t entry;

	if (level == 1)
		return;

	/* PFN shifted into place, attribute bits in the low part. */
	entry = (__phys_to_pgnum(p) << MMU_LX_PFN_SHIFT) | (phys_addr_t)attr;

	switch (level) {
	case 2:
		*(mmu_l2_t *)write = (mmu_l2_t)entry;
		break;
	case 3:
		*(mmu_l3_t *)write = (mmu_l3_t)entry;
		break;
	case 4:
		*(mmu_l4_t *)write = (mmu_l4_t)entry;
		break;
	default:
		break;
	}
}
/**
 * Set a translation-table entry and account it in the owning mmu_desc.
 * The faulting page's stale TLB entry is flushed before the new entry
 * is written. Level 1 is never written here.
 */
void arch_mmu_ttbl_set_entry(struct mmu_walk_info *info, void *write,
	phys_addr_t p, u64 attr, int level)
{
	struct mmu_desc *owner = info->in_mmu;

	if (level == 1)
		return;

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);
	mmu_lx_set_entry(write, p, attr, level);

	/* Track how many intermediate tables this mmu owns. */
	switch (level) {
	case 2:
		owner->mmu_arch.l2_nr++;
		break;
	case 3:
		owner->mmu_arch.l3_nr++;
		break;
	default:
		break;
	}
}

/**
 * Locate the L2 table for info->in_faddr under info->out_l1.
 * On success fills out_l2wptr/out_l2 and returns 0; on a missing
 * entry clears out_l2 and returns -EFAULT.
 */
static int arch_mmu_find_l2(struct mmu_walk_info *info)
{
	mmu_l2_t *wptr, val;

	if (!info->out_l1)
		return -EFAULT;

	wptr = mmu_l2_wptr(info->out_l1, info->in_faddr);
	info->out_l2wptr = wptr;

	val = mmu_l2_value(wptr);
	if (mmu_l2_is_empty(val)) {
		info->out_l2 = 0;
		return -EFAULT;
	}

	info->out_l2 = mmu_l2_val_vaddr(val);
	return 0;
}

/**
 * Locate the L3 table for info->in_faddr under info->out_l2.
 * Same contract as arch_mmu_find_l2(), one level down.
 */
static int arch_mmu_find_l3(struct mmu_walk_info *info)
{
	mmu_l3_t *wptr, val;

	if (!info->out_l2)
		return -EFAULT;

	wptr = mmu_l3_wptr(info->out_l2, info->in_faddr);
	info->out_l3wptr = wptr;

	val = mmu_l3_value(wptr);
	if (mmu_l3_is_empty(val)) {
		info->out_l3 = 0;
		return -EFAULT;
	}

	info->out_l3 = mmu_l3_val_vaddr(val);
	return 0;
}

/**
 * Locate the leaf (L4) entry for info->in_faddr under info->out_l3.
 * Unlike the upper levels, out_l4 receives the physical page address.
 */
static int arch_mmu_find_l4(struct mmu_walk_info *info)
{
	mmu_l4_t *wptr, val;

	if (!info->out_l3)
		return -EFAULT;

	wptr = mmu_l4_wptr(info->out_l3, info->in_faddr);
	info->out_l4wptr = wptr;

	val = mmu_l4_value(wptr);
	if (mmu_l4_is_empty(val)) {
		info->out_l4 = 0;
		return -EFAULT;
	}

	/* A leaf entry stores the address of the physical page. */
	info->out_l4 = mmu_l4_val_paddr(val);
	return 0;
}

/**
 * Return 1 when the table page @lx holds only empty entries, 0
 * otherwise. A NULL table is reported as non-empty, preserving the
 * original convention.
 */
static int arch_mmu_check_empt_tlx(mmu_l4_t *lx)
{
	unsigned int idx;

	if (!lx)
		return 0;

	for (idx = 0; idx < PAGE_SIZE / sizeof(*lx); idx++) {
		if (lx[idx])
			return 0;
	}
	return 1;
}

/**
 * Unmap the leaf (L4) entry described by @info and release the backing
 * user page.
 */
static int arch_mmu_unmap_l4(struct mmu_walk_info *info)
{
	mmu_l4_t cur;

	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;

	cur = (mmu_l4_t)(*(info->out_l4wptr));
	/* The cached walk result must still match the live entry. */
	BUG_ON(info->out_l4 != mmu_l4_val_paddr(cur));

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l4wptr) = 0;
	put_one_user_page((unsigned long)(info->out_l4));
	return 0;
}

/**
 * Clear the leaf (L4) entry described by @info WITHOUT releasing the
 * physical page — the caller keeps ownership of it.
 */
static int arch_mmu_cutoff_l4(struct mmu_walk_info *info)
{
	mmu_l4_t cur;

	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;

	cur = (mmu_l4_t)(*(info->out_l4wptr));
	/* The cached walk result must still match the live entry. */
	BUG_ON(info->out_l4 != mmu_l4_val_paddr(cur));

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l4wptr) = 0;
	return 0;
}


/**
 * Tear down the L3 table referenced by @info once it holds no live
 * entries; a still-populated table is left alone (returns 0).
 */
__maybe_unused static int arch_mmu_unmap_l3(struct mmu_walk_info *info)
{
	mmu_l3_t cur;

	if (!info->out_l3wptr || !info->out_l3)
		return -EINVAL;

	cur = (mmu_l3_t)(*(info->out_l3wptr));
	BUG_ON(info->out_l3 != (mmu_l3_t *)linear_phys_to_virt((virt_addr_t)cur & PAGE_MASK));

	/* Only release the table when every slot is empty. */
	if (!arch_mmu_check_empt_tlx((mmu_l4_t *)(info->out_l3)))
		return 0;

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l3wptr) = 0;
	kfree((void *)(info->out_l3));
	return 0;
}

/**
 * Tear down the L2 table referenced by @info once it holds no live
 * entries; a still-populated table is left alone (returns 0).
 */
__maybe_unused static int arch_mmu_unmap_l2(struct mmu_walk_info *info)
{
	mmu_l2_t cur;

	if (!info->out_l2wptr || !info->out_l2)
		return -EINVAL;

	cur = (mmu_l2_t)(*(info->out_l2wptr));
	BUG_ON(info->out_l2 != (mmu_l2_t *)linear_phys_to_virt((virt_addr_t)cur & PAGE_MASK));

	/* Only release the table when every slot is empty. */
	if (!arch_mmu_check_empt_tlx((mmu_l4_t *)(info->out_l2)))
		return 0;

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l2wptr) = 0;
	kfree((void *)(info->out_l2));
	return 0;
}

/**
 * Ensure an L2 table exists for info->in_faddr, allocating and linking
 * a fresh zeroed page when the L1 slot is empty.
 */
static int arch_mmu_ttbl_map_l2(struct mmu_walk_info *info)
{
	mmu_l2_t *wptr, val;
	virt_addr_t tbl;

	wptr = mmu_l2_wptr(info->out_l1, info->in_faddr);
	info->out_l2wptr = wptr;

	val = mmu_l2_value(wptr);
	if (!mmu_l2_is_empty(val)) {
		info->out_l2 = mmu_l2_val_vaddr(val);
		return 0;
	}

	tbl = (virt_addr_t)arch_alloc_mmu_lx(PAGE_SIZE);
	if (!tbl)
		return -ENOMEM;

	/* Attribute 1 marks a next-level table pointer. */
	arch_mmu_ttbl_set_entry(info, wptr, linear_virt_to_phys(tbl), 1, 2);
	info->out_l2 = (mmu_l2_t *)tbl;
	return 0;
}

/**
 * Ensure an L3 table exists for info->in_faddr, allocating and linking
 * a fresh zeroed page when the L2 slot is empty.
 */
static int arch_mmu_ttbl_map_l3(struct mmu_walk_info *info)
{
	mmu_l3_t *wptr, val;
	virt_addr_t tbl;

	wptr = mmu_l3_wptr(info->out_l2, info->in_faddr);
	info->out_l3wptr = wptr;

	val = mmu_l3_value(wptr);
	if (!mmu_l3_is_empty(val)) {
		info->out_l3 = mmu_l3_val_vaddr(val);
		return 0;
	}

	tbl = (virt_addr_t)arch_alloc_mmu_lx(PAGE_SIZE);
	if (!tbl)
		return -ENOMEM;

	/* Attribute 1 marks a next-level table pointer. */
	arch_mmu_ttbl_set_entry(info, wptr, linear_virt_to_phys(tbl), 1, 3);
	info->out_l3 = (mmu_l3_t *)tbl;
	return 0;
}

/**
 * Core page-fault walk: make sure the L1..L3 tables covering the
 * faulting address exist and publish the L4 slot pointer.
 * Returns MMU_MAP_L3_TBL on full success, otherwise the deepest level
 * that could be established.
 */
int arch_mmu_ttbl_fault(struct mmu_walk_info *info)
{
	mmu_l1_t *root = arch_mmu_get_l1(info->in_mmu, info->in_faddr);

	if (!root) {
		info->out_l1 = NULL;
		return MMU_MAP_L1_TBL;
	}
	info->out_l1 = root;

	if (arch_mmu_ttbl_map_l2(info)) {
		info->out_l2 = NULL;
		return MMU_MAP_L1_TBL;
	}

	if (arch_mmu_ttbl_map_l3(info)) {
		info->out_l3 = NULL;
		return MMU_MAP_L2_TBL;
	}

	info->out_l4wptr = mmu_l4_wptr(info->out_l3, info->in_faddr);
	return MMU_MAP_L3_TBL;
}

/**
 * Read-only walk of the translation tables for info->in_faddr,
 * filling the out_l* fields level by level. Returns 0 when a leaf
 * mapping exists, otherwise the first failing level's error.
 */
int arch_mmu_find_ttbl(struct mmu_walk_info *info)
{
	mmu_l1_t *root = arch_mmu_get_l1(info->in_mmu, info->in_faddr);
	int err;

	if (!root) {
		info->out_l1 = NULL;
		return MMU_MAP_L1_TBL;
	}
	info->out_l1 = root;

	err = arch_mmu_find_l2(info);
	if (!err)
		err = arch_mmu_find_l3(info);
	if (!err)
		err = arch_mmu_find_l4(info);
	return err;
}

/**
 * Unmap a single virtual address: walk the tables, clear the leaf
 * entry and release the backing user page.
 * A missing mapping is not an error (returns 0).
 */
int arch_mmu_unmap_vaddr(struct mmu_walk_info *info)
{
	int ret;

	ret = arch_mmu_find_ttbl(info);
	if (ret)
		return 0;
	ret = arch_mmu_unmap_l4(info);
	if (ret)
		return ret;
/**
 * Freeing the intermediate L2/L3 tables here is currently unnecessary.
 */
#if 0
	ret = mmu_unmap_l3(info);
	if (ret)
		return ret;
	ret = mmu_unmap_l2(info);
	if (ret)
		return ret;
#endif
	return 0;
}

/**
 * Debug helper: print every field of a walk-info structure plus the
 * decoded L2/L3/L4 entries it points at.
 * NOTE(review): several %p conversions receive values that look
 * integer-typed (e.g. in_prot, in_faddr) — mismatched printf
 * specifiers are undefined behavior; confirm the typedefs before
 * trusting the printed values. Also assumes out_l2wptr/out_l3wptr/
 * out_l4wptr are non-NULL — only call after a successful walk.
 */
void dump_mmu_walk_info(struct mmu_walk_info* info)
{
	pr_info("info mmutlb:%p kernel_tbl_page:%p\n", info->in_mmu->mmu_arch.kern_l1_tbl, kernel_tbl_page);

	pr_info("info mmu:%p port:%p\n", info->in_mmu, info->in_prot);
	pr_info("info faddr:%p paddr:%p\n", info->in_faddr, info->in_paddr);
	pr_info("info mapstart:%p mapend:%p\n", info->in_mapstart, info->in_mapend);
	pr_info("info out_l1:%p out_l1wptr:%p l1_idx:%lx\n", info->out_l1, info->out_l1wptr, mmu_l1_index(info->in_faddr));
	pr_info("info out_l2:%p out_l2wptr:%p l2_idx:%lx\n", info->out_l2, info->out_l2wptr, mmu_l2_index(info->in_faddr));
	pr_info("info out_l3:%p out_l3wptr:%p l3_idx:%lx\n", info->out_l3, info->out_l3wptr, mmu_l3_index(info->in_faddr));
	pr_info("info out_l4:%p out_l4wptr:%p\n", info->out_l4, info->out_l4wptr);

	pr_info("info *out_l2wptr:%lx v:%p p:%p\n", 
	*info->out_l2wptr, 
	mmu_l2_val_vaddr(mmu_l2_value(info->out_l2wptr)),
	linear_virt_to_phys(mmu_l2_val_vaddr(mmu_l2_value(info->out_l2wptr))));

	pr_info("info *out_l3wptr:%lx v:%p p:%p\n", 
	*info->out_l3wptr, 
	mmu_l3_val_vaddr(mmu_l3_value(info->out_l3wptr)), 
	linear_virt_to_phys(mmu_l3_val_vaddr(mmu_l3_value(info->out_l3wptr))));

	pr_info("info *out_l4wptr:%lx v:%p p:%p\n", 
	*info->out_l4wptr, 
	mmu_l4_val_vaddr(mmu_l4_value(info->out_l4wptr)), 
	linear_virt_to_phys(mmu_l4_val_vaddr(mmu_l4_value(info->out_l4wptr))));
}

/**
 * Sanity check that @addr is mapped under @mmu and dump the resulting
 * walk. BUG()s when the walk fails.
 */
void arch_mmu_check_test_map(struct mmu_desc *mmu, virt_addr_t addr)
{
	struct mmu_walk_info walk;

	memset(&walk, 0, sizeof(walk));
	walk.in_faddr = addr;
	walk.in_mmu = mmu;

	BUG_ON(arch_mmu_find_ttbl(&walk));
	dump_mmu_walk_info(&walk);
}

/**
 * Handle a fault by mapping a fresh user page at info->in_faddr.
 *
 * The page is first mapped kernel read/write so it can be zeroed,
 * then remapped with the caller-requested protection via
 * arch_mmu_complete_map().
 *
 * Fix: the previous version leaked the freshly allocated page when
 * the allocator returned an unaligned address (-EINVAL path returned
 * without put_one_user_page()).
 */
int arch_mmu_map_vaddr(struct mmu_walk_info *info)
{
	int err;
	virt_addr_t p_addr;

	err = arch_mmu_ttbl_fault(info);
	if (err != MMU_MAP_L3_TBL)
		BUG();

	err = arch_mmu_ttbl_permission(info);
	if (err == MMU_L4_PERM)
		BUG();

	/* TODO: this should allocate from a user-space page pool. */
	p_addr = (virt_addr_t)get_one_user_page();
	if (!p_addr)
		return -ENOMEM;
	if (p_addr & (PAGE_SIZE - 1)) {
		/* Don't leak the page on an alignment failure. */
		put_one_user_page(p_addr);
		return -EINVAL;
	}
	info->in_paddr = p_addr;

	/* Temporary kernel RW mapping so the new page can be cleared. */
	err = arch_mmu_kernel_rw_map(info);
	if (err) {
		put_one_user_page(p_addr);
		return -EFAULT;
	}

	memset((char *)(info->in_faddr & PAGE_MASK), 0, PAGE_SIZE);

	return arch_mmu_complete_map(info);
}

/**
 * Cut off (unmap) one virtual address without releasing the backing
 * physical page. A missing mapping is treated as success.
 */
int arch_mmu_cutoff_vaddr(struct mmu_walk_info *info)
{
	if (arch_mmu_find_ttbl(info))
		return 0;

	return arch_mmu_cutoff_l4(info);
}


/**
 * Rewrite the leaf entry for @info with the protection bits from
 * info->in_prot, keeping the physical page unchanged.
 */
static int arch_mmu_mprotect_l4(struct mmu_walk_info *info)
{
	mmu_l4_t cur;

	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;

	cur = (mmu_l4_t)(*(info->out_l4wptr));
	/* The cached walk result must still match the live entry. */
	BUG_ON(info->out_l4 != mmu_l4_val_paddr(cur));

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	arch_mmu_ttbl_set_entry(info, info->out_l4wptr,
			(phys_addr_t)mmu_l4_val_paddr(cur), info->in_prot, 4);
	return 0;
}

/**
 * Change the protection of a single mapped virtual address.
 * An unmapped address is silently ignored (returns 0).
 */
int arch_mmu_mprotect_vaddr(struct mmu_walk_info *info)
{
	if (arch_mmu_find_ttbl(info))
		return 0;

	return arch_mmu_mprotect_l4(info);
}

/**
 * Mark the page frame backing the leaf mapping of @info as dirty.
 * The stale TLB entry is flushed before the page flag is set.
 */
static int arch_mmu_dirty_vaddr_l4(struct mmu_walk_info *info)
{
	mmu_l4_t l4_v, *l4_phy = NULL;
	struct page_frame* page = NULL;
	
	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;
	l4_v = mmu_l4_value(info->out_l4wptr);
	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);
	/* Translate the leaf entry back to its physical page frame. */
	l4_phy = mmu_l4_val_paddr(l4_v);
	page = phys_addr_to_page((unsigned long)l4_phy);
	if (page)
		set_pageflag_dirty(page);
	return 0;
}

/**
 * Mark the page backing @info's virtual address as dirty.
 * A missing mapping is silently ignored (returns 0).
 */
int arch_mmu_dirty_vaddr(struct mmu_walk_info *info)
{
	if (arch_mmu_find_ttbl(info))
		return 0;

	return arch_mmu_dirty_vaddr_l4(info);
}

/* Release the user page referenced by a leaf (L4) entry. */
static int arch_mmu_clean_tbl_l4(mmu_l4_t *phyl4)
{
	if (!phyl4)
		return -EINVAL;

	put_one_user_page((unsigned long)phyl4);
	return 0;
}

/**
 * Release one L3 table: free every user page referenced by its leaf
 * entries, then free the table page itself.
 * Returns -EFAULT if the freed/empty counters do not cover the table.
 * NOTE(review): the table is freed with kfree() although the runtime
 * allocator is alloc_zeroed_page_memory() (arch_alloc_mmu_lx) and the
 * boot path uses the permanent boot allocator — confirm the
 * allocator/free pairing is valid in this kernel.
 */
static int arch_mmu_clean_tbl_l3(mmu_l3_t *l3)
{
	int i, ret, count;
	int lx_c = 0, _lx_c = 0; /* freed entries vs already-empty slots */
	mmu_l4_t* v_lx;
	if (!l3)
		return -EINVAL;

	count = (PAGE_SIZE / sizeof(*l3));
	for (i = 0; i < count; i++) {
		if (l3[i]) {
			v_lx = mmu_l4_val_paddr(l3[i]);
			ret = arch_mmu_clean_tbl_l4(v_lx);
			if (!ret)
				lx_c++;
		} else
			_lx_c++;
	}

	/* Every slot must be accounted for before freeing the table. */
	if ((lx_c + _lx_c) != (count))
		return -EFAULT;
	
	memset(l3, 0, PAGE_SIZE);
	kfree(l3);
	return 0;
}

/**
 * Release one L2 table: tear down every L3 table it references, then
 * free the L2 table page itself.
 * Returns -EFAULT if the freed/empty counters do not cover the table.
 * NOTE(review): freed with kfree() — see the allocator-pairing note on
 * arch_mmu_clean_tbl_l3().
 */
static int arch_mmu_clean_tbl_l2(mmu_l2_t *l2)
{
	int i, ret, count;
	int lx_c = 0, _lx_c = 0; /* freed entries vs already-empty slots */
	mmu_l3_t* v_lx;
	if (!l2)
		return -EINVAL;

	count = (PAGE_SIZE / sizeof(*l2));
	for (i = 0; i < count; i++) {
		if (l2[i]) {
			v_lx = mmu_l3_val_vaddr(l2[i]);
			ret = arch_mmu_clean_tbl_l3(v_lx);
			if (!ret)
				lx_c++;
		} else
			_lx_c++;
	}

	/* Every slot must be accounted for before freeing the table. */
	if ((lx_c + _lx_c) != (count))
		return -EFAULT;
	
	memset(l2, 0, PAGE_SIZE);
	kfree(l2);
	return 0;
}

/**
 * Tear down all user-range page tables reachable from the L1 root.
 * Only the L1 slots covering the user address range are walked; the
 * kernel half of the root page must stay intact.
 */
static int arch_mmu_clean_tbl_l1(mmu_l1_t *l1)
{
	int i, ret, count;
	int lx_c = 0, _lx_c = 0; /* freed entries vs already-empty slots */
	mmu_l2_t* v_lx;
	if (!l1)
		return -EINVAL;

	/**
	 * Unlike arm64, the riscv64 MMU here does not keep the user
	 * L2/L3 tables under a separate root page, so i and count are
	 * limited to the user address range — the whole L1 page must
	 * not be cleared. Since array indexing starts at 0, count is
	 * the end index plus one, forming the array size.
	 */
	i = mmu_l1_index(USER_VA_START);
	count = mmu_l1_index(USER_VA_END) + 1;

	for (; i < count; i++) {
		if (l1[i]) {
			v_lx = mmu_l2_val_vaddr(l1[i]);
			ret = arch_mmu_clean_tbl_l2(v_lx);
			if (!ret)
				lx_c++;
		} else
			_lx_c++;
	}

	/* Every walked slot must be accounted for. */
	if ((lx_c + _lx_c) != (count))
		return -EFAULT;

	return 0;
}

/**
 * Reclaim all user-range page tables reachable from @info's mmu.
 * A missing L1 table is treated as already clean (returns 0).
 */
int arch_mmu_clean_tbl(struct mmu_walk_info *info)
{
	mmu_l1_t *root = arch_mmu_get_l1(info->in_mmu, 0);

	if (!root)
		return 0;

	return arch_mmu_clean_tbl_l1(root);
}

/**
 * Tear down a process's address space on exit: reclaim all user-level
 * page tables, stash the L1 page for final destruction and detach it
 * from the mem_desc.
 */
int arch_exit_mmu(struct process_desc *proc, struct mem_desc *m)
{
	mmu_l1_t *root;
	int err;

	if (!proc || !m)
		return -EINVAL;

	root = arch_mmu_get_l1(&m->mem_arch.mmu, 0);
	if (!root)
		return 0;

	err = arch_mmu_clean_tbl_l1(root);
	if (err)
		return err;

	/* The L1 page itself is freed later during process destruction. */
	proc->arch_desc.arch_destroy.save_exitmmu_l1 = root;
	m->mem_arch.mmu.mmu_arch.kern_l1_tbl = NULL;
	return 0;
}

/**
 * Check the leaf entry against the requested protection.
 * Returns 0 on an exact match, MMU_L4_EMPT for an empty entry and
 * MMU_L4_PERM on a protection mismatch.
 * NOTE(review): -EINTR for a NULL argument looks like it should be
 * -EINVAL — confirm against callers before changing it.
 */
int arch_mmu_ttbl_permission(struct mmu_walk_info *info)
{
	if (!info || !info->out_l4wptr)
		return -EINTR;
	if (mmu_l4_is_empty(*(info->out_l4wptr)))
		return MMU_L4_EMPT;
	/**
	 * Compare the permission bits stored in the L4 entry with the
	 * requested protection; only an exact match is acceptable.
	 */
	if ((*(info->out_l4wptr) & MMU_L4_PROT_MASK) == info->in_prot)
		return 0;
	return MMU_L4_PERM;
}

/**
 * Finish a mapping started by arch_mmu_ttbl_fault(): write the leaf
 * entry pointing at info->in_paddr with info->in_prot.
 * Must be called with the same mmu_walk_info used for the fault walk,
 * after a data page has been assigned to in_paddr.
 */
int arch_mmu_complete_map(struct mmu_walk_info *info)
{
	if (!info || !info->in_mmu)
		return -EFAULT;
	if (!info->out_l4wptr || !info->in_paddr)
		return -EFAULT;

	arch_mmu_ttbl_set_entry(info, info->out_l4wptr,
			info->in_paddr, info->in_prot, 4);
	return 0;
}

/**
 * Install a temporary kernel read/write leaf mapping for
 * info->in_paddr so a new page can be initialised before its final
 * protection is applied.
 */
int arch_mmu_kernel_rw_map(struct mmu_walk_info *info)
{
	const u64 rw_prot = PAGE_DEFAULT | PTE_HW_READ | PTE_HW_WRITE |
			    PTE_DF | PTE_NG | PTE_AF;

	if (!info || !info->out_l4wptr ||
			!info->in_paddr || !info->in_mmu)
		return -EFAULT;

	arch_mmu_ttbl_set_entry(info, info->out_l4wptr,
			info->in_paddr, rw_prot, 4);
	return 0;
}

/**
 * Populate leaf (L4) entries covering [addr, end) inside one L3 table.
 * Already-populated entries are left untouched. @alloc is unused at
 * this level; it is kept for signature symmetry with the upper levels.
 */
static void arch_mmu_l3_mapping_range(mmu_l3_t *mmu_l3, unsigned long addr,
				unsigned long end,
				phys_addr_t phys, page_attr_t prot,
				void *(*alloc)(unsigned long size))
{
	mmu_l4_t *slot = mmu_l4_wptr(mmu_l3, addr);
	unsigned long next;

	(void)alloc;

	do {
		/* Address of the next entry boundary, clamped to @end. */
		next = mmu_l3_promote(addr, end);

		if (mmu_l4_is_empty(*slot))
			mmu_lx_set_entry(slot, phys, prot, 4);

		phys += next - addr;
		slot++;
		addr = next;
	} while (addr != end);
}

/**
 * Walk one L2 table over [addr, end), creating L3 tables on demand
 * via @alloc, and delegate each sub-range to the L3 mapper.
 */
static void arch_mmu_l2_mapping_range(mmu_l2_t *mmu_l2, unsigned long addr,
				unsigned long end,
				phys_addr_t phys, page_attr_t prot,
				void *(*alloc)(unsigned long size))
{
	mmu_l3_t *slot = mmu_l3_wptr(mmu_l2, addr);
	mmu_l3_t *tbl;
	unsigned long next;

	do {
		next = mmu_l2_promote(addr, end);

		if (mmu_l3_is_empty(*slot)) {
			tbl = alloc(PTRS_PER_MMU_L3 * sizeof(mmu_l3_t));
			BUG_ON(!tbl);
			/* Attribute 1 marks a next-level table pointer. */
			mmu_lx_set_entry(slot, linear_virt_to_phys(tbl), 1, 3);
		} else {
			tbl = mmu_l3_val_vaddr(mmu_l3_value(slot));
		}

		arch_mmu_l3_mapping_range(tbl, addr, next, phys, prot, alloc);
		phys += next - addr;
		slot++;
		addr = next;
	} while (addr != end);
}

/**
 * Walk the L1 root over [addr, end), creating L2 tables on demand via
 * @alloc, and delegate each sub-range to the L2 mapper.
 */
static void arch_mmu_l1_mapping_range(mmu_l1_t *mmu_l1,
				    unsigned long addr, unsigned long end,
					phys_addr_t phys, page_attr_t prot,
				    void *(*alloc)(unsigned long size))
{
	mmu_l2_t *slot = mmu_l2_wptr(mmu_l1, addr);
	mmu_l2_t *tbl;
	unsigned long next;

	do {
		next = mmu_l1_promote(addr, end);

		if (mmu_l2_is_empty(*slot)) {
			tbl = alloc(PTRS_PER_MMU_L2 * sizeof(mmu_l2_t));
			BUG_ON(!tbl);
			mmu_lx_set_entry(slot, linear_virt_to_phys(tbl), _PAGE_TABLE, 2);
		} else {
			tbl = mmu_l2_val_vaddr(mmu_l2_value(slot));
		}

		arch_mmu_l2_mapping_range(tbl, addr, next, phys, prot, alloc);
		phys += next - addr;
		slot++;
		addr = next;
	} while (addr != end);
}


/**
 * Map [virt, virt + size) to [phys, ...) in @mm's page tables.
 * The virtual start is rounded down and the length rounded up so the
 * whole request is covered by full pages.
 */
static void mapping_addr_range_core(struct memory_map_desc *mm,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, page_attr_t prot,
				    void *(*alloc)(unsigned long size))
{
	unsigned long start = virt & PAGE_MASK;
	unsigned long len = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	arch_mmu_l1_mapping_range(mm->pt_l1, start, start + len, phys, prot, alloc);
}


/**
 * Boot-time mapping helper for the kernel linear space; refuses
 * virtual addresses below KERNEL_VA_START.
 */
static void __init mapping_addr_range(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, page_attr_t prot)
{
	/* Kernel virtual space begins at KERNEL_VA_START. */
	if (virt < KERNEL_VA_START) {
		pr_warn("BUG: failure to create linear-space for %pa at 0x%016lx.\n",
					&phys, virt);
		return;
	}

	mapping_addr_range_core(&kern_memory_map, phys, virt,
			 size, prot, arch_boot_alloc_mmu_lx);
}


/**
 * Map an I/O range [addr, end) at runtime into the kernel tables.
 *
 * Both the virtual and physical start must be page aligned.
 *
 * Fix: reject an empty or inverted range. The underlying do/while
 * mapping loops always execute at least once, so end <= addr would
 * erroneously map (or iterate over) pages outside the request, and
 * end < addr would wrap the computed length.
 */
int arch_mmu_ioremap_range(unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, unsigned long prot)
{
	if (!addr || !end)
		return -EINVAL;
	if (end <= addr)
		return -EINVAL;
	if ((addr & ~PAGE_MASK) || (phys_addr & ~PAGE_MASK))
		return -EINVAL;

	mapping_addr_range_core(&kern_memory_map, phys_addr, addr,
			 end - addr, prot, arch_alloc_mmu_lx);
	return 0;
}

/* Presumably the shared zero page — not referenced in this file; verify users. */
void *empty_zero_page;
/* Root (L1) page table page for the kernel address space; set up in
 * init_allmemmap_kernel_tbl() and copied into each new process. */
void *kernel_tbl_page;

/**
 * Build the kernel root page table and create a linear mapping for
 * every registered physical memory region, then switch to it.
 */
void __init init_allmemmap_kernel_tbl(void)
{
	int i;

	kernel_tbl_page = alloc_boot_mem_permanent(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!kernel_tbl_page);
	memset(kernel_tbl_page, 0x0, PAGE_SIZE);
	kern_memory_map.pt_l1 = (pt_l1_t*)kernel_tbl_page;
	/**
	 * Walk every registered memory region.
	 */
	for (i = 0; i < all_memory_regions.cnt; i++) {
		unsigned long start;
		unsigned long end;

		start = all_memory_regions.regions[i].base;
		end = start + all_memory_regions.regions[i].size;
		if (start >= end)
			break;
		/**
		 * Create a linear mapping for each memory region.
		 * On QEMU this maps 0x80200000 to 0xffffffe000000000,
		 * mapping size 0xbfe00000.
		 */

		mapping_addr_range(start, (unsigned long)linear_phys_to_virt(start),
				end - start, PAGE_ATTR_KERNEL_EXEC);
	}
	/* Switch satp to the new root table and flush stale TLB entries. */
	set_sapt(kernel_tbl_page);
	local_flush_tlb_all();

}

/* Intentionally empty stub: the linear mapping is created in
 * init_allmemmap_kernel_tbl() on this port. */
void __init init_linear_mapping(void)
{
}