#include <dim-sum/errno.h>
#include <dim-sum/boot_allotter.h>
#include <dim-sum/beehive.h>
#include <dim-sum/cache.h>
#include <dim-sum/init.h>
#include <dim-sum/memory_regions.h>
#include <dim-sum/mm_types.h>
#include <dim-sum/mmu.h>
#include <dim-sum/mem.h>
#include <dim-sum/string.h>
#include <asm/early_map.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/cpu_esr.h>
#include <asm/mmu_context.h>

int arch_create_mmu(struct mem_desc *m)
{
	
	mmu_l1_t *usertbl;

	if (!m)
		return -EINVAL;

	usertbl = (mmu_l1_t *)kzalloc(PAGE_SIZE, PAF_KERNEL);
	if (!usertbl)
		return -ENOMEM;

	if ((u64)usertbl & (PAGE_SIZE - 1)) {
		kfree((void *)usertbl);
		return -EFAULT;
	}
	/* 设置内核态和用户态页表 */

	smp_lock(&m->lock);
	m->mem_arch.mmu.mmu_arch.kern_l1_tbl = (mmu_l1_t *)kern_memory_map.pt_l1;
	m->mem_arch.mmu.mmu_arch.user_l1_tbl = usertbl;
	smp_unlock(&m->lock);
	return 0;
}

/**
 * Apply @prot to the entry described by @info.
 * Thin pass-through to arch_mmu_walk_info_set_prot().
 */
int arch_mmu_set_info_prot(struct mmu_walk_info *info, unsigned long prot)
{
	int ret = arch_mmu_walk_info_set_prot(info, prot);

	return ret;
}

static inline bool esr_is_write_abort(unsigned long esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}

static inline bool esr_is_read_abort(unsigned long esr)
{
	return !(esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}


/**
 * Decide whether the VMA's access rights cover the faulting access
 * described by the ESR value carried in @data (@addr is unused).
 * Returns 0 when the access is permitted, -EFAULT otherwise.
 */
int arch_mmu_vma_permission(struct vma_desc *v, virt_addr_t addr, unsigned long data)
{
	unsigned long esr = (u32)data;

	(void)addr;

	if (esr_is_write_abort(esr) && (v->acces & VAC_WRITE))
		return 0;
	if (esr_is_read_abort(esr) && (v->acces & VAC_READ))
		return 0;

	return -EFAULT;
}

/**
 * Synthesize an ESR value describing the strongest access a VMA allows:
 * ESR_ELx_WNR set for a writable VMA, clear for a read-only one.
 *
 * Fix: the read branch used to clear ESR_ELx_WNR unconditionally, so a
 * VMA with both VAC_WRITE and VAC_READ came out encoded as a read abort,
 * undoing the write branch. Writable now takes precedence.
 */
unsigned long arch_mmu_vma_to_esr(struct vma_desc *v)
{
	unsigned long esr = 0;

	if (v->acces & VAC_WRITE)
		esr |= ESR_ELx_WNR;	/* encode as write abort */
	else if (v->acces & VAC_READ)
		esr &= ~ESR_ELx_WNR;	/* already clear: read abort */

	return esr;
}

/**
 * Load the descriptor's translation tables into the CPU: the kernel L1
 * table goes to TTBR1_EL1 and the user L1 table to TTBR0_EL1, followed
 * by full TLB and cache invalidation.
 *
 * Fix: the original guarded each msr write with "if (k_l1)"/"if (u_l1)"
 * immediately after BUG_ON() on the very same pointers — dead checks,
 * removed.
 */
void arch_mmu_set_tlb(struct mmu_desc* mmu)
{
	phys_addr_t k_tbl, u_tbl;
	volatile mmu_l1_t *k_l1, *u_l1;

	BUG_ON(!mmu);

	k_l1 = mmu->mmu_arch.kern_l1_tbl;
	u_l1 = mmu->mmu_arch.user_l1_tbl;

	BUG_ON(!k_l1);
	BUG_ON(!u_l1);

	/* Kernel tables live behind TTBR1, user tables behind TTBR0. */
	k_tbl = linear_virt_to_phys(k_l1);
	msr(ttbr1_el1, k_tbl);

	u_tbl = linear_virt_to_phys(u_l1);
	msr(ttbr0_el1, u_tbl);

	flush_tlb_all();
	flush_cache_all();
}

/* Read the current user translation table base register (TTBR0_EL1). */
u64 arch_mmu_get_cpu_el1_ttbr0(void)
{
	return mrs(ttbr0_el1);
}

/* Read the current kernel translation table base register (TTBR1_EL1). */
u64 arch_mmu_get_cpu_el1_ttbr1(void)
{
	return mrs(ttbr1_el1);
}

/* User page-table base: convenience alias for TTBR0_EL1. */
u64 arch_mmu_get_utlb(void)
{
	return arch_mmu_get_cpu_el1_ttbr0();
}

/* Kernel page-table base: convenience alias for TTBR1_EL1. */
u64 arch_mmu_get_ktlb(void)
{
	return arch_mmu_get_cpu_el1_ttbr1();
}

/**
 * Select the L1 table that translates @addr: the user table for
 * addresses at or below USER_VA_END, the kernel table otherwise.
 * Returns NULL only when @mmu itself is NULL; a missing table BUGs.
 */
mmu_l1_t *arch_mmu_get_l1(struct mmu_desc *mmu, virt_addr_t addr)
{
	volatile mmu_l1_t *tbl;

	if (!mmu)
		return NULL;

	tbl = (addr <= USER_VA_END) ? mmu->mmu_arch.user_l1_tbl
				    : mmu->mmu_arch.kern_l1_tbl;
	BUG_ON(!tbl);

	return (mmu_l1_t *)tbl;
}

/**
 * Flush TLB entries covering [addr, end).
 *
 * A non-zero @level selects the kernel range flush; level 0 flushes
 * user pages one at a time, tagging each operand with the mmu's ASID
 * in bits [63:48] as the TLBI instruction expects.
 *
 * Fixes: the loop used "asid_addr |= start >> PAGE_SHIFT", so address
 * bits from earlier pages leaked into every later flush operand — now
 * each operand is composed fresh per page. The ASID is also widened
 * before the 48-bit shift to avoid shifting a narrower integer type.
 */
void arch_mmu_flush_tlb(struct mmu_walk_info *info, virt_addr_t addr, virt_addr_t end, int level)
{
	unsigned long asid =
			(unsigned long)info->in_mmu->mmu_arch.user_asid << 48;
	virt_addr_t start;

	if (level)
		return flush_tlb_kernel_range(addr, end);

	for (start = addr; start < end; start += PAGE_SIZE)
		flush_tlb_addr(asid | (start >> PAGE_SHIFT));
}

/**
 * Write a translation-table entry at @write for table level @level,
 * composing the physical address @p with the attribute bits @attr.
 * Only levels 2, 3 and 4 are handled in the current configuration;
 * level 1 (and anything unexpected) is a no-op.
 *
 * Cleanup: the three identical per-level TLB flush calls are folded
 * into one shared flush after the switch.
 */
void arch_mmu_ttbl_set_entry(struct mmu_walk_info *info, void *write,
	virt_addr_t p, u64 attr, int level)
{
	struct mmu_desc *mmu = info->in_mmu;
	phys_addr_t entry = (phys_addr_t)p | (phys_addr_t)attr;

	switch (level) {
	case 2:
		*((mmu_l2_t *)write) = (mmu_l2_t)entry;
		mmu->mmu_arch.l2_nr++;
		break;
	case 3:
		*((mmu_l3_t *)write) = (mmu_l3_t)entry;
		mmu->mmu_arch.l3_nr++;
		break;
	case 4:
		*((mmu_l4_t *)write) = (mmu_l4_t)entry;
		break;
	default:
		/* level 1 entries are managed elsewhere */
		return;
	}

	/* Drop the stale TLB entry for the faulting page. */
	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);
}


/**
 * Locate the L2 table for the fault address via the walked L1 table.
 * Fills out_l2wptr/out_l2; returns 0 on success, -EFAULT when the L1
 * table is missing or the L2 entry is empty.
 */
static int arch_mmu_find_l2(struct mmu_walk_info *info)
{
	mmu_l2_t *wptr, val;

	if (!info->out_l1)
		return -EFAULT;

	wptr = mmu_l2_wptr(info->out_l1, info->in_faddr);
	info->out_l2wptr = wptr;

	val = (mmu_l2_t)(*wptr);
	if (mmu_l2_is_empty(val)) {
		info->out_l2 = 0;
		return -EFAULT;
	}

	/* The entry stores a physical table address; map to linear virtual. */
	info->out_l2 = (mmu_l2_t *)linear_phys_to_virt((virt_addr_t)val & PAGE_MASK);
	return 0;
}

/**
 * Locate the L3 table for the fault address via the walked L2 table.
 * Fills out_l3wptr/out_l3; returns 0 on success, -EFAULT when the L2
 * table is missing or the L3 entry is empty.
 */
static int arch_mmu_find_l3(struct mmu_walk_info *info)
{
	mmu_l3_t *wptr, val;

	if (!info->out_l2)
		return -EFAULT;

	wptr = mmu_l3_wptr(info->out_l2, info->in_faddr);
	info->out_l3wptr = wptr;

	val = (mmu_l3_t)(*wptr);
	if (mmu_l3_is_empty(val)) {
		info->out_l3 = 0;
		return -EFAULT;
	}

	/* The entry stores a physical table address; map to linear virtual. */
	info->out_l3 = (mmu_l3_t *)linear_phys_to_virt((virt_addr_t)val & PAGE_MASK);
	return 0;
}

/**
 * Locate the leaf (L4) entry for the fault address. Unlike the higher
 * levels, out_l4 receives the mapped page's PHYSICAL address, not a
 * linear virtual one.
 */
static int arch_mmu_find_l4(struct mmu_walk_info *info)
{
	mmu_l4_t *wptr, val;

	if (!info->out_l3)
		return -EFAULT;

	wptr = mmu_l4_wptr(info->out_l3, info->in_faddr);
	info->out_l4wptr = wptr;

	val = (mmu_l4_t)(*wptr);
	if (mmu_l4_is_empty(val)) {
		info->out_l4 = 0;
		return -EFAULT;
	}

	/* This is the physical address of the data page itself. */
	info->out_l4 = (mmu_l4_t *)((virt_addr_t)val & PTE_ADDR_MASK);
	return 0;
}

/**
 * Return 1 when every entry in the table page @lx is zero,
 * 0 otherwise (a NULL table is treated as not-empty).
 */
static int arch_mmu_check_empt_tlx(mmu_l4_t *lx)
{
	size_t idx, nr = PAGE_SIZE / sizeof(*lx);

	if (!lx)
		return 0;

	for (idx = 0; idx < nr; idx++) {
		if (lx[idx] != 0)
			return 0;
	}

	return 1;
}

/**
 * Unmap the level-4 (leaf) entry described by @info: flush the TLB for
 * the faulting page, clear the PTE, and release the backing user page.
 *
 * NOTE(review): the TLB flush runs before the PTE is cleared; on SMP
 * another CPU could re-fill the TLB from the still-live entry in that
 * window — confirm this ordering is intentional.
 */
static int arch_mmu_unmap_l4(struct mmu_walk_info *info)
{
	mmu_l4_t l4_v;

	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;
	/* The cached walk result must match the live entry. */
	l4_v = (mmu_l4_t)(*(info->out_l4wptr));
	BUG_ON(info->out_l4 != (mmu_l4_t *)((virt_addr_t)l4_v & PTE_ADDR_MASK));

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l4wptr) = 0;
	/* out_l4 holds the physical page address; give the page back. */
	put_one_user_page((unsigned long)(info->out_l4));
	return 0;
}

/**
 * Detach the level-4 entry described by @info: flush the TLB for the
 * page and clear the PTE, but — unlike arch_mmu_unmap_l4() — keep the
 * backing page allocated; ownership stays with the caller.
 */
static int arch_mmu_cutoff_l4(struct mmu_walk_info *info)
{
	mmu_l4_t l4_v;

	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;
	/* The cached walk result must match the live entry. */
	l4_v = (mmu_l4_t)(*(info->out_l4wptr));
	BUG_ON(info->out_l4 != (mmu_l4_t *)((virt_addr_t)l4_v & PTE_ADDR_MASK));

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l4wptr) = 0;
	return 0;
}


/**
 * Tear down the walked L3 table: clear the parent (L2) entry and free
 * the table memory, but only when every entry in the table is already
 * zero; a non-empty table is left untouched (returns 0 either way).
 */
__maybe_unused static int arch_mmu_unmap_l3(struct mmu_walk_info *info)
{
	mmu_l3_t l3_v;

	if (!info->out_l3wptr || !info->out_l3)
		return -EINVAL;
	/* The cached walk result must match the live entry. */
	l3_v = (mmu_l3_t)(*(info->out_l3wptr));
	BUG_ON(info->out_l3 != (mmu_l3_t *)linear_phys_to_virt((virt_addr_t)l3_v & PAGE_MASK));

	/* Only an entirely empty table may be torn down. */
	if (!arch_mmu_check_empt_tlx((mmu_l4_t *)(info->out_l3)))
		return 0;

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l3wptr) = 0;
	/* Table memory came from kzalloc(), so kfree() releases it. */
	kfree((void *)(info->out_l3));
	return 0;
}

/**
 * Tear down the walked L2 table: clear the parent (L1) entry and free
 * the table memory, but only when every entry in the table is already
 * zero; a non-empty table is left untouched (returns 0 either way).
 */
__maybe_unused static int arch_mmu_unmap_l2(struct mmu_walk_info *info)
{
	mmu_l2_t l2_v;

	if (!info->out_l2wptr || !info->out_l2)
		return -EINVAL;
	/* The cached walk result must match the live entry. */
	l2_v = (mmu_l2_t)(*(info->out_l2wptr));
	BUG_ON(info->out_l2 != (mmu_l2_t *)linear_phys_to_virt((virt_addr_t)l2_v & PAGE_MASK));

	/* Only an entirely empty table may be torn down. */
	if (!arch_mmu_check_empt_tlx((mmu_l4_t *)(info->out_l2)))
		return 0;

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	*(info->out_l2wptr) = 0;
	/* Table memory came from kzalloc(), so kfree() releases it. */
	kfree((void *)(info->out_l2));
	return 0;
}

/**
 * Ensure an L2 table exists for the fault address, allocating and
 * installing a fresh one when the L1 slot is empty.
 * Fills out_l2wptr/out_l2; returns 0 on success, -ENOMEM on failure.
 */
static int arch_mmu_ttbl_map_l2(struct mmu_walk_info *info)
{
	mmu_l2_t *wptr, cur;
	virt_addr_t tbl;

	wptr = mmu_l2_wptr(info->out_l1, info->in_faddr);
	info->out_l2wptr = wptr;

	cur = (mmu_l2_t)(*wptr);
	if (!mmu_l2_is_empty(cur)) {
		/* Already points at an L2 table; translate to linear virtual. */
		info->out_l2 = (mmu_l2_t *)linear_phys_to_virt((virt_addr_t)cur & PAGE_MASK);
		return 0;
	}

	tbl = (virt_addr_t)kzalloc(PAGE_SIZE, PAF_KERNEL);
	if (!tbl)
		return -ENOMEM;

	/* Install the new table; attribute value 3 marks a table descriptor. */
	arch_mmu_ttbl_set_entry(info, wptr, linear_virt_to_phys(tbl), 3, 2);
	info->out_l2 = (mmu_l2_t *)tbl;
	return 0;
}

/**
 * Ensure an L3 table exists for the fault address, allocating and
 * installing a fresh one when the L2 slot is empty.
 * Fills out_l3wptr/out_l3; returns 0 on success, -ENOMEM on failure.
 */
static int arch_mmu_ttbl_map_l3(struct mmu_walk_info *info)
{
	mmu_l3_t *wptr, cur;
	virt_addr_t tbl;

	wptr = mmu_l3_wptr(info->out_l2, info->in_faddr);
	info->out_l3wptr = wptr;

	cur = (mmu_l3_t)(*wptr);
	if (!mmu_l3_is_empty(cur)) {
		/* Already points at an L3 table; translate to linear virtual. */
		info->out_l3 = (mmu_l3_t *)linear_phys_to_virt((virt_addr_t)cur & PAGE_MASK);
		return 0;
	}

	tbl = (virt_addr_t)kzalloc(PAGE_SIZE, PAF_KERNEL);
	if (!tbl)
		return -ENOMEM;

	/* Install the new table; attribute value 3 marks a table descriptor. */
	arch_mmu_ttbl_set_entry(info, wptr, linear_virt_to_phys(tbl), 3, 3);
	info->out_l3 = (mmu_l3_t *)tbl;
	return 0;
}

/**
 * Core translation-table fault handler: locate (allocating on demand)
 * the L2 and L3 tables covering info->in_faddr and record the L4 slot
 * write pointer. The return value reports how deep the walk got:
 *   MMU_MAP_L1_TBL - no L1 table, or the L2 table could not be mapped;
 *   MMU_MAP_L2_TBL - the L3 table could not be mapped;
 *   MMU_MAP_L3_TBL - success, out_l4wptr is valid.
 */
int arch_mmu_ttbl_fault(struct mmu_walk_info *info)
{
	mmu_l1_t *mmu_l1;
	int ret;
	mmu_l1 = arch_mmu_get_l1(info->in_mmu, info->in_faddr);
	if (!mmu_l1) {
		info->out_l1 = NULL;
		return MMU_MAP_L1_TBL;
	}
	info->out_l1 = mmu_l1;

	/* Ensure the L2 table exists (allocates one when the slot is empty). */
	ret = arch_mmu_ttbl_map_l2(info);
	if (ret) {
		info->out_l2 = NULL;
		return MMU_MAP_L1_TBL;
	}

	/* Ensure the L3 table exists. */
	ret = arch_mmu_ttbl_map_l3(info);
	if (ret) {
		info->out_l3 = NULL;
		return MMU_MAP_L2_TBL;
	}

	/* Record where the leaf (L4) entry will be written. */
	info->out_l4wptr =
			mmu_l4_wptr(info->out_l3, info->in_faddr);

	return MMU_MAP_L3_TBL;
}

/**
 * Walk the translation tables for info->in_faddr without allocating,
 * filling all out_* fields on the way down. Returns 0 when a complete
 * mapping exists, MMU_MAP_L1_TBL when no L1 table exists, or the
 * first failing level's error otherwise.
 */
int arch_mmu_find_ttbl(struct mmu_walk_info *info)
{
	mmu_l1_t *l1;
	int ret;

	l1 = arch_mmu_get_l1(info->in_mmu, info->in_faddr);
	if (!l1) {
		info->out_l1 = NULL;
		return MMU_MAP_L1_TBL;
	}
	info->out_l1 = l1;

	ret = arch_mmu_find_l2(info);
	if (!ret)
		ret = arch_mmu_find_l3(info);
	if (!ret)
		ret = arch_mmu_find_l4(info);
	return ret;
}

/**
 * Unmap one virtual address. Walks the tables for info->in_faddr; if
 * no mapping exists this is a no-op (returns 0). Otherwise the L4
 * entry is cleared and the backing page released. The level-2/3
 * tables are deliberately left in place (see the #if 0 below).
 */
int arch_mmu_unmap_vaddr(struct mmu_walk_info *info)
{
	int ret;

	ret = arch_mmu_find_ttbl(info);
	if (ret)
		return 0;
	ret = arch_mmu_unmap_l4(info);
	if (ret)
		return ret;
/**
 * For now the level-2/3 page tables need not be freed.
 */
#if 0
	ret = mmu_unmap_l3(info);
	if (ret)
		return ret;
	ret = mmu_unmap_l2(info);
	if (ret)
		return ret;
#endif
	return 0;
}

/**
 * Map one virtual address: build the page tables down to L4 for
 * info->in_faddr, allocate a backing page, map it kernel-writable so
 * it can be zeroed through the faulting address, then install the
 * final mapping with the caller's protection.
 */
int arch_mmu_map_vaddr(struct mmu_walk_info *info)
{
	int err;
	virt_addr_t p_addr;
	err = arch_mmu_ttbl_fault(info);
	if (err != MMU_MAP_L3_TBL)
		BUG();

	/* A live entry with mismatched protection is a hard error here. */
	err = arch_mmu_ttbl_permission(info);
	if (err == MMU_L4_PERM)
		BUG();
	/**
	 * Note kept from the original: this should really allocate a
	 * user-space physical page.
	 */
	p_addr = (virt_addr_t)get_one_user_page();

	if (!p_addr)
		return -ENOMEM;
	if (p_addr & (PAGE_SIZE - 1))
		return -EINVAL;
	info->in_paddr = p_addr;

	/* Temporarily map the new page kernel-RW so it can be zeroed. */
	err = arch_mmu_kernel_rw_map(info);
	if (err) {
		put_one_user_page(p_addr);
		return -EFAULT;
	}

	/*
	 * NOTE(review): if the slot was already mapped (permission check
	 * returned 0 above), the old page is replaced without being freed
	 * — confirm this cannot leak.
	 */
	memset((char *)(info->in_faddr & PAGE_MASK), 0, PAGE_SIZE);
	
	return arch_mmu_complete_map(info);
}

/**
 * Cut off the mapping of one virtual address: the L4 entry is cleared
 * but the backing page is NOT released. Missing mappings are a no-op.
 */
int arch_mmu_cutoff_vaddr(struct mmu_walk_info *info)
{
	/* No mapping present — nothing to cut off. */
	if (arch_mmu_find_ttbl(info))
		return 0;

	return arch_mmu_cutoff_l4(info);
}


/**
 * Rewrite the walked L4 entry with info->in_prot while keeping the
 * physical address bits, flushing the stale TLB entry first.
 */
static int arch_mmu_mprotect_l4(struct mmu_walk_info *info)
{
	mmu_l4_t l4_v;

	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;
	/* The cached walk result must match the live entry. */
	l4_v = (mmu_l4_t)(*(info->out_l4wptr));
	BUG_ON(info->out_l4 != (mmu_l4_t *)((virt_addr_t)l4_v & PTE_ADDR_MASK));

	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);

	/* Re-install the same physical page with the new protection bits. */
	arch_mmu_ttbl_set_entry(info, info->out_l4wptr,
			((virt_addr_t)l4_v & PTE_ADDR_MASK), info->in_prot, 4);

	return 0;
}

/**
 * Change the protection of one mapped virtual address to
 * info->in_prot. Missing mappings are a no-op (returns 0).
 */
int arch_mmu_mprotect_vaddr(struct mmu_walk_info *info)
{
	/* No mapping present — nothing to change. */
	if (arch_mmu_find_ttbl(info))
		return 0;

	return arch_mmu_mprotect_l4(info);
}


/**
 * Mark the page frame backing the walked L4 entry as dirty.
 */
static int arch_mmu_dirty_vaddr_l4(struct mmu_walk_info *info)
{
	mmu_l4_t l4_v, *l4_phy = NULL;
	struct page_frame* page = NULL;
	
	if (!info->out_l4wptr || !info->out_l4)
		return -EINVAL;
	l4_v = mmu_l4_value(info->out_l4wptr);
	arch_mmu_flush_tlb(info, info->in_faddr, info->in_faddr + PAGE_SIZE, 0);
	/* Resolve the entry to its physical page and set the dirty flag. */
	l4_phy = mmu_l4_val_paddr(l4_v);
	page = phys_addr_to_page((unsigned long)l4_phy);
	if (page)
		set_pageflag_dirty(page);
	return 0;
}

/**
 * Mark the page frame mapped at info->in_faddr dirty.
 * Missing mappings are a no-op (returns 0).
 */
int arch_mmu_dirty_vaddr(struct mmu_walk_info *info)
{
	/* No mapping present — nothing to dirty. */
	if (arch_mmu_find_ttbl(info))
		return 0;

	return arch_mmu_dirty_vaddr_l4(info);
}

/* Release the physical user page referenced by a leaf (L4) entry. */
static int arch_mmu_clean_tbl_l4(mmu_l4_t *phyl4)
{
	if (!phyl4)
		return -EINVAL;

	put_one_user_page((unsigned long)(phyl4));
	return 0;
}

/**
 * Free every user page referenced by the L3 table @l3, then the table
 * itself. Returns -EFAULT if any entry could not be accounted for.
 */
static int arch_mmu_clean_tbl_l3(mmu_l3_t *l3)
{
	int i, count, freed = 0, empty = 0;
	virt_addr_t page;

	if (!l3)
		return -EINVAL;

	count = PAGE_SIZE / sizeof(*l3);
	for (i = 0; i < count; i++) {
		if (!l3[i]) {
			empty++;
			continue;
		}
		/* L3 slots hold the raw physical addresses of the data pages. */
		page = (virt_addr_t)l3[i] & PTE_ADDR_MASK;
		if (!arch_mmu_clean_tbl_l4((mmu_l4_t *)page))
			freed++;
	}

	/* Every entry must be either empty or successfully released. */
	if (freed + empty != count)
		return -EFAULT;

	memset(l3, 0, PAGE_SIZE);
	kfree(l3);
	return 0;
}

/**
 * Recursively free every L3 table referenced by the L2 table @l2,
 * then the L2 table itself. Returns -EFAULT if any entry could not be
 * accounted for.
 */
static int arch_mmu_clean_tbl_l2(mmu_l2_t *l2)
{
	int i, count, freed = 0, empty = 0;
	virt_addr_t tbl;

	if (!l2)
		return -EINVAL;

	count = PAGE_SIZE / sizeof(*l2);
	for (i = 0; i < count; i++) {
		if (!l2[i]) {
			empty++;
			continue;
		}
		/* L2 slots hold physical table addresses; map to linear virtual. */
		tbl = (virt_addr_t)linear_phys_to_virt((virt_addr_t)l2[i] & PAGE_MASK);
		if (!arch_mmu_clean_tbl_l3((mmu_l3_t *)tbl))
			freed++;
	}

	/* Every entry must be either empty or successfully released. */
	if (freed + empty != count)
		return -EFAULT;

	memset(l2, 0, PAGE_SIZE);
	kfree(l2);
	return 0;
}

/**
 * Recursively free every L2 table referenced by the L1 table @l1,
 * then the L1 table itself. Returns -EFAULT if any entry could not be
 * accounted for.
 */
static int arch_mmu_clean_tbl_l1(mmu_l1_t *l1)
{
	int i, count, freed = 0, empty = 0;
	virt_addr_t tbl;

	if (!l1)
		return -EINVAL;

	count = PAGE_SIZE / sizeof(*l1);
	for (i = 0; i < count; i++) {
		if (!l1[i]) {
			empty++;
			continue;
		}
		/* L1 slots hold physical table addresses; map to linear virtual. */
		tbl = (virt_addr_t)linear_phys_to_virt((virt_addr_t)l1[i] & PAGE_MASK);
		if (!arch_mmu_clean_tbl_l2((mmu_l2_t *)tbl))
			freed++;
	}

	/* Every entry must be either empty or successfully released. */
	if (freed + empty != count)
		return -EFAULT;

	memset(l1, 0, PAGE_SIZE);
	kfree(l1);
	return 0;
}

/**
 * Tear down the user page tables of @info's mmu, releasing all mapped
 * pages and table pages, then clear the user L1 pointer.
 *
 * Cleanup: 'ret' was initialized to -EFAULT but always overwritten
 * before use; the dead initializer is removed.
 */
int arch_mmu_clean_tbl(struct mmu_walk_info *info)
{
	int ret;
	mmu_l1_t *l1 = arch_mmu_get_l1(info->in_mmu, 0);

	if (!l1)
		return 0;

	ret = arch_mmu_clean_tbl_l1(l1);
	if (ret)
		return ret;

	info->in_mmu->mmu_arch.user_l1_tbl = NULL;
	return 0;
}

/**
 * Tear down a process's user page tables at exit time and clear the
 * user L1 pointer in its memory descriptor.
 */
int arch_exit_mmu(struct process_desc* proc, struct mem_desc* m)
{
	mmu_l1_t *user_l1;
	int err;

	if (proc == NULL || m == NULL)
		return -EINVAL;

	/* Address 0 selects the user-side L1 table. */
	user_l1 = arch_mmu_get_l1(&m->mem_arch.mmu, 0);
	if (user_l1 == NULL)
		return 0;

	err = arch_mmu_clean_tbl_l1(user_l1);
	if (err != 0)
		return err;

	m->mem_arch.mmu.mmu_arch.user_l1_tbl = NULL;
	return 0;
}

int arch_mmu_ttbl_permission(struct mmu_walk_info *info)
{
	if (!info || !info->out_l4wptr)
		return -EINTR;
	if (mmu_l4_is_empty(*(info->out_l4wptr)))
		return MMU_L4_EMPT;
	/**
	 * 处理l4页表项中的权限问题
	 */
	if ((*(info->out_l4wptr) & ~PTE_ADDR_MASK) == info->in_prot)
		return 0;
	return MMU_L4_PERM;
}

/**
 * Finish a mapping started by arch_mmu_ttbl_fault() on the same
 * mmu_walk_info: write the leaf entry binding info->in_paddr with
 * info->in_prot. A data page must already have been allocated and
 * stored in info->in_paddr.
 */
int arch_mmu_complete_map(struct mmu_walk_info *info)
{
	if (!info || !info->out_l4wptr ||
			!info->in_paddr || !info->in_mmu)
		return -EFAULT;

	arch_mmu_ttbl_set_entry(info, info->out_l4wptr,
			info->in_paddr, info->in_prot, 4);
	return 0;
}

/**
 * Map info->in_paddr at the walked L4 slot with kernel read/write
 * access and user-execute-never, ignoring info->in_prot. Used to give
 * the kernel temporary write access (e.g. to zero a fresh page).
 */
int arch_mmu_kernel_rw_map(struct mmu_walk_info *info)
{
	u64 prot = PAGE_DEFAULT | PTE_UXN;	/* deny user execution */
	prot &= ~PTE_RDONLY;			/* make the page writable */
	/* prot &= ~PTE_UXN; */
	if (!info || !info->out_l4wptr ||
			!info->in_paddr || !info->in_mmu)
		return -EFAULT;

	arch_mmu_ttbl_set_entry(info, info->out_l4wptr,
			info->in_paddr, prot, 4);
	return 0;
}


/**
 * Re-express the section mapping held in @pt_l3 as PTRS_PER_PT_L4
 * individual kernel-exec page entries written into the table @pt_l4.
 */
static void break_pt_l3_sections(pt_l3_t *pt_l3, pt_l4_val_t *pt_l4)
{
	unsigned long pfn = pmd_pfn(*pt_l3);
	int idx;

	for (idx = 0; idx < PTRS_PER_PT_L4; idx++, pfn++, pt_l4++)
		set_pt_l4(pt_l4, pfn_pte(pfn, PAGE_ATTR_KERNEL_EXEC));
}

/**
 * Populate level-4 (leaf) entries for [addr, end) under @pt_l3,
 * allocating the L4 table first when the L3 slot is empty or currently
 * holds a section mapping (which is broken up into individual pages).
 */
static void alloc_and_set_pt_l4(pt_l3_t *pt_l3, unsigned long addr,
				  unsigned long end, unsigned long page_num,
				  page_attr_t prot,
				  void *(*alloc)(unsigned long size))
{
	pt_l4_val_t *pt_l4;

	if (pt_l3_is_empty(*pt_l3) || pt_l3_is_section(*pt_l3)) {
		pt_l4 = alloc(PTRS_PER_PT_L4 * sizeof(pt_l4_val_t));
		
		/* An existing section must be re-expressed as page entries. */
		if (pt_l3_is_section(*pt_l3))
			break_pt_l3_sections(pt_l3, pt_l4);

		__attach_to_pt_l3(pt_l3, linear_virt_to_phys(pt_l4), PT_L3_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pt_l3_is_invalid(*pt_l3));

	/* Fill one leaf entry per page across the requested range. */
	pt_l4 = pt_l4_ptr(pt_l3, addr);
	do {
		set_pt_l4(pt_l4, pfn_pte(page_num, prot));

		page_num++;
		pt_l4++;
		addr += PAGE_SIZE;
	} while (addr != end);
}

/**
 * Split the large section mapping held in @old_pt_l2 into
 * PTRS_PER_PT_L3 smaller section entries written into the new L3
 * table @pt_l3, preserving the original attribute bits.
 */
static void break_pt_l2_sections(pt_l2_t *old_pt_l2, pt_l3_t *pt_l3)
{
	unsigned long addr = pt_l2_page_num(*old_pt_l2) << PAGE_SHIFT;
	/* XOR strips the address bits, leaving only the attribute bits. */
	page_attr_t prot = page_attr(pt_l2_val(*old_pt_l2) ^ addr);
	int i = 0;

	for (i = 0; i < PTRS_PER_PT_L3; i++) {
		set_pt_l3(pt_l3, pt_l3(addr | page_attr_val(prot)));
		addr += PT_L3_SIZE;
		pt_l3++;
	}
}

/**
 * Populate L3 entries (sections or L4 tables) for [addr, end),
 * creating the L3 table first when the L2 slot is empty or holds a
 * large section that must be split.
 */
static void alloc_and_set_pt_l3(struct memory_map_desc *mm, pt_l2_t *pt_l2,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, page_attr_t prot,
				  void *(*alloc)(unsigned long size))
{
	pt_l3_t *pt_l3;
	unsigned long next;

	/*
	 * The L2 entry is either empty, or a section descriptor mapping a
	 * whole 1G block.
	 */
	if (pt_l2_is_empty(*pt_l2) || pt_l2_is_section(*pt_l2)) {
		/* Allocate the L3 (PMD) table. */
		pt_l3 = alloc(PTRS_PER_PT_L3 * sizeof(pt_l3_t));
		if (pt_l2_is_section(*pt_l2)) /* previously mapped as 1G */
			/* split the old block mapping into smaller entries */
			break_pt_l2_sections(pt_l2, pt_l3);

		/* Point the L2 entry at the new L3 table memory. */
		attach_to_pt_l2(pt_l2, pt_l3);
		/* Drop stale TLB contents. */
		flush_tlb_all();
	}
	BUG_ON(pt_l2_is_invalid(*pt_l2));

	pt_l3 = pt_l3_ptr(pt_l2, addr);
	do {
		next = pt_l3_promote(addr, end);

		/* Use a 2M section mapping when everything is 2M aligned. */
		if (((addr | next | phys) & ~PT_L3_MASK) == 0) {
			pt_l3_t old_pt_l3 =*pt_l3;
			set_pt_l3(pt_l3, pt_l3(phys |page_attr_val(mk_sect_prot(prot))));
			/*
			 * If this range was already mapped during boot the
			 * old entry must be flushed from the TLB.
			 */
			if (!pt_l3_is_empty(old_pt_l3))
				flush_tlb_all();
		} else {/* fall back to building the L4 (page) level */
			alloc_and_set_pt_l4(pt_l3, addr, next, __phys_to_pgnum(phys),
				       prot, alloc);
		}

		phys += next - addr;
		pt_l3++;
		addr = next;
	} while (addr != end);
}

/**
 * Populate L2 entries for [addr, end) under @pt_l1, allocating the L2
 * table first when the L1 slot is empty, then building each L3 level
 * underneath.
 */
static void alloc_and_set_pt_l2(struct memory_map_desc *mm,
				pt_l1_t *pt_l1, unsigned long addr, unsigned long end,
				phys_addr_t phys, page_attr_t prot,
				void *(*alloc)(unsigned long size))
{
	pt_l2_t *pt_l2;
	unsigned long next;

	/* The L1 entry is still all-zero: no L2 table yet. */
	if (pt_l1_is_empty(*pt_l1)) {
		/* Allocate the L2 table memory... */
		pt_l2 = alloc(PTRS_PER_PT_L2 * sizeof(pt_l2_t));
		/* ...and hook it into the L1 entry. */
		attach_to_pt_l1(pt_l1, pt_l2);
	}
	BUG_ON(pt_l1_is_invalid(*pt_l1));

	/* L2 descriptor covering @addr. */
	pt_l2 = pt_l2_ptr(pt_l1, addr);
	/*
	 * Fill successive L2 entries, allocating and initializing the
	 * next table level as we go.
	 */
	do {
		/*
		 * Address covered by the next L2 entry, or the end
		 * address, whichever comes first.
		 */
		next = pt_l2_promote(addr, end);

		/* Build the L3 entries for this chunk. */
		alloc_and_set_pt_l3(mm, pt_l2, addr, next, phys, prot, alloc);

		phys += next - addr;
		pt_l2++;
		addr = next; 
	} while (addr != end);
}

/**
 * Create the linear mapping for the physical range [phys, phys+size)
 * at virtual address @virt, allocating page tables via @alloc where
 * necessary.
 */
static void  __linear_mapping(struct memory_map_desc *mm, pt_l1_t *pt_l1,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, page_attr_t prot,
				    void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	/* Both address and length must be aligned to whole pages. */
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
	end = addr + length;

	/* Loop over the L1 entries spanning the range. */
	do {
		/*
		 * Address covered by the next L1 entry, or the end
		 * address, whichever comes first.
		 */
		next = pt_l1_promote(addr, end);
		/* Allocate and fill the L2 level for this chunk. */
		alloc_and_set_pt_l2(mm, pt_l1, addr, next, phys, prot, alloc);
		phys += next - addr;
		pt_l1++;
		addr = next;
	} while (addr != end);
}

/**
 * Allocate a zeroed page-table of @sz bytes from permanent boot
 * memory, aligned to its own size. Panics (BUG) when out of memory.
 */
static void __init *alloc_page_table(unsigned long sz)
{
	void *table = alloc_boot_mem_permanent(sz, sz);

	BUG_ON(table == NULL);
	memset(table, 0, sz);
	return table;
}

/**
 * Map [phys, phys+size) at kernel virtual address @virt in the kernel
 * page tables, allocating intermediate tables from boot memory.
 */
static void __init linear_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, page_attr_t prot)
{
	pt_l1_t *pt_l1 = pt_l1_ptr(kern_memory_map.pt_l1, virt & PAGE_MASK);
	/*
	 * The kernel virtual address space starts at KERNEL_VA_START;
	 * refuse to map anything below it.
	 */
	if (virt < KERNEL_VA_START) {
		pr_warn("BUG: failure to create linear-space for %pa at 0x%016lx.\n",
					&phys, virt);
		return;
	}

	__linear_mapping(&kern_memory_map, pt_l1, phys, virt,
			 size, prot, alloc_page_table);
}

/* Page of zeroes used below to neutralize the boot identity mapping. */
void *empty_zero_page;
/**
 * Build the kernel linear mapping covering every registered physical
 * memory region, then retire the boot identity mapping.
 */
void __init init_linear_mapping(void)
{
	int i;

	/* Allocate the permanent zero page. */
	empty_zero_page = alloc_boot_mem_permanent(PAGE_SIZE, PAGE_SIZE);
	/*
	 * Point the identity mapping (TTBR0) at the zero page,
	 * effectively disabling it.
	 */
	set_ttbr0(linear_virt_to_phys(empty_zero_page));

	/* Walk every registered memory region. */
	for (i = 0; i < all_memory_regions.cnt; i++) {
		unsigned long start;
		unsigned long end;

		start = all_memory_regions.regions[i].base;
		end = start + all_memory_regions.regions[i].size;

		/*
		 * NOTE(review): 'break' (not 'continue') assumes regions
		 * are sorted and a degenerate region marks the end — confirm.
		 */
		if (start >= end)
			break;

		/* Create the linear mapping for this region. */
		linear_mapping(start, (unsigned long)linear_phys_to_virt(start),
				end - start, PAGE_ATTR_KERNEL_EXEC);
	}
	
	flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}
