#ifndef __ASM_MMU_H
#define __ASM_MMU_H

/*
 * Software-defined PTE flags.
 * Unused by the hardware; borrowed here for software bookkeeping.
 */
#define PTE_VALID		(_AT(pt_l4_val_t, 1) << 0)
#define PTE_DIRTY		(_AT(pt_l4_val_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pt_l4_val_t, 1) << 56)
#define PTE_WRITE		(_AT(pt_l4_val_t, 1) << 57)
#define PTE_PROT_NONE		(_AT(pt_l4_val_t, 1) << 58) /* only when !PTE_VALID */

#define PROT_READ	0x1		/* page can be read */
#define PROT_WRITE	0x2		/* page can be written */
#define PROT_EXEC	0x4		/* page can be executed */
#define PROT_NONE	0x0		/* page can not be accessed */
#define PROT_USER	0x8		/* page can be user */

#ifndef __ASSEMBLY__

#include <dim-sum/smp_lock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <dim-sum/string.h>
#include <asm/memory.h>

struct memory_map_desc;
struct vma_desc;

struct memory_map_desc;

extern struct   memory_map_desc kern_memory_map;

/**
 * ARM64 architecture-specific part of a memory-map descriptor.
 */
struct arch_memory_map {
	/**
	 * Spinlock protecting the ASID.
	 */
	struct smp_lock id_lock;
	/**
	 * ASID; currently unused, but will be needed
	 * once user-space processes are supported.
	 */
	unsigned int asid;
};

/* 64-bit translation-table descriptor value, one type per level (L1..L4). */
typedef u64 mmu_l4_t;
typedef u64 mmu_l3_t;
typedef u64 mmu_l2_t;
typedef u64 mmu_l1_t;

/**
 * MMU-hardware-specific descriptor.
 */
struct arch_mmu_desc {
	struct smp_lock lock;
	/**
	 * Number of translation levels the MMU uses.
	*/
	u64 mmu_level;
	/**
	 * Level-1 tables corresponding to the MMU's
	 * ttbr0_el1 and ttbr1_el1 registers.
	 */
	volatile mmu_l1_t* kern_l1_tbl;
	volatile mmu_l1_t* user_l1_tbl;
	u64 user_asid;
	/**
	 * L1/L2/L3 table counts for this MMU.
	 */
	u64 l1_nr;
	u64 l2_nr;
	u64 l3_nr;
};

/**
 * MMU descriptor.
 */
struct mmu_desc {
	struct smp_lock lock;
	/* Owning memory descriptor; NULL until attached (see mmu_desc_init). */
	struct mem_desc* at_mem;
	/**
	 * MMU-hardware-specific part.
	 */
	struct arch_mmu_desc mmu_arch;
};

#define mmu_at_arch_mem_desc(mmu) container_of(mmu, struct arch_mem_desc, mmu)
#define mmu_at_mem(mmu) container_of(mmu_at_arch_mem_desc(mmu), struct mem_desc, mem_arch)

#define MMU_MAP_L1_TBL (1)
#define MMU_MAP_L2_TBL (2)
#define MMU_MAP_L3_TBL (3)
#define MMU_MAP_L4_TBL (4)

#define MMU_L4_PERM (1)
#define MMU_L4_EMPT (2)

/*
 * In/out parameter bundle for the page-table walk operations
 * (the arch_mmu_* functions declared below).
 * "in_" fields are inputs; "out_" fields are filled by the walk.
 */
struct mmu_walk_info
{
	struct mmu_desc* in_mmu;
	/**
	 * Virtual address (faulting/target address).
	 */
	virt_addr_t in_faddr;
	/**
	 * Physical address to bind the virtual address to.
	 */
	phys_addr_t in_paddr;
	/* Range start; aliased per operation (map/unmap/mprotect/dirty). */
	union {
		virt_addr_t in_mapstart;
		virt_addr_t in_umstart;
		virt_addr_t in_mprotstart;
		virt_addr_t in_dirtystart;
	};
	/* Range end; aliased per operation (map/unmap/mprotect/dirty). */
	union {
		virt_addr_t in_mapend;
		virt_addr_t in_umend;
		virt_addr_t in_mprotend;
		virt_addr_t in_dirtyend;
	};
	
	/**
	 * Attributes (PTE bits) for the virtual address.
	 */
	unsigned long in_prot;

	/*
	 * Per-level results of the walk: table pointers (out_lN) and
	 * write slots (out_lNwptr).  NOTE(review): exact semantics are
	 * set by arch_mmu_find_ttbl and friends — verify there.
	 */
	mmu_l1_t* out_l1;
	mmu_l2_t* out_l2;
	mmu_l3_t* out_l3;
	mmu_l4_t* out_l4;
	mmu_l1_t* out_l1wptr;
	mmu_l2_t* out_l2wptr;
	mmu_l3_t* out_l3wptr;
	mmu_l4_t* out_l4wptr;
};

/* Invalidate the data cache for [start, start + size). */
static inline void cpu_mmu_invalidate_range(virt_addr_t start,
					    virt_addr_t size)
{
	virt_addr_t end = start + size;

	__inval_cache_range(start, end);
}

int arch_create_mmu(struct mem_desc *m);
int arch_mmu_set_info_prot(struct mmu_walk_info *info, unsigned long prot);
int arch_mmu_vma_permission(struct vma_desc *v, virt_addr_t addr, unsigned long data);
unsigned long arch_mmu_vma_to_esr(struct vma_desc *v);

void arch_mmu_set_tlb(struct mmu_desc* mmu);
u64 arch_mmu_get_cpu_el1_ttbr0(void);
u64 arch_mmu_get_cpu_el1_ttbr1(void);
u64 arch_mmu_get_utlb(void);
u64 arch_mmu_get_ktlb(void);
mmu_l1_t *arch_mmu_get_l1(struct mmu_desc *mmu, virt_addr_t addr);
void arch_mmu_ttbl_set_entry(struct mmu_walk_info *info, void *write,
	virt_addr_t p, u64 attr, int level);
/**
 * Flush TLB entries for the given range/level.
 */
void arch_mmu_flush_tlb(struct mmu_walk_info *info, virt_addr_t addr, virt_addr_t end, int level);
int arch_mmu_ttbl_fault(struct mmu_walk_info *info);
int arch_mmu_find_ttbl(struct mmu_walk_info *info);
int arch_mmu_unmap_vaddr(struct mmu_walk_info *info);
int arch_mmu_cutoff_vaddr(struct mmu_walk_info *info);
int arch_mmu_mprotect_vaddr(struct mmu_walk_info *info);
int arch_mmu_dirty_vaddr(struct mmu_walk_info *info);
int arch_mmu_clean_tbl(struct mmu_walk_info *info);
int arch_exit_mmu(struct process_desc* proc, struct mem_desc* m);
int arch_mmu_ttbl_permission(struct mmu_walk_info *info);
int arch_mmu_complete_map(struct mmu_walk_info *info);
int arch_mmu_kernel_rw_map(struct mmu_walk_info *info);
int arch_mmu_map_vaddr(struct mmu_walk_info *info);

#define mmu_l1_index(addr)		\
	(((addr) >> MMU_L1_SHIFT) & (PTRS_PER_MMU_L1 - 1))
#define mmu_l2_index(addr)		\
	(((addr) >> MMU_L2_SHIFT) & (PTRS_PER_MMU_L2 - 1))
#define mmu_l3_index(addr)		\
	(((addr) >> MMU_L3_SHIFT) & (PTRS_PER_MMU_L3 - 1))

/* Write pointer to the L1 slot for @addr (the slot holds an L2 descriptor). */
static inline  mmu_l2_t *mmu_l2_wptr(mmu_l1_t *ttbl, unsigned long addr)
{
	unsigned long idx = mmu_l1_index(addr);

	return &ttbl[idx];
}

/* Read the L2 descriptor value through its write pointer. */
static inline  mmu_l2_t mmu_l2_value(mmu_l2_t *l2rwa)
{
	mmu_l2_t val = *l2rwa;

	return val;
}

/* Physical address of the table referenced by an L2 descriptor value. */
static inline  mmu_l2_t* mmu_l2_val_paddr(mmu_l2_t l2val)
{
	mmu_l2_t pfn = (l2val & MMU_LX_PFN_MASK) >> MMU_LX_PFN_SHIFT;

	return (mmu_l2_t*)(pfn << PAGE_SHIFT);
}

/* Linear-map virtual address of the table referenced by an L2 descriptor. */
static inline  mmu_l2_t* mmu_l2_val_vaddr(mmu_l2_t l2val)
{
	virt_addr_t phys = (virt_addr_t)mmu_l2_val_paddr(l2val);

	return (mmu_l2_t*)linear_phys_to_virt(phys);
}

/* Write pointer to the L2 slot for @addr (the slot holds an L3 descriptor). */
static inline  mmu_l3_t *mmu_l3_wptr(mmu_l2_t *mmu_l2, unsigned long addr)
{
	unsigned long idx = mmu_l2_index(addr);

	return (mmu_l3_t*)&mmu_l2[idx];
}

/* Read the L3 descriptor value through its write pointer. */
static inline  mmu_l3_t mmu_l3_value(mmu_l3_t *l3rwa)
{
	mmu_l3_t val = *l3rwa;

	return val;
}

/* Physical address of the table referenced by an L3 descriptor value. */
static inline  mmu_l3_t* mmu_l3_val_paddr(mmu_l3_t l3val)
{
	mmu_l3_t pfn = (l3val & MMU_LX_PFN_MASK) >> MMU_LX_PFN_SHIFT;

	return (mmu_l3_t*)(pfn << PAGE_SHIFT);
}

/* Linear-map virtual address of the table referenced by an L3 descriptor. */
static inline  mmu_l3_t* mmu_l3_val_vaddr(mmu_l3_t l3val)
{
	virt_addr_t phys = (virt_addr_t)mmu_l3_val_paddr(l3val);

	return (mmu_l3_t*)linear_phys_to_virt(phys);
}

/* Write pointer to the L3 slot for @addr (the slot holds an L4 descriptor). */
static inline  mmu_l4_t *mmu_l4_wptr(mmu_l3_t *mmu_l3, unsigned long addr)
{
	unsigned long idx = mmu_l3_index(addr);

	return (mmu_l4_t*)&mmu_l3[idx];
}

/* Read the L4 descriptor value through its write pointer. */
static inline  mmu_l4_t mmu_l4_value(mmu_l4_t *l4rwa)
{
	mmu_l4_t val = *l4rwa;

	return val;
}

/* Physical address of the page referenced by an L4 descriptor value. */
static inline  mmu_l4_t* mmu_l4_val_paddr(mmu_l4_t l4val)
{
	mmu_l4_t pfn = (l4val & MMU_LX_PFN_MASK) >> MMU_LX_PFN_SHIFT;

	return (mmu_l4_t*)(pfn << PAGE_SHIFT);
}

/* Linear-map virtual address of the page referenced by an L4 descriptor. */
static inline  mmu_l4_t* mmu_l4_val_vaddr(mmu_l4_t l4val)
{
	virt_addr_t phys = (virt_addr_t)mmu_l4_val_paddr(l4val);

	return (mmu_l4_t*)linear_phys_to_virt(phys);
}


#define pt_l1_index(addr)		(((addr) >> PT_L1_SHIFT) & (PTRS_PER_PT_L1 - 1))
/* Pointer to the L1 page-table entry covering @addr. */
static inline  pt_l1_t * pt_l1_ptr(pt_l1_t *pt_l1, unsigned long addr)
{
	unsigned long idx = pt_l1_index(addr);

	return &pt_l1[idx];
}

#define pt_l3_index(addr)		(((addr) >> PT_L3_SHIFT) & (PTRS_PER_PT_L3 - 1))

/*
 * Virtual address of the L3 table referenced by an L2 entry.
 * The (s32)PAGE_MASK cast sign-extends the mask to 64 bits, so the
 * low page-offset bits are stripped while PHYS_MASK bounds the
 * physical address above.
 */
static inline pt_l3_t *pt_l2_page_vaddr(pt_l2_t pt_l2)
{
	return linear_phys_to_virt(pt_l2_val(pt_l2) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Pointer to the L3 entry for @addr within the table referenced by *pt_l2. */
static inline pt_l3_t *pt_l3_ptr(pt_l2_t *pt_l2, unsigned long addr)
{
	pt_l3_t *tbl = (pt_l3_t *)pt_l2_page_vaddr(*pt_l2);

	return &tbl[pt_l3_index(addr)];
}

extern pt_l2_t *
follow_pt_l2(struct memory_map_desc *desc, unsigned long addr);

extern pt_l3_t *
follow_pt_l3(struct memory_map_desc *desc, unsigned long addr);

extern pt_l4_val_t *
follow_pt_l4(struct memory_map_desc *desc, unsigned long addr);


/*************************  divider  ************************/

#define INIT_MM_CONTEXT(name) \
	.context.id_lock = SMP_LOCK_UNLOCKED(name.context.id_lock),

#define ASID(mm)	((mm)->context.id & 0xffff)

extern void init_linear_mapping(void);

#define PMD_SECT_VALID		(_AT(pt_l3_val, 1) << 0)
#define PMD_SECT_PROT_NONE	(_AT(pt_l3_val, 1) << 58)
#define PMD_SECT_USER		(_AT(pt_l3_val, 1) << 6)		/* AP[1] */
#define PMD_SECT_RDONLY		(_AT(pt_l3_val, 1) << 7)		/* AP[2] */
#define PMD_SECT_S		(_AT(pt_l3_val, 3) << 8)
#define PMD_SECT_AF		(_AT(pt_l3_val, 1) << 10)
#define PMD_SECT_NG		(_AT(pt_l3_val, 1) << 11)
#define PMD_SECT_PXN		(_AT(pt_l3_val, 1) << 53)
#define PMD_SECT_UXN		(_AT(pt_l3_val, 1) << 54)

#define PMD_ATTRINDX(t)		(_AT(pt_l3_val, (t)) << 2)
#define PMD_ATTRINDX_MASK	(_AT(pt_l3_val, 7) << 2)

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_ATTR_KERNEL		page_attr(PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_ATTR_KERNEL_EXEC	page_attr(PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_ATTR_KERNEL_NOCACHE		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))

#define PAGE_ATTR_KERNEL_IO	page_attr(PROT_DEVICE_nGnRE)

#define pte_pfn(pte)		((pt_l4_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(pt_l4(((phys_addr_t)(pfn) << PAGE_SHIFT) | page_attr_val(prot)))

#define pte_none(pte)		(!pt_l4_val(pte))
#define invalidate_pt_l4(mm,addr,ptep)	set_pt_l4(ptep, pt_l4(0))
#define pte_page(pte)		(number_to_page(pte_pfn(pte)))

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PT_L4 - 1))

#define pt_l4_ptr(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_present(pte)	(!!(pt_l4_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_dirty(pte)		(!!(pt_l4_val(pte) & PTE_DIRTY))
#define pte_young(pte)		(!!(pt_l4_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pt_l4_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pt_l4_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pt_l4_val(pte) & PTE_UXN))

#define pte_valid_user(pte) \
	((pt_l4_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
	((pt_l4_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)

/*
 * Write an L4 (pte-level) entry.
 */
static inline void set_pt_l4(pt_l4_val_t *pt_l4, pt_l4_val_t val)
{
	*pt_l4 = val;

	/*
	 * Barriers are issued only for valid, non-user (kernel) entries,
	 * so the update is visible to the table walker before the kernel
	 * uses the mapping.
	 * NOTE(review): the original comment claimed the barrier was for
	 * invalid/user entries, which contradicts the condition below —
	 * the code's behavior is kept as-is; confirm intent.
	 */
	if (pte_valid_not_user(val)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pt_l4_val_t pteval, unsigned long addr);

/*
 * Install @pte at @ptep for @addr.  For valid user mappings:
 *  - executable, non-special pages get their I/D caches synchronized
 *    first, so stale instructions are not fetched;
 *  - PTE_RDONLY is cleared only for dirty+writable entries and set
 *    otherwise (software dirty-bit tracking via write faults).
 */
static inline void set_pte_at(struct memory_map_desc *mm, unsigned long addr,
			      pt_l4_val_t *ptep, pt_l4_val_t pte)
{
	if (pte_valid_user(pte)) {
		if (!pte_special(pte) && pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (pte_dirty(pte) && pte_write(pte))
			pt_l4_val(pte) &= ~PTE_RDONLY;
		else
			pt_l4_val(pte) |= PTE_RDONLY;
	}

	set_pt_l4(ptep, pte);
}

/**
 * Build section-mapping attributes for direct
 * (three-level) section mapping.
 */
static inline page_attr_t mk_sect_prot(page_attr_t prot)
{
	/* Drop the table bit so the descriptor encodes a block (section). */
	return page_attr(~PTE_TABLE_BIT & page_attr_val(prot));
}

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_pfn(pmd)		(((pt_l3_val(pmd) & PT_L3_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(pt_l3(((phys_addr_t)(pfn) << PAGE_SHIFT) | page_attr_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(number_of_page(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pt_l2_page_num(pud)		(((pt_l2_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define __pgprot_modify(prot,mask,bits) \
	page_attr((page_attr_val(prot) & ~(mask)) | (bits))

/**
 * Modify page-table attributes.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)

#define pt_l3_is_empty(pmd)		(!pt_l3_val(pmd))
#define pmd_present(pmd)	(pt_l3_val(pmd))

#define pt_l3_is_invalid(pmd)		(!(pt_l3_val(pmd) & 2))

#define pmd_table(pmd)		((pt_l3_val(pmd) & PMD_TYPE_MASK) == \
				 PT_L3_TYPE_TABLE)
#define pt_l3_is_section(pmd)		((pt_l3_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#define pt_l2_is_section(pud)		((pt_l2_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pt_l2_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)

/*
 * Write an L3 (pmd-level) entry; dsb/isb make the update visible to
 * the table walker before any subsequent instruction uses the mapping.
 */
static inline void set_pt_l3(pt_l3_t *pmdp, pt_l3_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

/*
 * Virtual address of the L4 table referenced by an L3 (pmd) entry.
 * The (s32)PAGE_MASK cast sign-extends the mask to 64 bits, stripping
 * the page-offset bits while PHYS_MASK bounds the physical address.
 */
static inline pt_l4_val_t *pmd_page_vaddr(pt_l3_t pmd)
{
	return linear_phys_to_virt(pt_l3_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		number_to_page(__phys_to_pgnum(pt_l3_val(pmd) & PHYS_MASK))

#define mmu_l1_is_empty(l1)		(!(l1))
#define mmu_l1_is_invalid(l1)		(!((l1) & 2))
#define mmu_l2_is_empty(l2)		(!(l2))
#define mmu_l2_is_invalid(l2)		(!((l2) & 2))
#define mmu_l3_is_empty(l3)		(!(l3))
#define mmu_l3_is_invalid(l3)		(!((l3) & 2))
#define mmu_l4_is_empty(l4)		(!(l4))
#define mmu_l4_is_invalid(l4)		(!((l4) & 2))

#define pt_l2_is_empty(pud)		(!pt_l2_val(pud))
#define pt_l2_is_invalid(pud)		(!(pt_l2_val(pud) & 2))
#define pud_present(pud)	(pt_l2_val(pud))

/*
 * Write an L2 (pud-level) entry; dsb/isb make the update visible to
 * the table walker before any subsequent instruction uses the mapping.
 */
static inline void set_pt_l2(pt_l2_t *pudp, pt_l2_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

/* Clear an L2 (pud-level) entry by writing a zero descriptor. */
static inline void pud_clear(pt_l2_t *pudp)
{
	set_pt_l2(pudp, pt_l2(0));
}

#define pud_page(pud)		number_to_page(__phys_to_pgnum(pt_l2_val(pud) & PHYS_MASK))

/**
 * Zero an architecture MMU descriptor and initialize its spinlock.
 * A NULL descriptor is tolerated and ignored.
 */
static inline void arch_mmu_desc_init(struct arch_mmu_desc* p)
{
	if (!p)
		return;
	/* memset takes void* — no cast needed; redundant trailing return dropped. */
	memset(p, 0, sizeof(*p));
	smp_lock_init(&p->lock);
}

/*
 * Translate generic PROT_* flags into ARM64 PTE attribute bits and
 * store them in info->in_prot.  Always returns 0.
 *
 * Starting point is PAGE_DEFAULT with user-execute denied (PTE_UXN):
 *  - PROT_READ sets PTE_RDONLY (cleared again below if PROT_WRITE);
 *  - PROT_WRITE clears PTE_RDONLY;
 *  - PROT_EXEC clears PTE_UXN, and an execute-only request
 *    (no read, no write) is forced read-only;
 *  - PROT_USER adds PTE_USER.
 *
 * NOTE(review): PROT_NONE (0) leaves PTE_RDONLY clear, i.e. the
 * resulting attribute is writable — confirm this is intended for
 * callers passing PROT_NONE.
 */
static inline int arch_mmu_walk_info_set_prot(struct mmu_walk_info* info, unsigned long prot)
{
	unsigned long mmu_prot = (PAGE_DEFAULT | PTE_UXN);

	if (prot & PROT_READ)
		mmu_prot |= PTE_RDONLY;
	if (prot & PROT_WRITE)
		mmu_prot &= ~PTE_RDONLY;
	if (prot & PROT_EXEC) {
		mmu_prot &= ~PTE_UXN;
		if (!(prot & PROT_WRITE) && !(prot & PROT_READ))
				mmu_prot |= (PTE_RDONLY);
	}
	if (prot & PROT_USER)
		mmu_prot |= PTE_USER;

	info->in_prot = mmu_prot;
	return 0;
}


/**
 * Initialize an MMU descriptor: set up its lock, detach it from any
 * memory descriptor, and initialize the architecture-specific part.
 * A NULL descriptor is tolerated and ignored.
 */
static inline void mmu_desc_init(struct mmu_desc* mmu)
{
	if (!mmu)
		return;
	smp_lock_init(&mmu->lock);
	mmu->at_mem = NULL;
	arch_mmu_desc_init(&mmu->mmu_arch);
	/* redundant trailing return removed */
}

#include <asm-generic/mmu.h>

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_MMU_H */
