#ifndef _DIMSUM_MM_TYPES_H
#define _DIMSUM_MM_TYPES_H

#include <dim-sum/types.h>
#include <dim-sum/smp_lock.h>
#include <dim-sum/rbtree.h>
#include <dim-sum/rwsem.h>
#include <dim-sum/mman.h>
#include <dim-sum/err.h>
#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mm_types.h>

/*
 * User environment-variable region: USER_ENV_SIZE bytes ending at the
 * page-aligned top of the user VA range (all *_END values in this file
 * are inclusive last bytes).
 */
#define USER_ENV_SIZE (0x200000)
#define USER_ENV_START ((USER_VA_END & (PAGE_MASK)) - USER_ENV_SIZE)
#define USER_ENV_END (USER_ENV_START + USER_ENV_SIZE - 1)

/* User stack region, placed directly below the environment region. */
#define USER_STACK_SIZE (0x1000000)
#define USER_STACK_START (((USER_ENV_START - 1) & (PAGE_MASK)) - USER_STACK_SIZE)
#define USER_STACK_TOP (USER_STACK_START + USER_STACK_SIZE - 1)

/*
 * Last byte of the shared-object mapping area, just below the stack.
 * NOTE(review): the original expansion was
 *   (USER_STACK_START - 1) & PAGE_MASK - 1
 * which parses as `... & (PAGE_MASK - 1)` because `-` binds tighter than
 * `&`, producing a within-page OFFSET rather than an address.  Fixed to
 * the apparent intent: round down to a page boundary, then step back one
 * byte.  Confirm against the users of USER_MAP_SO_END.
 */
#define USER_MAP_SO_END (((USER_STACK_START - 1) & PAGE_MASK) - 1)

/* One page reserved for the signal-return trampoline. */
#define USER_MAP_SIGRET_START (USER_MAP_SO_END & PAGE_MASK)
#define USER_MAP_SIGRET_SIZE (PAGE_SIZE)
#define USER_MAP_SIGRET_END (USER_MAP_SIGRET_START + USER_MAP_SIGRET_SIZE - 1)

/**
 * Default heap size: 1GB.
 */
#define USER_HEAP_SIZE (0x40000000)
#define USER_HEAP_START (0x100000000)
#define USER_ALLOC_START (USER_VA_START + PAGE_SIZE)
#define USER_ALLOC_END (USER_VA_END)
/**
 * Type flags for virtual address intervals, stored in vma_desc.flags.
 * The top bits (63..61) mark the special stack/heap/environment VMAs.
 */
#define VAF_STACK_MAP_BIT (63)
#define VAF_HEAP_MAP_BIT (62)
#define VAF_ENV_MAP_BIT (61)

/*
 * Low bit positions mirroring the mmap MAP_* flags.
 * NOTE(review): bits 4/5/6 (FIXED/ANONYMOUS/FILE) and 19/20
 * (SYNC/FIXED_NOREPLACE) share values with the VAF_MAYREAD group defined
 * further below (e.g. VAF_FIXED_MAP == VAF_MAYREAD == 0x10 and
 * VAF_SYNC_MAP == VAF_RESERVED == 0x80000) — confirm the two families
 * are never combined in the same flags word.
 */
#define VAF_SHARED_MAP_BIT	(0x00)
#define VAF_PRIVATE_MAP_BIT	(0x01)
#define VAF_FIXED_MAP_BIT (0x04)
#define VAF_ANONYMOUS_MAP_BIT (0x5)
#define VAF_FILE_MAP_BIT (0x6)
#define VAF_SYNC_MAP_BIT (0x13)
#define VAF_FIXED_NOREPLACE_MAP_BIT (0x14)

#define VAF_SHARED_MAP	(0x01ULL << VAF_SHARED_MAP_BIT)
#define VAF_PRIVATE_MAP	(0x01ULL << VAF_PRIVATE_MAP_BIT)
#define VAF_FIXED_MAP (0x01ULL << VAF_FIXED_MAP_BIT)
#define VAF_SYNC_MAP (0x01ULL << VAF_SYNC_MAP_BIT)
#define VAF_FIXED_NOREPLACE_MAP (0x01ULL << VAF_FIXED_NOREPLACE_MAP_BIT)


#define VAF_STACK (0x1ULL << VAF_STACK_MAP_BIT)
#define VAF_HEAP (0x1ULL << VAF_HEAP_MAP_BIT)
#define VAF_ENV (0x1ULL << VAF_ENV_MAP_BIT)
#define VAF_ANONYMOUS_MAP (0x1ULL << VAF_ANONYMOUS_MAP_BIT)
#define VAF_FILE_MAP (0x1ULL << VAF_FILE_MAP_BIT)
/* Common combinations. */
#define VAF_FILE_SHARED_MAP	(VAF_FILE_MAP | VAF_SHARED_MAP)
#define VAF_FILE_PRIVATE_MAP	(VAF_FILE_MAP | VAF_PRIVATE_MAP)
#define VAF_MEM_ANONYMOUS_MAP (VAF_PRIVATE_MAP | VAF_ANONYMOUS_MAP)
#define VAF_STACK_MAP (VAF_STACK | VAF_PRIVATE_MAP | VAF_ANONYMOUS_MAP)
#define VAF_HEAP_MAP (VAF_HEAP | VAF_PRIVATE_MAP | VAF_ANONYMOUS_MAP)
#define VAF_ENV_MAP (VAF_ENV | VAF_PRIVATE_MAP | VAF_ANONYMOUS_MAP)

/* mmap flag combinations used when creating the stack/heap/env VMAs. */
#define STACK_MAP_FLAGS (MAP_EXT_STACK | MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS)
#define HEAP_MAP_FLAGS (MAP_EXT_HEAP | MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS)
#define ENV_MAP_FLAGS (MAP_EXT_ENV | MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS)

/*
 * Linux-style per-VMA capability/behaviour flags.
 * NOTE(review): several of these share bit positions with the VAF_*_MAP
 * constants above (0x10, 0x20, 0x40, 0x80000, 0x100000) — verify the
 * two groups never occupy the same flags field.
 */
#define VAF_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VAF_MAYWRITE	0x00000020
#define VAF_MAYEXEC	0x00000040
#define VAF_MAYSHARE	0x00000080


#define VAF_GROWSDOWN	0x00000100	/* general info on the segment */
#define VAF_GROWSUP	0x00000200
#define VAF_SHM		0x00000400	/* shared memory area, don't swap out */
#define VAF_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VAF_EXECUTABLE	0x00001000
#define VAF_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VAF_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VAF_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VAF_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VAF_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VAF_RESERVED	0x00080000	/* Don't unmap it from swap_out */
#define VAF_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VAF_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VAF_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VAF_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
/**
 * Access-permission flags for virtual address intervals (vma_desc.acces).
 * Bits 63..60 carry ownership/locking state; bits 2..0 carry R/W/X.
 */
#define VAC_USER_BIT (62)
#define VAC_KERN_BIT (63)
#define VAC_LOCK_BIT (61)
#define VAC_WKLOCK_BIT (60)

#define VAC_USER  (1ULL << VAC_USER_BIT)
#define VAC_KERN (1ULL << VAC_KERN_BIT)

#define VAC_LOCK  (1ULL << VAC_LOCK_BIT)
#define VAC_WKLOCK  (1ULL << VAC_WKLOCK_BIT)

#define VAC_READ_BIT (0)
#define VAC_WRITE_BIT (1)
#define VAC_EXEC_BIT (2)

#define VAC_READ  (1ULL << VAC_READ_BIT)
#define VAC_WRITE (1ULL << VAC_WRITE_BIT)
#define VAC_EXEC (1ULL << VAC_EXEC_BIT)

#define VAC_IO           0x00004000
/* Convenience combinations: U... = user+kernel, K... = kernel-only. */
#define VAC_URW (VAC_KERN | VAC_USER | VAC_READ | VAC_WRITE)
#define VAC_URWX (VAC_KERN | VAC_USER | VAC_READ | VAC_WRITE | VAC_EXEC)
#define VAC_UR (VAC_KERN | VAC_USER | VAC_READ)
#define VAC_URX (VAC_KERN | VAC_USER | VAC_READ | VAC_EXEC)
#define VAC_KRW (VAC_KERN |VAC_READ | VAC_WRITE)
#define VAC_KRWX (VAC_KERN | VAC_READ | VAC_WRITE | VAC_EXEC)
#define VAC_RWX_MASK (0x7)

/* Matching PROT_* combinations for the mmap/mprotect call paths. */
#define URW_PROT (PROT_EXT_KERN | PROT_EXT_USER | PROT_READ | PROT_WRITE)
#define URWX_PROT (PROT_EXT_KERN | PROT_EXT_USER | PROT_READ | PROT_WRITE | PROT_EXEC)
#define UR_PROT (PROT_EXT_KERN | PROT_EXT_USER | PROT_READ)
#define URX_PROT (PROT_EXT_KERN | PROT_EXT_USER | PROT_READ | PROT_EXEC)
#define KRW_PROT (PROT_EXT_KERN |PROT_READ | PROT_WRITE)
#define KRWX_PROT (PROT_EXT_KERN | PROT_READ | PROT_WRITE | PROT_EXEC)

/*
 * FVSTATE_*: result codes of the VMA lookup helpers (find_addr_min_vma(),
 * find_vma_iterator()), describing where an address or range landed
 * relative to the existing VMAs.
 */
#define FVSTATE_EMPTY (1)
#define FVSTATE_NULL_EMPTY (2)
#define FVSTATE_ON_PREV (3)
#define FVSTATE_ON_NEXT (4)
#define FVSTATE_NULL_ON_PREV (5)
#define FVSTATE_NULL_ON_NEXT (6)
#define FVSTATE_UP_EXT (7)
#define FVSTATE_DN_EXT (8)
#define FVSTATE_NULL_UP_EXT (9)
#define FVSTATE_NULL_DN_EXT (10)
#define FVSTATE_ADDR_IN_VMA (11)
#define FVSTATE_ADDR_EQ_VMA (12)
#define FVSTATE_ADDR_SIN_EOUT_VMA (13)
#define FVSTATE_ADDR_SEQ_EIN_VMA (14)
#define FVSTATE_ADDR_SIN_EEQ_VMA (15)
#define FVSTATE_ADDR_SOUT_EIN_VMA (16)
#define FVSTATE_ADDR_SOUT_EOUT_VMA (17)
#define FVSTATE_ADDR_OUT_VMA (18)
#define FVSTATE_ERROR (19)

/* FVFLAG_*: the purpose of a find_vma_iterator() walk. */
#define FVFLAG_MAP (1)
#define FVFLAG_AUTOMAP (2)
#define FVFLAG_FIXED (3)
#define FVFLAG_UNMAP (4)
#define FVFLAG_FAULT (5)
#define FVFLAG_REMAP (6)
#define FVFLAG_MPROTECT (7)

/* Which end of a VMA an extension applies to, and its direction
 * (presumably down/up) — confirm against the .c users. */
#define VMA_EXT_START (0)
#define VMA_EXT_END (1)

#define VMA_EXT_DN (0)
#define VMA_EXT_UP (1)

/* Sentinel: the offset does not fall inside the backing file. */
#define VMA_OFF_FILE_OUTSIDE (-1)

/* madvise()-style read-ahead hints carried in vma_desc.flags. */
#define VAF_READHINTMASK			(VAF_SEQ_READ | VAF_RAND_READ)
#define VMA_CLEAR_READHINT(v)		(v)->flags &= ~VAF_READHINTMASK
#define VMA_NORMAL_READHINT(v)		(!((v)->flags & VAF_READHINTMASK))
#define VMA_SEQUENTIAL_READHINT(v)	((v)->flags & VAF_SEQ_READ)
#define VMA_RANDOM_READHINT(v)		((v)->flags & VAF_RAND_READ)

/* SUID dump modes, see set_dumpable()/get_dumpable() below. */
#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
/* References MMF_DUMP_MASK_DEFAULT_ELF, defined just below — harmless,
 * since macros expand at the point of use. */
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
/*
 * This one-shot flag is dropped due to necessity of changing exe once again
 * on NFS restore
 */
//#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS	27	/* mm is shared between processes */
#define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)

/* Flags preserved across mem_desc_init() (see its use of m->flags). */
#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
				 MMF_DISABLE_THP_MASK)

/* Magic status values: live vs. exiting address space. */
#define MMS_ACTIVE (0xaafeaafe)
#define MMS_EXIT (0xeefaeefa)

struct file_cache_space;

/**
 * Memory-mapping descriptor: the page-table context of an address space.
 */
struct memory_map_desc {
	/**
	 * Spinlock protecting the mapping page tables.
	 */
	struct smp_lock page_table_lock;
	/**
	 * Pointer to the first-level page table.
	 */
	pt_l1_t *pt_l1;
	
	/**
	 * Architecture-specific mapping context,
	 * e.g. the ASID.
	 */
	struct arch_memory_map map_context;
};

/*************************  page frame  ************************/
struct page_frame {
	/**
	 * A set of flags that also encodes which zone the frame belongs to:
	 * on non-NUMA machines flags carries a 2-bit zone index and a 1-bit
	 * node index; on 32-bit NUMA, 2 zone bits and 6 node bits; on 64-bit
	 * NUMA, 2 zone bits and 10 node bits.  The topmost bits hold the
	 * number of the memory segment the page belongs to.
	 */
	unsigned long flags;
	
	/**
	 * Page-frame reference count.  A negative value means the frame is
	 * unused; page_count() reports _count+1, the number of active users.
	 */
	struct accurate_counter ref_count;		/* Usage count, see below. */
	union {
		/**
		 * When mapped into user space: how many times the page is
		 * mapped into user address spaces.  Starts at -1, so the
		 * stored value is the number of processes sharing the page
		 * with the current one.
		 */
		struct accurate_counter share_count;	
		/**
		 * For the beehive allocator: the number of objects in this
		 * page that are currently ALLOCATED OUT.
		 */
		unsigned int inuse_count;
	};
	union {
		/**
		 * Index of this page within its page-cache mapping
		 * (i.e. "page number N" of the backing object).
		 */
		pgoff_t index;
		/**
		 * Free-object pointer for the beehive allocator.
		 */
		void *freelist;
	};

	union {
		struct {
			/**
			 * Used by the buddy system while the page is free:
			 * if this page heads a free block of 2^k pages, the
			 * value is k, which lets the buddy system locate the
			 * adjacent buddy and decide whether the two can merge
			 * into a 2^(k+1) block.
			 */
			unsigned int order;
			/**
			 * Available to whichever kernel component is using the
			 * page; for buffer pages it is a buffer-head pointer.
			 */
			unsigned long private;				 
			/**
			 * Used when the page is in the page cache or anonymous:
			 *	NULL          -> page belongs to the swap cache;
			 *	low bit set   -> anonymous page; the field stores a
			 *	                 pointer to an anon_vma descriptor;
			 *	low bit clear -> mapped page; the field points to the
			 *	                 file's address-space (cache) object.
			 */
			struct file_cache_space*cache_space;
		};

		/**
		 * For the FIRST page of a beehive span:
		 * the allocator this page belongs to.
		 */
		struct beehive_allotter *beehive;
		/**
		 * For non-first beehive pages:
		 * pointer back to the first page of the span.
		 */
		struct page_frame *first_page;
	};

	union {
		struct double_list brick_list;
		struct double_list cache_list;
		/**
		 * For the beehive allocator: links the page into the
		 * node's partially-full list.
		 */
		struct double_list beehive_list;
		struct double_list pgcache_list;
		struct double_list lru;
	};
};

/* Open/close callbacks attached to a VMA (see vma_desc.ops). */
struct vma_ops {
	void (*open)(struct vma_desc * area);
	void (*close)(struct vma_desc * area);
};
/**
 * Virtual-address-interval (VMA) descriptor.
 */
struct mem_desc;
struct vma_desc {
	/**
	 * Linkage: ordered list and red-black tree node of the descriptor,
	 * plus a back pointer to the owning memory descriptor.
	 */
	struct double_list list;
	struct rb_node rbnode;
	struct mem_desc* at_mem;

	/**
	 * Lock protecting this descriptor's data.
	 */
	struct smp_lock lock;
	
	/**
	 * VMA type flags (VAF_*) and access rights (VAC_*).
	 * ("acces" is a historical misspelling of "access"; kept because
	 * external code references the field by this name.)
	 */
	u64 flags;
	u64 acces;
	/**
	 * Start and end addresses of the interval; end is treated as
	 * inclusive by the lookup helpers below.
	 */
	virt_addr_t start;
	virt_addr_t end;
	/**
	 * Offset and size of the real backing file within this VMA.
	 */
	off_t rf_in_v_off;
	virt_size_t rf_in_v_size;
	/**
	 * VMA method table (currently unused).
	 */
	struct vma_ops* ops;
	
	/**
	 * File mapped by this VMA.
	 */
	struct file * file;		/* File we map to (can be NULL). */
	
	/**
	 * CAUTION:
	 * when the VMA is a file mapping, `offset` is the file offset;
	 * when the VMA is the heap, `heapend` is the heap cursor.
	 */
	union {
		off_t offset;
		virt_addr_t heapend;
	};
	void* priv;	
};


/**
 * Memory descriptor: the whole user address space of a process.
 */
struct mem_desc
{
	/**
	 * Linkage of memory descriptors.
	 */
	struct double_list list;

	/**
	 * Lock and semaphore protecting this descriptor's data.
	 */
	struct smp_lock lock;
	struct rw_semaphore rwsem;

	/**
	 * Descriptor flags (MMF_*) and state (MMS_ACTIVE / MMS_EXIT).
	 */
	u64 flags;	
	u64 status;
	
	/**
	 * Process owning this descriptor.
	 */
	struct process_desc* at_proc;

	/**
	 * Reference count of the descriptor itself; it may be shared.
	 */
	struct accurate_counter count;
	
	/**
	 * Ordered list and red-black tree root of the VMAs.
	 */
	struct double_list vma_list;
	struct rb_root vma_root;
	
	/**
	 * Cache of the most recently accessed vma_desc.
	 */
	struct vma_desc* vma_cache;

	/**
	 * Heap, stack and environment-variable segments of the ELF image.
	 */
	struct vma_desc* vma_heap;
	struct vma_desc* vma_stack;
	struct vma_desc* vma_env;
	u64 vma_def_locked;
	/**
	 * Number of VMA descriptors.
	 */
	u64 vma_nr;

	/**
	 * Start, end and size of the virtual memory range.
	 */
	virt_addr_t vm_start;
	virt_addr_t vm_end;
	virt_size_t vm_size;
	
	/**
	 * Number of physical pages already allocated to this space.
	 */
	u64 phy_pages;
	
	/**
	 * AIO: lock for the async I/O context list below.
	 */
	struct smp_lock		ioctx_lock;
	/**
	 * Async I/O context list.
	 */
	struct hash_list_bucket	ioctx_list;

	/**
	 * Architecture-specific part of the descriptor.
	 */
	struct arch_mem_desc mem_arch;
	int dumpable;
	void* priv;
	void* ext;
};

/*
 * Iterator over the VMAs intersecting [start, end] of memory descriptor m.
 * The *state fields hold FVSTATE_* codes describing where each probe landed.
 * NOTE: `fiststate` and `fisrt` are historical typos of "first"; kept
 * because code outside this header references them by these names.
 */
struct vma_iterator {
	struct mem_desc* m;
	virt_addr_t start;
	virt_addr_t end;
	int vposstate, fiststate, laststate;
	struct vma_desc* vpos;
	struct vma_desc* fisrt;
	struct vma_desc* last;
	struct vma_desc* vma_arr;
};

/*
 * Linux-compatible VMA stub carrying only the range fields.
 * NOTE(review): not connected to vma_desc anywhere in this header —
 * confirm it is still referenced before keeping it.
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */
};


/**
 * Initialize a VMA iterator over [start, end] of memory descriptor @m.
 * All fields are zeroed first; a NULL @vitor is silently ignored.
 */
static inline void vma_iterator_init(struct vma_iterator* vitor, 
					struct mem_desc* m, 
					virt_addr_t start, virt_addr_t end)
{
	if (!vitor)
		return;

	memset(vitor, 0, sizeof(*vitor));
	vitor->end = end;
	vitor->start = start;
	vitor->m = m;
}

/**
 * Initialize a memory descriptor: linkage, locks, refcount, the VMA
 * list/tree, flags/status and the architecture-specific part.
 * A NULL @m is silently ignored.
 *
 * NOTE(review): `m->flags & MMF_INIT_MASK` reads the PRE-EXISTING value
 * of m->flags, so the caller must have seeded it (e.g. copied from a
 * parent); on a freshly allocated, non-zeroed descriptor this reads
 * garbage — confirm all callers pre-set flags.
 * NOTE(review): vma_cache/vma_heap/vma_stack/vma_env/vma_nr and the
 * vm_* range fields are not reset here — presumably the allocator
 * zeroes them; verify.
 */
static inline void mem_desc_init(struct mem_desc* m)
{
	if (!m)
		return;
	list_init(&m->list);
	smp_lock_init(&m->lock);
	init_rwsem(&m->rwsem);
	accurate_set(&m->count, 0);
	list_init(&m->vma_list);
	RB_ROOT_INIT(&m->vma_root);
	m->flags = m->flags & MMF_INIT_MASK;
	m->status = MMS_ACTIVE;
	arch_mem_desc_init(&m->mem_arch);
	return;
}

/**
 * Initialize a VMA descriptor: list/rbtree linkage, its spinlock,
 * and an empty [0, 0] address range.
 */
static inline void vma_desc_init(struct vma_desc* v)
{
	v->end = 0;
	v->start = 0;
	smp_lock_init(&v->lock);
	RB_NODE_INIT(&v->rbnode);
	list_init(&v->list);
}

/**
 * Return the (inclusive) top address of the process stack VMA,
 * or 0 when @m or its stack VMA is absent.
 */
static inline virt_addr_t mem_stack_top(struct mem_desc* m)
{
	if (m && m->vma_stack)
		return m->vma_stack->end;

	return (virt_addr_t)NULL;
}

/**
 * Return the start address of the process stack VMA,
 * or 0 when @m or its stack VMA is absent.
 */
static inline virt_addr_t mem_stack_start(struct mem_desc* m)
{
	if (m && m->vma_stack)
		return m->vma_stack->start;

	return (virt_addr_t)NULL;
}

static inline int vma_compare_prot(struct vma_desc* v, unsigned long prot)
{
	if ((v->acces & VAC_RWX_MASK) == (prot & VAC_RWX_MASK))
		return 1;

	return 0;
}

static inline int vma_reset_acces_prot(struct vma_desc* v, unsigned long prot)
{
	v->acces &= (~VAC_RWX_MASK);

	if (prot & PROT_READ)
		v->acces |= VAC_READ;
	
	if (prot & PROT_WRITE)
		v->acces |= (VAC_WRITE | VAC_READ);
	
	if (prot & PROT_EXEC) {
		v->acces |= VAC_EXEC;
		if (!(prot & PROT_READ) && !(prot & PROT_WRITE))
			v->acces |= VAC_READ;
	}
	return 0;
}

/**
 * Translate the VMA's VAC_* access bits back into PROT_* bits.
 * Returns 0 for a NULL descriptor.
 */
static inline unsigned long vma_ret_prot(struct vma_desc* v)
{
	unsigned long prot = 0;

	if (!v)
		return 0;

	if (v->acces & VAC_READ)
		prot |= PROT_READ;
	if (v->acces & VAC_WRITE)
		prot |= PROT_WRITE;
	if (v->acces & VAC_EXEC)
		prot |= PROT_EXEC;

	return prot;
}

/**
 * Build a VMA access word (VAC_*) from mmap PROT_* bits.
 * Write implies read; execute-only is widened to read+execute.
 *
 * NOTE(review): the accumulator starts as VAC_USER | VAC_KERN, which
 * makes the trailing PROT_EXT_KERN / PROT_EXT_USER checks no-ops —
 * every mapping comes out both user- and kernel-accessible.  Possibly
 * the initializer should be 0; confirm intent before changing.
 */
static inline u64 mmapprot_to_vmaacces(unsigned long prot)
{
	u64 acces = VAC_USER | VAC_KERN;

	if (prot & PROT_READ)
		acces |= VAC_READ;

	if (prot & PROT_WRITE)
		acces |= (VAC_WRITE | VAC_READ);
	
	if (prot & PROT_EXEC) {
		acces |= VAC_EXEC;
		/* Execute-only: make the region at least readable. */
		if (!(prot & PROT_READ) && !(prot & PROT_WRITE))
			acces |= VAC_READ;
	}

	if (prot & PROT_EXT_KERN)
		acces |= VAC_KERN;
	if (prot & PROT_EXT_USER)
		acces |= VAC_USER;

	return acces;
}

/**
 * Translate mmap MAP_* flags into this file's VAF_* VMA flags.
 * MAP_SHARED and MAP_PRIVATE are mutually exclusive (BUG_ON otherwise);
 * a mapping is either anonymous or file-backed, never both.
 */
static inline u64 mmapflags_to_vmaflags(unsigned long flags)
{
	u64 vmaflags = 0;

	BUG_ON((flags & MAP_SHARED) && (flags & MAP_PRIVATE));

	if (flags & MAP_SHARED)
		vmaflags |= VAF_SHARED_MAP;

	if (flags & MAP_PRIVATE)
		vmaflags |= VAF_PRIVATE_MAP;

	if (flags & MAP_FIXED)
		vmaflags |= VAF_FIXED_MAP;

	/*
	 * Anonymous and file-backed are exhaustive alternatives; the
	 * original `else if (!(flags & MAP_ANONYMOUS))` condition was
	 * always true inside the else branch and has been dropped.
	 */
	if (flags & MAP_ANONYMOUS)
		vmaflags |= VAF_ANONYMOUS_MAP;
	else
		vmaflags |= VAF_FILE_MAP;

	if (flags & MAP_SYNC)
		vmaflags |= VAF_SYNC_MAP;

	if (flags & MAP_FIXED_NOREPLACE)
		vmaflags |= VAF_FIXED_NOREPLACE_MAP;

	if (flags & MAP_EXT_STACK)
		vmaflags |= VAF_STACK;

	if (flags & MAP_EXT_ENV)
		vmaflags |= VAF_ENV;

	if (flags & MAP_EXT_HEAP)
		vmaflags |= VAF_HEAP;

	return vmaflags;
}

/**
 * Return 1 if the VMA maps huge TLB pages, 0 otherwise.
 * Normalized to 0/1 for consistency with vma_is_heap()/vma_is_stack(),
 * instead of funnelling the raw u64 mask value through the int return.
 */
static inline int vma_is_hugetlb_page(struct vma_desc *v)
{
	return (v->flags & VAF_HUGETLB) ? 1 : 0;
}

extern void set_dumpable(struct mem_desc *mem, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
/* Extract the SUID_DUMP_* value from a raw mm flags word. */
static inline int __get_dumpable(unsigned long mm_flags)
{
	unsigned long dumpable = mm_flags & MMF_DUMPABLE_MASK;

	return (int)dumpable;
}

/* Dumpable state (SUID_DUMP_*) of a memory descriptor. */
static inline int get_dumpable(struct mem_desc *mem)
{
	unsigned long raw_flags = mem->flags;

	return __get_dumpable(raw_flags);
}

/**
 * Take the mmap write semaphore.  Always returns 0: the killable
 * variant is not available, so this blocks uninterruptibly.
 * TODO: switch to down_write_killable() once the rwsem supports it.
 */
static inline int mmap_write_lock_killable(struct mem_desc *mem)
{
	// return down_write_killable(&mem->rwsem);
	down_write(&mem->rwsem);
	return 0;
}

/* Release the write semaphore taken by mmap_write_lock_killable(). */
static inline void mmap_write_unlock(struct mem_desc *mem)
{
	up_write(&mem->rwsem);
}

/**
 * Return 1 if the VMA is the process heap, 0 otherwise.
 * BUGs on a NULL descriptor.
 */
static inline int vma_is_heap(struct vma_desc *v)
{
	BUG_ON(!v);
	return (v->flags & VAF_HEAP) ? 1 : 0;
}

/**
 * Return 1 if the VMA is the process stack, 0 otherwise.
 * BUGs on a NULL descriptor.
 */
static inline int vma_is_stack(struct vma_desc *v)
{
	BUG_ON(!v);
	return (v->flags & VAF_STACK) ? 1 : 0;
}


/**
 * Return the VMA preceding @v in @m's ordered list,
 * or NULL when @v is the first entry.
 */
static inline struct vma_desc* vma_prev(struct vma_desc* v, struct mem_desc* m)
{
	if (list_is_first(&v->list, &m->vma_list))
		return NULL;

	return list_prev_entry(v, list);
}

/**
 * Return the VMA following @v in @m's ordered list,
 * or NULL when @v is the last entry.
 */
static inline struct vma_desc* vma_next(struct vma_desc* v, struct mem_desc* m)
{
	if (list_is_last(&v->list, &m->vma_list))
		return NULL;

	return list_next_entry(v, list);
}

/* Unsynchronized test of VAC_LOCK; callers in this file hold v->lock. */
static inline int __vma_is_locked(struct vma_desc *v)
{
	return (v->acces & VAC_LOCK) ? 1 : 0;
}

/* Return 1 if the VMA carries VAF_SHARED_MAP, 0 otherwise. */
static inline int __vma_is_shared(struct vma_desc *v)
{
	return (v->flags & VAF_SHARED_MAP) ? 1 : 0;
}


/* Unsynchronized test of VAC_WKLOCK; callers in this file hold v->lock. */
static inline int __vma_is_wklocked(struct vma_desc *v)
{
	return (v->acces & VAC_WKLOCK) ? 1 : 0;
}

/* Locked query of the VAC_LOCK bit. */
static inline int vma_is_locked(struct vma_desc *v)
{
	int locked;

	smp_lock(&v->lock);
	locked = __vma_is_locked(v);
	smp_unlock(&v->lock);

	return locked;
}

/* Locked query of the VAC_WKLOCK bit. */
static inline int vma_is_wklocked(struct vma_desc *v)
{
	int wklocked;

	smp_lock(&v->lock);
	wklocked = __vma_is_wklocked(v);
	smp_unlock(&v->lock);

	return wklocked;
}

/**
 * Atomically set VAC_LOCK on the VMA.
 * Returns 0 on success, -EFAULT if it was already locked.
 */
static inline int vma_lock(struct vma_desc *v)
{
	int ret = -EFAULT;

	smp_lock(&v->lock);
	if (!__vma_is_locked(v)) {
		v->acces |= VAC_LOCK;
		ret = 0;
	}
	smp_unlock(&v->lock);

	return ret;
}

/**
 * Atomically set VAC_WKLOCK on the VMA.
 * Returns 0 on success, -EFAULT if it was already work-locked.
 */
static inline int vma_work_lock(struct vma_desc *v)
{
	int ret = -EFAULT;

	smp_lock(&v->lock);
	if (!__vma_is_wklocked(v)) {
		v->acces |= VAC_WKLOCK;
		ret = 0;
	}
	smp_unlock(&v->lock);

	return ret;
}

/* Clear the VAC_LOCK bit under the VMA spinlock. */
static inline void vma_unlock(struct vma_desc *v)
{
	smp_lock(&v->lock);
	v->acces &= ~VAC_LOCK;
	smp_unlock(&v->lock);
}

/* Clear the VAC_WKLOCK bit under the VMA spinlock. */
static inline void vma_work_unlock(struct vma_desc *v)
{
	smp_lock(&v->lock);
	v->acces &= ~VAC_WKLOCK;
	smp_unlock(&v->lock);
}

/**
 * Find the lowest VMA related to @addr in @m's address-ordered VMA list.
 *
 * Returns (setting *out accordingly):
 *   FVSTATE_ERROR        - NULL @m, or @addr outside [USER_ALLOC_START,
 *                          USER_ALLOC_END) (*out untouched)
 *   FVSTATE_EMPTY        - the VMA list is empty; *out = NULL
 *   FVSTATE_ADDR_IN_VMA  - *out contains @addr (start <= addr <= end,
 *                          end inclusive)
 *   FVSTATE_ON_PREV      - *out is the first VMA lying wholly above @addr
 *   FVSTATE_ON_NEXT      - *out is the last VMA, wholly below @addr
 *   FVSTATE_ADDR_OUT_VMA - defensive fallback; unreachable while the
 *                          list is non-empty (the loop either returns or
 *                          leaves p set to the last entry with end < addr)
 */
static inline int find_addr_min_vma(struct mem_desc *m, virt_addr_t addr, struct vma_desc** out)
{
	struct vma_desc *v, *p = NULL;
	if (!m)
		return FVSTATE_ERROR;
	
	if (addr < USER_ALLOC_START || addr >= USER_ALLOC_END)
		return FVSTATE_ERROR;

	if (list_is_empty(&m->vma_list)) {
		*out = NULL;
		return FVSTATE_EMPTY;
	}

	/*
	 * Walk from the head looking for the first VMA whose (inclusive)
	 * end lies at or above addr.
	 */
	list_for_each_entry(v, &m->vma_list, list) {
		if (v->end >= addr) {
			*out = v;
			if (v->start <= addr)
				return FVSTATE_ADDR_IN_VMA;
			return FVSTATE_ON_PREV;
		}
		p = v;	/* remember the last VMA ending below addr */
	}

	/* Fell off the end: every VMA ends below addr. */
	if (p) {
		if (p->end < addr) {
			*out = p;
			return FVSTATE_ON_NEXT;
		}
	}

	*out = NULL;
	return FVSTATE_ADDR_OUT_VMA;
}

/**
 * Decide whether the iterator's fixed range is entirely unmapped:
 * both endpoint probes landed in the same gap (same neighbouring VMA,
 * same side), or the whole VMA list was empty.
 * Returns 1 when the range is free, 0 otherwise.  BUGs on NULL @vite.
 */
static inline int fixed_addrspace_is_free(struct vma_iterator* vite)
{
	int same_vma;

	BUG_ON(!vite);

	same_vma = (vite->fisrt == vite->last);

	if (same_vma && vite->fiststate == FVSTATE_ON_PREV &&
	    vite->laststate == FVSTATE_ON_PREV)
		return 1;

	if (same_vma && vite->fiststate == FVSTATE_ON_NEXT &&
	    vite->laststate == FVSTATE_ON_NEXT)
		return 1;

	if (vite->fiststate == FVSTATE_EMPTY &&
	    vite->laststate == FVSTATE_EMPTY)
		return 1;

	return 0;
}

/**
 * Resident-set size of the address space, in pages.
 * Stub: RSS accounting is not implemented yet, so this always reports 0.
 */
static inline long mem_desc_get_rss(struct mem_desc* m)
{
	/* TODO: support RSS accounting. */
	return 0;
}

struct vma_desc *alloced_vma_desc(void);
void free_vma_desc(struct vma_desc *v);
void *insert_vma_desc(struct mem_desc *m, struct vma_desc **out,
					struct vma_desc *vpos, struct vma_desc *v, int ins_state);
int find_vma_iterator(struct vma_iterator* viter, int flags);
long mem_desc_get(struct mem_desc* m);
long mem_desc_put(struct mem_desc* m);
int create_mem_desc(struct process_desc* proc);
int destroy_mem_desc(struct process_desc* proc);
int destroy_vma(struct mem_desc* m, virt_addr_t start, 
					virt_size_t size);
int destroy_vma_unlock(struct mem_desc *m, virt_addr_t start, virt_size_t size);
int vmem_munmmap(void* addr, size_t len);
int vmem_unmap_vma_pages(struct mem_desc *m, struct vma_desc *v, virt_addr_t start,
	virt_addr_t end);
void* create_vma(struct mem_desc* m, struct vma_desc** out, 
					virt_addr_t start, virt_size_t size, u64 access, 
					u64 flags, void* file, off_t offset);
void *create_vma_unlock(struct mem_desc *m, struct vma_desc **out,
					virt_addr_t start, virt_size_t size, u64 access,
					u64 flags, void *file, off_t offset);
					
unsigned long vmem_mmap(void* addr, size_t len, int fd, off_t offset, 
					u64 access, u64 flags);
unsigned long vmem_mmap_on_file(struct file *file, virt_addr_t addr,
	virt_size_t len, u64 access, u64 flags, off_t offset);
unsigned long vmem_mmap_on_file_unlock(struct file *file, virt_addr_t addr,
	virt_size_t len, u64 access, u64 flags, off_t offset);
struct vma_desc* vmem_mmap_ret_vma(void *addr, size_t len, int fd, 
					off_t offset, u64 access, u64 flags);
int create_userheapstack_vma(struct mem_desc* m);
int create_process_vma(struct process_desc* proc);
int create_process_mmu(struct process_desc* proc);
int create_process_memspace(struct process_desc* proc);
int destroy_process_memspace(struct process_desc* proc);
int copy_process_memspace(struct process_desc *dst);
void flush_process_memspace(struct process_desc *proc);
void flush_current_memspace(void);
struct vma_desc* vmem_find_vma_nolock(struct mem_desc *m, virt_addr_t addr);
struct vma_desc* vmem_find_vma(struct mem_desc *m, virt_addr_t addr);
int vmem_fault(struct mem_desc* m, virt_addr_t addr, 
					unsigned long data, void* ext);
void dump_curr_mem(void);
long vmem_brk(unsigned long addr);
unsigned long vmem_mremap(virt_addr_t addr, virt_size_t size,
					virt_addr_t new_addr, virt_size_t new_size, 
					unsigned long flags);
int vma_handle_realfile_offset(struct vma_desc* p, 
					virt_addr_t orig_rfoff, virt_size_t orig_rfsz);

int vmem_mprotect(virt_addr_t addr, 
					virt_size_t len, unsigned long prot);
int vmem_map_range_uaddr(virt_addr_t addr, virt_size_t len, 
					unsigned long prot);

long vmem_msync(virt_addr_t start, virt_size_t len, int flags);
void loosen_mem(struct mem_desc* m);
int exit_mem_desc(struct process_desc* proc);
int exit_mem(struct process_desc* proc, struct mem_desc* m);

#endif /* _DIMSUM_MM_TYPES_H */
