#include <linux/fs.h>
#include <linux/types.h>
#include <linux/module.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/reboot.h>
#include <linux/sysrq.h>
#include <linux/kbd_kern.h>
#include <linux/quotaops.h>
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for fsync_bdev() */
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/workqueue.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/hrtimer.h>
#include <linux/oom.h>
#include <linux/percpu.h>
#include <linux/magic.h>
#include <linux/videodev2.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/percpu.h>

MODULE_LICENSE("GPL");
/* Module-local mm snapshot, filled from the "init" task by
 * prepare_for_test().
 * NOTE(review): this reuses the kernel's own symbol name `init_mm` --
 * confirm the module links without clashing with the exported symbol. */
struct mm_struct init_mm = {};
/* printk wrapper that prefixes every message with function name and line. */
#define log(a, ...) printk("[ %s : %.3d ] "a"\n", \
				__FUNCTION__, __LINE__,  ## __VA_ARGS__)

/*
 * Dump every VMA of @mm twice: once via the rbtree, once via the
 * sorted linked list, so the two traversals can be compared.
 *
 * BUG FIX: the original used a do/while over mm->mmap, dereferencing
 * the first VMA before checking for NULL -- mm->mmap is NULL for
 * kernel threads (and an empty mm), which would oops.  The unused
 * 'pn' tracker was dropped as well.
 */
static int browse_mm(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	struct rb_node *nd;
	struct vm_area_struct *vma;

	/* Pass 1: rbtree order. */
	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		log(" vma->vm_start : 0x %.8lx,vma->vm_end : 0x %.8lx\n",
			vma->vm_start, vma->vm_end);
	}

	/* Pass 2: linked-list order; handles an empty list safely. */
	printk("\n\t\tprint the vma in LIST traverse\n");
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		log("vma->vm_start : 0x%.8lx, vma->vm_end : 0x%.8lx,"
			"len : %lu \tpage[s]",
			vma->vm_start, vma->vm_end,
			(vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	return 0;
}

/*--------------------- [ Test the page table (pgd)] -------------*/
static void test_page_table(void)
{
	/* Dump the kernel half of the PGD snapshotted from "init".
	 * Indices 768..1023 cover the region above PAGE_OFFSET on a
	 * 32-bit 3G/1G split.
	 * NOTE(review): assumes a 2-level, non-PAE x86 layout where
	 * pgd.pgd fits in 32 bits -- confirm for the target config. */
	pgd_t* pg_dir = init_mm.pgd;
	int i;

	for ( i = 768; i < 1024; i++) {
		if (0 == (unsigned long)pg_dir[i].pgd)
			/* empty slot: nothing mapped here */
			printk("\t\t[ %#x ], [ ---------- ]\n", i);
		else
			printk("\t\t[ %#x ], [ %#.8x ]\n", i, 
					(unsigned int)pg_dir[i].pgd);
	}
}

/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/*
 * Clear the Accessed/Dirty bits of @pte, optionally flush the TLB
 * entry for @addr, then touch the page (write when @rw is set, read
 * otherwise) and print the PTE again so the hardware A/D-bit updates
 * can be observed.
 *
 * BUG FIX: sizeof(*pte) is a size_t and was printed with %d; use %zu
 * (mismatched printk specifiers are undefined behavior).
 */
static void pte_clear_and_memory_write(void *addr, pte_t *pte,
					bool rw, bool flush_tlb)
{
	test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *) &pte->pte);
	test_and_clear_bit(_PAGE_BIT_DIRTY, (unsigned long *) &pte->pte);
	log("[ clear the A and D bits ] pte : %zu, %p ,  pte: %#x",
		sizeof(*pte), addr, (int)pte->pte);

	if (flush_tlb) {
		__flush_tlb_one((unsigned long) addr);
		log("--> flush the TLB for tlb consistency <--");
	}

	if (rw)
		memset(addr, 23, 24);
	else {
		int tmp = *(int *)addr;
		log("value %d", tmp);
	}

	log("[ after write the memory ] pte : %zu, %p ,  pte: %#x\n",
		sizeof(*pte), addr, (int)pte->pte);
}

static int test_page_access(void)
{
	struct page *page;
	void *addr;
	unsigned long idx;
	pte_t *pte;
	pte_t* kmap_pte;
	unsigned long vaddr;

	if (1) {
		log("begin : %d, end : %d, %d, %d",
			FIX_KMAP_BEGIN, FIX_KMAP_END, KM_TYPE_NR, NR_CPUS);
		log("FIXADDR_TOP : %#x", (unsigned int)FIXADDR_TOP);
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
		log("vaddr :  %#x ", (unsigned int)vaddr);

		kmap_pte = pte_offset_kernel(
				pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr),
			       	vaddr);
	}

	if (1) {
		page = alloc_page(GFP_HIGHUSER);
		if (PageHighMem(page))
			log("ok high memory\n");
		else {
			log("We need high memory page");
			__free_page(page);
			return -1;
		}

		addr = kmap_atomic(page, KM_USER0);
		idx = virt_to_fix((unsigned long) addr);
		pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
		if (page == pte_page(*pte)) {
			log("[ the origin ] pte : %d, %p , idx : %lu, pte: %#x",
				sizeof(*pte), addr, idx, (int)pte->pte);
			
			pte_clear_and_memory_write(addr, pte, 1, 1);
			pte_clear_and_memory_write(addr, pte, 0, 1);
			pte_clear_and_memory_write(addr, pte, 1, 1);
			pte_clear_and_memory_write(addr, pte, 0, 1);
			pte_clear_and_memory_write(addr, pte, 1, 1);
			/* load_cr3(pg_dir); */
		}
		kunmap_atomic(addr, KM_USER0);
		__free_page(page);
	}
	return 0;
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */

/* Print the user-visible fixmap (vDSO) window and its size in bytes. */
static void test_vdso(void)
{
	unsigned long lo = FIXADDR_USER_START;
	unsigned long hi = FIXADDR_USER_END;

	printk(" %lx, %lx, delta :%lu\n", lo, hi, hi - lo);
}

/* Snapshot the global VM event counters and dump every slot. */
static void test_vm_events(void)
{
	unsigned long ev[NR_VM_EVENT_ITEMS] = {};
	int idx;

	all_vm_events(ev);
	for (idx = 0; idx < NR_VM_EVENT_ITEMS; idx++)
		log("-- i : %d, [ %lu ]", idx, ev[idx]);
}

/* Compare 32 pages against one PMD span, in bytes and in pages. */
static void test_pmd_size(void)
{
	int span_bytes = PAGE_SIZE * 32;
	int pmd_bytes = PMD_SIZE;

	log(" a : %d, b : %d", span_bytes, pmd_bytes);
	log(" in page : a : %d, b : %d", 32, pmd_bytes >> 12);
}


/* find the process "init" which has the origin init_mm */
static int prepare_for_test(void)
{
	struct task_struct *head = &init_task;
	struct task_struct *iter = NULL;

	/* Scan the global task list for a task named "init" that still
	 * owns (or borrows) an mm, and snapshot that mm by value. */
	list_for_each_entry(iter, &head->tasks, tasks) {
		struct mm_struct *mm;

		if (strcmp(iter->comm, "init") != 0)
			continue;
		mm = iter->mm ? iter->mm : iter->active_mm;
		if (mm) {
			init_mm = *mm;
			return 1;
		}
	}
	return 0;
}

static void test_percpu(void)
{
	/* Dump per-CPU base offsets and (in the disabled branch) the
	 * CPU-0 GDT descriptors.  Iterates all NR_CPUS slots, including
	 * CPUs that may not be present. */
	unsigned long *pc = __per_cpu_offset;
	struct pagevec *pv;
	struct desc_struct *gdt;
	int i;

	log("NR_CPUS : %d", NR_CPUS);
	for (i = 0; i < NR_CPUS; i++)
		log("%.2d --> %lu", i, pc[i]);

	/* GDT of percpu */
	if (0) {	/* dead code: GDT dump disabled */
		/* NOTE(review): hard-coded kernel address below is only
		 * valid for the one build it was taken from. */
		pv = (struct pagevec *) 0xc096e9d8;
		log("-- %lu", *(unsigned long*)pv);
		gdt = get_cpu_gdt_table(0);

		for (i = 0; i < GDT_ENTRIES; i++, gdt++) {
			log("GDT[%d] : %x, %x", i, gdt->b, gdt->a);
			log("GDT[%d] : type : %d, s : %d, dpl : %d, p : %d, g : %d, d : %d, l : %d",
				i, gdt->type, gdt->s, gdt->dpl, gdt->p, gdt->g, gdt->d, gdt->l);	
			log();
		}
	}
	
	/* other */
	log(" %lx", (unsigned long)pcpu_base_addr);
	log("%dk, %dk ", PERCPU_MODULE_RESERVE / 1024, PERCPU_DYNAMIC_RESERVE / 1024);
}

/*
static void test_zone(struct zone *zone)
{
	int i;
	log();
	log("------------ ZONE : [ %s ] page lock hash nr : %ld-------------", 
			zone->name, zone->wait_table_hash_nr_entries);

	for (i = 0; i < zone->wait_table_hash_nr_entries; i++) {
		struct list_head *head = &zone->wait_table[i].task_list;
		struct wait_bit_queue *q;
		wait_queue_t *wait;
		struct page *cur;
		int num = 0;

		list_for_each_entry(wait, head, task_list) {
			num++;
		}
		if (num)
			log("%d", num);
	}
	log("-----------------------------------\n");
}
*/

/*
 * Decode zone->pageblock_flags and print the migrate type of every
 * pageblock in @zone (mirrors usemap_size() and
 * get_pageblock_flags_group()).
 *
 * BUG FIX: PB_migrate..PB_migrate_end spans 3 bits, so the decoded
 * value can reach 7, but type_str has only MIGRATE_TYPES + 1 entries
 * -- the original could read past the end of the table.  The loop
 * index is now unsigned long to match usemapsize.
 */
static void test_zone_pageblock(struct zone *zone)
{
	static const char *type_str[MIGRATE_TYPES + 1] = {
		"MIGRATE_UNMOVABLE  ",
		"MIGRATE_RECLAIMABLE",
		"MIGRATE_MOVABLE    ",
		"MIGRATE_PCPTYPES / MIGRATE_RESERVE",
		"MIGRATE_ISOLATE    ",
		"MIGRATE_TYPES      ",
	};
	unsigned long zonesize = zone->spanned_pages;
	unsigned long *bitmap = zone->pageblock_flags;
	unsigned long usemapsize, bitidx;
	unsigned long i;
	int migrate_type;

	/* number of pageblocks covering the zone */
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;

	for (i = 0; i < usemapsize; i++) {
		unsigned long flags = 0;
		unsigned long value = 1;
		int start_bitidx = PB_migrate, end_bitidx = PB_migrate_end;

		bitidx = i * NR_PAGEBLOCK_BITS;
		for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
			if (test_bit(bitidx + start_bitidx, bitmap))
				flags |= value;

		migrate_type = flags;
		if (migrate_type > MIGRATE_TYPES) {
			/* out-of-range encoding: report it, don't index */
			printk("zone [ %8s ] %.3lu : %d : [ UNKNOWN ]\n",
				zone->name, i, migrate_type);
			continue;
		}
		printk("zone [ %8s ] %.3lu : %d : [ %s ]\n",
			zone->name, i, migrate_type, type_str[migrate_type]);
	}
}

/*
 * Render a character map of @zone: '.' untouched, 'X' invalid pfn,
 * digit/letter = buddy block of that order, '~' tail pages of a buddy
 * block, 'L' LRU pages.  Printed 80 columns per row.
 *
 * BUG FIX: when a buddy block's tail crossed end_pfn, the inner '~'
 * loop kept incrementing low_pfn and wrote past the end of buf; the
 * loop now also stops at the zone boundary.
 */
static void test_zone_page(struct zone *zone)
{
	unsigned long low_pfn, end_pfn, base;
	struct page *page;
	char *buf;
	unsigned long flg;

	low_pfn = base = zone->zone_start_pfn;
	end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	buf = vmalloc(zone->spanned_pages);	/* one byte per page */
	if (!buf) {
		log("error");
		return;
	}
	memset(buf, '.', zone->spanned_pages);

	spin_lock_irqsave(&zone->lock, flg);
	for (; low_pfn < end_pfn; low_pfn++) {
		if (!pfn_valid_within(low_pfn)) {
			buf[low_pfn - base] = 'X';
			continue;
		}
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page)) {
			/* page_private() holds the buddy order */
			unsigned int order = page_private(page);

			if (order < 10)
				buf[low_pfn - base] = '0' + order;
			else
				buf[low_pfn - base] = 'A' + order - 10;

			if (order > 0) {
				unsigned long k, run = 1UL << order;

				/* mark tail pages, clamped to end_pfn */
				for (k = 1; k < run && low_pfn + 1 < end_pfn; k++) {
					low_pfn++;
					buf[low_pfn - base] = '~';
				}
			}

			continue;
		}
		if (PageLRU(page)) {
			buf[low_pfn - base] = 'L';
			continue;
		}
	}
	spin_unlock_irqrestore(&zone->lock, flg);

	log("-------------------- ZONE : %s -------------", zone->name);
	for (low_pfn = base; low_pfn < end_pfn; low_pfn++) {
		printk("%c", buf[low_pfn - base]);
		if (low_pfn - base > 0 && ((low_pfn - base + 1) % 80 == 0))
			printk("\n");
	}
	printk("\n");
	log("+++++++++++++++++++++++++++++++++++++++++++\n");
	vfree(buf);
}

/*
 * Dump node-0 statistics (pgdat fields, si_meminfo) and run the
 * per-zone tests on every populated zone.
 *
 * BUG FIX: sizeof(pg_data_t) is a size_t and was printed with %d; use
 * %zu.  The long-disabled "#if 0" buddy-index scratch code was
 * removed.
 */
static void test_node_stat(void)
{
	struct sysinfo val = {}, *si = &val;
	struct zone *zone;
	pg_data_t *pgdat;
	int i;

	printk("\t\t ++++++ < For understanding thd page_alloc.c > ++++++\n\n");
	pgdat = NODE_DATA(0);
	log(" nr_zones : %d, pfn: %lu, P %lu, Span : %lu, max : %d",
		pgdat->nr_zones, pgdat->node_start_pfn,
		pgdat->node_present_pages, pgdat->node_spanned_pages,
		pgdat->kswapd_max_order);

	log(" zones : %d, shift : %d, sizeall : %zu",
		MAX_NR_ZONES, ZONES_SHIFT, sizeof(pg_data_t));
	si_meminfo(&val);
	log(" %lu, %lu, %lu, %lu, %lu, %u",
		si->totalram, si->freeram, si->bufferram,
		si->totalhigh, si->freehigh, si->mem_unit);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = &pgdat->node_zones[i];
		if (!populated_zone(zone))
			continue;
		if (0)	/* per-page map dump, very noisy: disabled */
			test_zone_page(zone);
		if (1)
			test_zone_pageblock(zone);
	}
}

/* Print pageblock constants.
 * NOTE(review): the first line prints pageblock_order >> 1, not the
 * raw value -- preserved as-is, but confirm the halving is intended. */
static void test_page_alloc(void)
{
	int value = pageblock_order;

	log("pageblock_order : %d", value >> 1);
	value = pageblock_nr_pages;
	log("pageblock_nr_pages : %d", value);
}

/*
 * Decode the size/direction fields of the VIDIOC_G_FMT ioctl number
 * and compute the offset just past v4l2_format.type (cf. video_ioctl2).
 *
 * BUG FIX: sizeof(struct v4l2_format) is a size_t and was printed
 * with %d; 'n' is unsigned long and was printed with %ld.  Use %zu
 * and %lu respectively.
 */
static void test_v4l2_cmd(void)
{
	/*video_ioctl2 */
	unsigned int cmd = VIDIOC_G_FMT;
	int size;
	unsigned long n;

	size = _IOC_SIZE(cmd);
	log("size : %d", size);
	if (_IOC_DIR(cmd) & _IOC_WRITE)
		log("ok");
	log("real size : %zu", sizeof(struct v4l2_format));

	/* byte offset of the end of the 'type' member */
	n = offsetof(struct v4l2_format, type) +
		sizeof(((struct v4l2_format *)0)->type);
	log("n : %lu", n);
}

/* Look up the "poseidon" module by name and report its refcount. */
static void test_module_generic(void)
{
	struct module *mod = find_module("poseidon");

	if (!mod) {
		log("can not find module poseidon");
		return;
	}
	log("module ref : %d", module_refcount(mod));
}

static void test_dentry(struct dentry *dentry_root)
{
	/* Print a dentry's name and refcount, then list its children.
	 * NOTE(review): walks d_subdirs without d_lock and reads
	 * d_count raw; the d_count field's type is kernel-version
	 * dependent -- acceptable only as debug output. */
	struct dentry *child;
	int n = 0;

	log("dentry: %s, %d", dentry_root->d_name.name,
		dentry_root->d_count);

	list_for_each_entry(child, &dentry_root->d_subdirs, d_u.d_child) {
		log("index : %d name :  [ %s ]", n++, child->d_name.name);
	}
}
/* Print the inode number of @inode. */
static void test_inode(struct inode *inode)
{
	log("root inode : %ld", inode->i_ino);
}

/* Create an anonymous tmpfs-backed file and print the device name of
 * the mount it lives on, then drop the file again. */
static void test_mnt(void)
{
	struct file *shm_file;
	struct vfsmount *mnt;

	shm_file = shmem_file_setup("zyzii", 0, VM_NORESERVE);
	if (IS_ERR(shm_file))
		return;

	mnt = shm_file->f_path.mnt;
	log("name : %s", mnt->mnt_devname);

	fput(shm_file);
}
static void test_tmpfs(void)
{
	/* Find the mounted tmpfs superblock and exercise the dentry,
	 * inode and mount helpers on its root.
	 * NOTE(review): walks fs_type->fs_supers without sb_lock, and
	 * the reference taken by get_fs_type() is never released --
	 * tolerable only in throwaway debug code. */
	struct file_system_type * fs_type;

	fs_type = get_fs_type("tmpfs");
	if (fs_type) {
		struct super_block *sb;
		bool found = false;

		list_for_each_entry(sb, &fs_type->fs_supers, s_instances) {
			if (sb->s_magic == TMPFS_MAGIC) {
				found = true;
				break;
			}
		}

		if (found == false)
			return;
		else {
			/* sb only points at a real superblock because we
			 * broke out of the list walk above */
			struct dentry *dentry_root = sb->s_root;
			struct inode *inode 	= dentry_root->d_inode;

			test_dentry(dentry_root);
			test_inode(inode);
            test_mnt();
		}
	}
}

/* Show how roundup_pow_of_two() rounds a few sample values up. */
static void test_macro(void)
{
	static const int samples[] = { 3, 9, 13, 33, 65 };
	int i;

	for (i = 0; i < ARRAY_SIZE(samples); i++)
		printk(" %d --> %ld\n", samples[i],
			roundup_pow_of_two(samples[i]));
}

/* ---------------------------------------------------------------------- */
static void pte_null_not_flush(void *addr, pte_t *pte)
{
	/* Zero a live kmap PTE WITHOUT flushing the TLB, touch the page,
	 * then restore the PTE.  The stale TLB entry is still valid, so
	 * the access is expected NOT to fault -- this is the probe used
	 * by test_null_tlb().
	 * NOTE(review): "%d" is used for sizeof(*pte) (a size_t); it
	 * should be %zu but is left untouched here. */
	pte_t pte_save = *pte;
	int tmp = 0;

	/* [1] null the pte */
	pte->pte_low = 0;
	smp_wmb();	/* make the cleared PTE visible before touching addr */
	log("NULL : pte : %d, %p ,  pte: %#x", sizeof(*pte), addr, (int)pte->pte);

	/* [2] change the value with invalid PTE */
	{
		*(int *)addr = 12;
		tmp = *(int *)addr;
	}

	/* [3] restore the old PTE */
	*pte = pte_save;

	log("value %d", tmp);
	log("RESTOR :pte : %d, %p ,  pte: %#x", sizeof(*pte), addr, (int)pte->pte);
}

/* Does TLB valid, while PTE is null, DOES NOT cause page fault ? */
static int test_null_tlb(void)
{
	struct page *page;
	void *addr;
	unsigned long idx;
	pte_t *pte;
	pte_t* kmap_pte;
	unsigned long vaddr;

	if (1) {
		log("begin : %d, end : %d, %d, %d",
			FIX_KMAP_BEGIN, FIX_KMAP_END, KM_TYPE_NR, NR_CPUS);
		log("FIXADDR_TOP : %#x", (unsigned int)FIXADDR_TOP);
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
		log("vaddr :  %#x ", (unsigned int)vaddr);

		kmap_pte = pte_offset_kernel(
				pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr),
			       	vaddr);
	}

repeat:
	page = alloc_page(__GFP_HIGHMEM | __GFP_WAIT);
	if (PageHighMem(page))
		log("high memory\n");
	else {
		static int time = 0;

		log("We need high memory page");
		__free_page(page);
		if (time++ > 1000)
			return 0;
		goto repeat;
	}

	addr = kmap_atomic(page, KM_USER0);
	idx = virt_to_fix((unsigned long) addr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	if (page == pte_page(*pte)) {
		log("[ the origin ] pte : %d, %p , idx : %lu, pte: %#x",
			sizeof(*pte), addr, idx, (int)pte->pte);
		
		pte_clear_and_memory_write(addr, pte, 1, 1);
		pte_null_not_flush(addr, pte);
	}
	kunmap_atomic(addr, KM_USER0);
	__free_page(page);
	return 0;
}

#if 0
extern long long kmap_atomic_count;
extern int result_ok;
static void test_page_fault(void)
{
	log("kmap_atomic times :%ld, result_ok : %d ", 
		kmap_atomic_count, result_ok);
}
#endif

static void test_pfn(void)
{
	/* max_mapnr: number of page frames the kernel's mem_map covers. */
	log("max_mapnr : %ld ", max_mapnr);

}

/* test the status of the page */
/* Map an LRU list node back to its containing struct page. */
#define lru_to_page(_head) (list_entry((_head), struct page, lru))

/* When OK is defined, use the real page_swapcount() from swapfile.c
 * (requires it to be exported); otherwise fall back to a stub that
 * always reports 0. */
#define OK

#ifdef OK
extern int page_swapcount(struct page *page);
#else
static int page_swapcount(struct page *page)
{
	return 0;
}
#endif
static void test_page_status(void)
{
	/* Walk the inactive-anon LRU list of one zone and print every
	 * page that is mapped, in the swap cache, and has a nonzero
	 * swap count.
	 * NOTE(review): traverses the LRU without zone->lru_lock and
	 * hard-codes node_zones[1] (presumably ZONE_NORMAL on this
	 * config) -- debug-only, racy by design; confirm the index. */
	struct zone *zone;
	pg_data_t *pgdat;
	struct list_head *page_head, *page_iter;
	int n = 0;

	pgdat = &contig_page_data;
	zone = &pgdat->node_zones[1];
	page_head = &zone->lru[LRU_INACTIVE_ANON].list;

	log("begin to check the status of each ANON page in the list:");
	page_iter = page_head->next;

	while (page_iter != page_head) {
		struct page *anon_page;
		int swap_cnt;

		anon_page = lru_to_page(page_iter);
		n++;

		swap_cnt = page_swapcount(anon_page);

		if (page_mapcount(anon_page) > 0 
			&& PageSwapCache(anon_page)
			&& swap_cnt > 0)
			log("[%.3d] pfn : %ld, map : %d, "
			       "swap: %d , bk: %d, flags : %lx ", 
				n, page_to_pfn(anon_page), 
				page_mapcount(anon_page), /* map count  */
				swap_cnt,/* swap count */
				PageSwapBacked(anon_page),
				anon_page->flags);

		page_iter = page_iter->next;
	}
	log("end here : [%d] PG_swapcache bit : %d", n, PG_swapcache);
}

/*
 * Walk the iomem resource tree two levels deep and print each
 * resource's name, flags, and physical range.
 *
 * BUG FIX: the inner loop iterated with 'ch' but printed the parent
 * 'p' for every child (copy-paste error); it now prints the child's
 * own fields.
 */
static void test_iomem(void)
{
	struct resource *root = &iomem_resource;
	struct resource *p;

	log("%s, %p, P : %p, S : %p, %llx, %llx\n",
		root->name, root, root->parent, root->sibling,
		(unsigned long long)root->start,
		(unsigned long long)root->end);

	for (p = root->child; p ; p = p->sibling) {
		log("%s [%lx] range : [ %lx ~ %lx ]",
			p->name, p->flags,
			(unsigned long)p->start,
			(unsigned long)p->end);

		if (p->child) {
			struct resource *ch;

			for (ch = p->child; ch; ch = ch->sibling)
				log("\t* %s [%lx] range : [ %lx ~ %lx ]",
					ch->name, ch->flags,
					(unsigned long)ch->start,
					(unsigned long)ch->end);
		}
	}
	log("[ %0*d ]", 16, 133);
}

/* =================== test the KOBJECT of the kernel. haha :D  ========*/
/* Backing variables for the sysfs attributes created in test_sysfs(). */
static int foo;
static int baz;
static int bar;

/*
 * The "foo" file where a static variable is read from and written to.
 */
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	/* Report the current value of 'foo' followed by a newline. */
	return sprintf(buf, "%d\n", foo);
}

/*
 * Store handler for the "foo" file.
 *
 * BUG FIX: the format string "%du" was parsed as "%d" followed by a
 * literal 'u' and the sscanf result was ignored; use plain "%d" and
 * reject input with no integer in it.
 */
static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	if (sscanf(buf, "%d", &foo) != 1)
		return -EINVAL;
	return count;
}

/*
 * More complex function where we determine which varible is being accessed by
 * looking at the attribute for the "baz" and "bar" files.
 */
static ssize_t b_show(struct kobject *kobj, struct kobj_attribute *attr,
		      char *buf)
{
	int var;

	if (strcmp(attr->attr.name, "baz") == 0)
		var = baz;
	else
		var = bar;
	return sprintf(buf, "%d\n", var);
}

/*
 * Shared store routine for the "baz" and "bar" files; dispatches on
 * the attribute name.
 *
 * BUG FIX: the format string "%du" was parsed as "%d" followed by a
 * literal 'u' and the sscanf result was ignored; use plain "%d" and
 * reject input with no integer in it.
 */
static ssize_t b_store(struct kobject *kobj, struct kobj_attribute *attr,
		       const char *buf, size_t count)
{
	int var;

	if (sscanf(buf, "%d", &var) != 1)
		return -EINVAL;
	if (strcmp(attr->attr.name, "baz") == 0)
		baz = var;
	else
		bar = var;
	return count;
}

/* sysfs attributes: "foo" has its own handlers, "baz"/"bar" share
 * b_show/b_store and dispatch on the attribute name.
 * NOTE(review): mode 0666 makes these world-writable; checkpatch
 * normally rejects writable-by-all sysfs files -- confirm intent. */
static struct kobj_attribute foo_attribute =
	__ATTR(foo, 0666, foo_show, foo_store);
static struct kobj_attribute baz_attribute =
	__ATTR(baz, 0666, b_show, b_store);
static struct kobj_attribute bar_attribute =
	__ATTR(bar, 0666, b_show, b_store);


/* Kobject created by test_sysfs(); torn down in zyzii_exit(). */
static struct kobject *kobj = NULL;
static struct attribute *attrs[] = {
	&foo_attribute.attr,
	&baz_attribute.attr,
	&bar_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

static struct attribute_group attr_group = {
	.attrs = attrs,
};

/*
 * Create /sys/fsl-otp with the foo/baz/bar attribute group.
 *
 * BUG FIX: the original had an unreachable log() after the return,
 * and on sysfs_create_group() failure it dropped the kobject but left
 * the stale 'kobj' pointer set -- zyzii_exit() would then del/put it
 * a second time (use-after-free).  Clear the pointer after the put.
 */
static int test_sysfs(void)
{
	int retval;

	kobj = kobject_create_and_add("fsl-otp", NULL);
	if (!kobj)
		return -ENOMEM;

	retval = sysfs_create_group(kobj, &attr_group);
	if (retval) {
		kobject_put(kobj);
		kobj = NULL;
	}

	return retval;
}
/* ===================================================================== */

static int test_case = 7;	/* selects which test zyzii_init() runs */

static int __init zyzii_init(void)
{
	/* Module entry point: snapshot init's mm via prepare_for_test(),
	 * then dispatch to the one test selected by 'test_case'. */
	printk("\n\t===================== 测试代码=======================\n\n");
	if (!prepare_for_test())
		return -1;	/* "init" task (or its mm) not found */

	switch (test_case) {
	case 17:
		test_sysfs();
		break;
	case 16:
		test_iomem();
		break;
	case 15:
		test_page_status();
		break;
	case 14:
		test_pfn();
		break;
	case 13:
		test_null_tlb();
		break;
	case 12:
		test_macro();
			break;
	case 11:
		test_tmpfs();
		break;
	case 10:
		test_module_generic();
		break;
	case 9:
		test_v4l2_cmd();
		break;
	case 8:
		test_page_alloc();
		break;
	case 7:
		test_node_stat();
		break;
	case 6:
		test_percpu();
		break;
	case 0:
		test_page_access();
		break;
	case 1:
		test_vdso();
		break;
	case 2: 
		test_page_table();
		break;
	case 3:
		browse_mm(&init_mm);
		break;
	case 4:
		test_pmd_size();
		break;
	case 5:
		test_vm_events();
		break;
	default:
		/* unknown test_case: do nothing */
		break;	
	}
	printk("\n\t===================== 测试代码=======================\n\n");
	return 0;
}

/*
 * Module exit: tear down the sysfs kobject created by test_sysfs().
 *
 * BUG FIX: guard against a NULL 'kobj' (kobject_create_and_add() may
 * have failed) and clear the pointer after releasing it so the
 * del/put pair can never run on a stale object.
 */
static void __exit zyzii_exit(void)
{
	if (test_case == 17 && kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
		kobj = NULL;
	}

	log("---------------- bye world ---------------------");
}

module_init(zyzii_init);
module_exit(zyzii_exit);
