#include<linux/init.h>
#include<linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/genhd.h>
#include <linux/mmzone.h>
//#include <linux/page-states.h>
#include <linux/delay.h>

MODULE_LICENSE("GPL v2");


//#walk through inactive list
static int mc(void)
{
	//#access the page list,get the page pointer vlaue
	//#use zone's inactive_list,nr_inactive,name
	unsigned long i=0,j=0,k=1;
	int l=0;
	struct zone zone_iter;
	struct list_head list_iter;
	struct page *sh=NULL;
	struct list_head *tmp1=NULL ;
	struct list_head *tmp2=NULL ;
	struct buffer_head *bh=NULL;
	//atomic_t count_t;

	for(i=0;i<contig_page_data.nr_zones;i++)
	{
		zone_iter = contig_page_data.node_zones[i];
		spin_lock(&zone_iter.lru_lock);
		list_iter=zone_iter.inactive_list;
		if(list_empty(&list_iter))
			goto out;

		tmp1=list_iter.next;
		tmp2=tmp1->next;
		while(tmp1!=tmp2)
		{
			sh=list_entry(tmp2,struct page,lru);
			if(sh==NULL)
			{
				printk(KERN_ALERT "page pointer is NULL\n");
				goto out;
			}
			

			//check the page-pointer vlaue in the RAM(/proc/kcore or /dev/kmem)

			//or, just output the page descriptor's pointer vlaue,the left work will be done in userspace
			l=atomic_read(&sh->_count);
			if(l > k)
			{
				spin_lock(&sh->mapping->private_lock);
				printk(KERN_ALERT "page descriptor values: %llu\n",sh);
				printk(KERN_ALERT "page flags: %lu \n",sh->flags);
				printk(KERN_ALERT "page reference count: %d\n",l);
				if(page_has_buffers(sh))
				{
					bh=page_buffers(sh);
					printk(KERN_ALERT " \
disk_name:%s  \
device_name:%s%d  \
|device_num: %d, %d  \
|first_minor: %d  \ 
|block_num: %llu  \
|bh_status: %lu\n",  \
bh->b_bdev->bd_disk->disk_name,  \
bh->b_bdev->bd_disk->disk_name,bh->b_bdev->bd_part->partno, \
bh->b_bdev->bd_disk->major, bh->b_bdev->bd_part->partno+bh->b_bdev->bd_disk->first_minor, \
bh->b_bdev->bd_disk->first_minor,  \
bh->b_blocknr,  \
bh->b_state);
				}				
				else{
					printk(KERN_ALERT "page %llu has no buffers.\n",sh);
				}
				spin_unlock(&sh->mapping->private_lock);
			}

			tmp2=tmp2->next;
			j++;
		}
		printk(KERN_ALERT "zone: %s |page numbers in inactive_list: %lu : %lu\n",zone_iter.name,j,zone_iter.nr_inactive);
		j=0;
		spin_unlock(&zone_iter.lru_lock);
	}

out:
	return 0;
}

//cache check,stats
//walk through cache,no possible, for the data struct is defined in slab.c,then it has the access space control
//check to see whether there is a system call to do this?
//1.check api docs,how to check all the api of the current kernel?
//2.check /proc/slabinfo 's source code
//#include <../kernel/workqueue.c>
//#include <../mm/slab.c>

#include        <linux/config.h>
#include        <linux/slab.h>
#include        <linux/mm.h>
#include        <linux/swap.h>
#include        <linux/cache.h>
#include        <linux/interrupt.h>
#include        <linux/init.h>
#include        <linux/compiler.h>
#include        <linux/cpuset.h>
#include        <linux/seq_file.h>
#include        <linux/notifier.h>
#include        <linux/kallsyms.h>
#include        <linux/cpu.h>
#include        <linux/sysctl.h>
#include        <linux/module.h>
#include        <linux/rcupdate.h>
#include        <linux/string.h>
#include        <linux/nodemask.h>
#include        <linux/mempolicy.h>
#include        <linux/mutex.h>

#include        <asm/uaccess.h>
#include        <asm/cacheflush.h>
#include        <asm/tlbflush.h>
#include        <asm/page.h>

/*
 * Mirror of the debug/stats switches in mm/slab.c: the structure
 * definitions below change layout depending on these values, so they
 * must be derived here exactly as the kernel derives them.
 */
#ifdef CONFIG_DEBUG_SLAB
#define DEBUG           1
#define STATS           1
#define FORCED_DEBUG    1
#else
#define DEBUG           0
#define STATS           0
#define FORCED_DEBUG    0
#endif

/* Free-list index type used inside a slab (copied from mm/slab.c). */
typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END      (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE     (((kmem_bufctl_t)(~0U))-1)
#define SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-2)

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from an general cache.
 * Slabs are chained into three list: fully used, partial, fully free slabs.
 */
/*
 * NOTE(review): local copy of the private 'struct slab' from mm/slab.c.
 * It must stay byte-for-byte identical to this kernel version's
 * definition, or every list_entry() in cc() reads garbage.
 */
struct slab {
        struct list_head list;
        unsigned long colouroff;
        void *s_mem;            /* including colour offset */
        unsigned int inuse;     /* num of objs active in slab */
        kmem_bufctl_t free;
        unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
/* NOTE(review): local copy of mm/slab.c's private 'struct slab_rcu';
 * kept only so the layouts above/below stay in sync with the kernel. */
struct slab_rcu {
        struct rcu_head head;
        struct kmem_cache *cachep;
        void *addr;
};

/* NOTE(review): local copy of mm/slab.c's private per-CPU object cache.
 * Must match the kernel's layout exactly; cc() reads 'avail' from it. */
struct array_cache {
        unsigned int avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int touched;
        spinlock_t lock;
        void *entry[0];         /*
                                 * Must have this definition in here for the proper
                                 * alignment of array_cache. Also simplifies accessing
                                 * the entries.
                                 * [0] is for gcc 2.95. It should really be [].
                                 */
};

/* NOTE(review): local copy of mm/slab.c's private per-node slab lists.
 * cc() walks slabs_partial/slabs_full/slabs_free and reads free_objects;
 * any layout drift from this kernel's definition corrupts the walk. */
struct kmem_list3 {
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        spinlock_t list_lock;
        struct array_cache *shared;     /* shared per node */
        struct array_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
};

/*
 * NOTE(review): local copy of mm/slab.c's private 'struct kmem_cache'.
 * This (and the DEBUG/STATS switches above, which gate trailing fields)
 * must stay byte-for-byte identical to the running kernel's definition:
 * cc() casts chain nodes to this type and dereferences nearly every
 * member.  Do not edit fields here without re-checking mm/slab.c.
 */
struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
        struct array_cache *array[NR_CPUS];
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;
        unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
        struct kmem_list3 *nodelists[MAX_NUMNODES];
        unsigned int flags;     /* constant flags */
        unsigned int num;       /* # of objs per slab */
        spinlock_t spinlock;

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t gfpflags;

        size_t colour;          /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;
        unsigned int dflags;    /* dynamic flags */

        /* constructor func */
        void (*ctor) (void *, struct kmem_cache *, unsigned long);

        /* de-constructor func */
        void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 4) cache creation/removal */
        const char *name;
        struct list_head next;  /* link on the global cache chain; cc() walks this */

/* 5) statistics */
#if STATS
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;
#endif
#if DEBUG
        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. buffer_size contains the total
         * object size including these internal fields, the following two
         * variables contain the offset to the user object and its size.
         */
        int obj_offset;
        int obj_size;
#endif
};

//copy these struct's defines,and now it's possible to walk through slab(cache)
//the return data is the same as /proc/slabinfo
static int cc(void)
{
        //how to walk through kmem_cache_t list?(cache_cache or malloc_sizes[] or cache_chain?, 3 type caches)

        int i=0,j=0,k=0;
	unsigned long flags = 0;
	unsigned long flags2 = 0;
	unsigned long  taotal_cache_num=0;
	unsigned long  cache_size=0;
	unsigned long  total_cache_num=0;
	unsigned long  free_obj_num=0;
	unsigned long  slab_num=0;
	unsigned int  free_obj_num2=0;
	unsigned int  active_obj_num=0;
	unsigned int  total_obj_num=0;
        kmem_cache_t * normal_cache_p1=NULL;
        kmem_cache_t * normal_cache_p2=NULL;
        kmem_cache_t * dma_cache_p1=NULL;
        kmem_cache_t * dma_cache_p2=NULL;
	struct list_head * normal = NULL;
	struct list_head * normal2 = NULL;
	struct list_head * dma = NULL;
	struct list_head *tmp1, *tmp2, *tmp3;
	struct slab * slabp = NULL;

	//13 types cache size,put here for reference
        //int cache_size_tmp[]={32,64,128,256,512,1024,2048,4096,8192,16384,32768,65536,131072};

	//seems kmem_cache *cs_cachep is a big circle list(all caches are listed circlely),
	//so not need to loop 13 times
        //for(i=0;i<13;i++)
        for(i=0;i<1;i++)
        {
		cache_size = malloc_sizes[i].cs_size;
		printk(KERN_ALERT "...............................................................\n");
		/*
		printk(KERN_ALERT "cache size : %lu\n", cache_size);
		printk(KERN_ALERT ">>>>>>>>>>>>>>>>>>>>>>>>>>kmem_cache *cs_cachep pointer value : %lu\n", malloc_sizes[i].cs_cachep);
		//is this a circle list? yes.
		normal_cache_p2 = malloc_sizes[i].cs_cachep;
                normal_cache_p1 = normal_cache_p2;
		normal2 = &normal_cache_p1->next;
		normal = normal2->next;
		do {
			printk(KERN_ALERT "kmem_cache *cs_cachep pointer value : %lu\n", normal);
			normal = normal->next;
		}while(normal != NULL && normal != normal2);
		continue;
		*/
		
		//is this a circle list? yes.
		normal_cache_p2 = malloc_sizes[i].cs_cachep;
                normal_cache_p1 = normal_cache_p2;

                dma_cache_p1 = dma_cache_p2 = malloc_sizes[i].cs_dmacachep;
                //dma = dma_cache_p2->next.next;

		normal2 = &normal_cache_p1->next;
		normal = normal2->next;
		do
		{
			printk(KERN_ALERT ".............................................................\n");
			normal_cache_p1 = list_entry(normal,struct kmem_cache, next);

			//spin_lock_irqsave(&normal_cache_p1->spinlock, flags);
			//printk(KERN_ALERT "MAX_NUMNODES : %lu \n", MAX_NUMNODES);
			for(j=0;j<MAX_NUMNODES;j++)
			{
				if(normal_cache_p1->nodelists[j] != NULL)
				{
					free_obj_num += normal_cache_p1->nodelists[j]->free_objects;
					
					//spin_lock_irqsave(&normal_cache_p1->nodelists[j]->list_lock, flags2);
					//walk through slabs_partial, pay attention, it's a circle list,and has list head,
					//so we should skip the head
					tmp1 = normal_cache_p1->nodelists[j]->slabs_partial.next;
					tmp2 = &normal_cache_p1->nodelists[j]->slabs_partial;
					printk(KERN_ALERT "slabs_partial->>>>>>>>>>>>>>>>\n");
					while(tmp1 != NULL && tmp1 != tmp2)
					{
						slabp = list_entry(tmp1, struct slab, list);
						active_obj_num += slabp->inuse;
						printk(KERN_ALERT "active : %u\n", slabp->inuse);
						slab_num++;
						tmp1 = tmp1->next;
					}

					//walk through slabs_full
					tmp1 = normal_cache_p1->nodelists[j]->slabs_full.next;
					tmp2 = &normal_cache_p1->nodelists[j]->slabs_full;
					printk(KERN_ALERT "slabs_full->>>>>>>>>>>>>>>>\n");
					while(tmp1 != NULL && tmp1 != tmp2)
					{
						slabp = list_entry(tmp1, struct slab, list);
						active_obj_num += slabp->inuse;
						printk(KERN_ALERT "active : %u\n", slabp->inuse);
						slab_num++;
						tmp1 = tmp1->next;
					}

					//walk through slabs_free
					tmp1 = normal_cache_p1->nodelists[j]->slabs_free.next;
					tmp2 = &normal_cache_p1->nodelists[j]->slabs_free;
					printk(KERN_ALERT "slabs_free->>>>>>>>>>>>>>>>\n");
					while(tmp1 != NULL && tmp1 != tmp2)
					{
						slabp = list_entry(tmp1, struct slab, list);
						active_obj_num += slabp->inuse;
						printk(KERN_ALERT "active : %u\n", slabp->inuse);
						slab_num++;
						tmp1 = tmp1->next;
					}
					//spin_unlock_irqrestore(&normal_cache_p1->nodelists[j]->list_lock, flags2);
				}
			}
			//spin_unlock_irqrestore(&normal_cache_p1->spinlock, flags);

			total_obj_num = active_obj_num + free_obj_num;
			printk(KERN_ALERT "cache_name : %s \n", normal_cache_p1->name);
			printk(KERN_ALERT "total : %u |active: %u |free: %u \n", total_obj_num, active_obj_num, free_obj_num);
			printk(KERN_ALERT "slab_num : %lu \n",slab_num);
			printk(KERN_ALERT "slabsize : %lu \n",normal_cache_p1->slab_size);
			printk(KERN_ALERT "obj_perslab : %lu \n",normal_cache_p1->num);
			printk(KERN_ALERT "obj_size : %lu \n",normal_cache_p1->buffer_size);
			printk(KERN_ALERT "batch_count : %lu \n",normal_cache_p1->batchcount);
			printk(KERN_ALERT "limit : %lu \n",normal_cache_p1->limit);
			printk(KERN_ALERT "pages_per_slab : %lu \n",1<<normal_cache_p1->gfporder);
			printk(KERN_ALERT "shared_factor : %lu \n", normal_cache_p1->shared);
			for(k=0;k<NR_CPUS;k++)
			{
				if(normal_cache_p1->array[k] != NULL)
					printk(KERN_ALERT "avail : %lu \n", normal_cache_p1->array[k]->avail);
			}
			free_obj_num = 0;
			free_obj_num2 = 0;
			active_obj_num = 0;
			total_obj_num = 0;
			slab_num = 0;
			normal = normal->next;
		}while(normal != NULL && normal != normal2);
        }

        return 0;
}


/* Result pair produced by the LRU scanners below: how many pages were
 * judged reclaimable and how many were examined in total. */
struct data_ret
{
	unsigned long reclaim;	/* pages judged reclaimable */
	unsigned long total;	/* pages scanned */
};

/*
 * Decide whether @page could be dropped without any further action
 * (no writeback, no unmap).  Returns 1 if discardable, 0 otherwise.
 *
 * A page is rejected when it is dirty, reserved, under writeback,
 * locked, carries private (buffer) data, is already discarded, is not
 * up to date, is off the LRU, or is anonymous without a swap-cache
 * entry.  A page with no mapping passes; otherwise it is discardable
 * only when the sole references are its mappers plus the page cache
 * (mapcount + 1 == refcount).
 *
 * NOTE(review): PageDiscarded() and mapping->mlocked come from the
 * out-of-tree "page states" patch set (cf. CONFIG_PAGE_STATES and the
 * commented page-states.h include above) -- this function only builds
 * on kernels carrying that patch.
 */
int mem_page_discardable(struct page* page)
{
	struct address_space *mapping;

	if (PageDirty(page) || PageReserved(page) || PageWriteback(page) ||
	    PageLocked(page) || PagePrivate(page) || PageDiscarded(page) ||
	    !PageUptodate(page) || !PageLRU(page) ||
	    (PageAnon(page) && !PageSwapCache(page)))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 1;
#ifdef CONFIG_PAGE_STATES
	if (mapping->mlocked)
		return 0;
#endif

	/* only the mappers and the page cache itself hold references */
	if (page_mapcount(page) + 1 != page_count(page))
		return 0;

	return 1;
}

unsigned long mem_free_pages(void)
{
	int i=0,j=0;
	struct zone zone_iter;
	unsigned long free_page_num=0;
	unsigned long flags;

        for(i=0;i<contig_page_data.nr_zones;i++)
        {
                zone_iter = contig_page_data.node_zones[i];
retry:          if(spin_is_locked(&zone_iter.lock))
                {
                        msleep(5);
                        goto retry;
                }
                spin_lock_irqsave(&zone_iter.lock,flags);
                for(j=0;j<MAX_ORDER;j++)
                {
                        //printk(KERN_ALERT "number: %lu\n", zone_iter.free_area[j].nr_free);
                        free_page_num += (zone_iter.free_area[j].nr_free*(1<<j));
                }
                //free_page_num += contig_page_data.node_zones[i].free_pages;
                spin_unlock_irqrestore(&zone_iter.lock,flags);
                msleep(5);
        }

	return free_page_num;
}

struct data_ret mem_reclaim_inactive(void)
{
	int i;
	struct zone zone_iter;
	struct list_head list_iter;
	struct list_head *tmp1=NULL ;
	struct list_head *tmp2=NULL ;
	struct page *sh=NULL;
	unsigned long avail_page_num_inactive=0;
	unsigned long total_page_num_inactive=0;
	unsigned long flags;
	struct data_ret data = {0,0};

        for(i=0;i<contig_page_data.nr_zones;i++)
        {
                zone_iter = contig_page_data.node_zones[i];
retry2:         if(spin_is_locked(&zone_iter.lru_lock))
                {
                        msleep(5);
                        goto retry2;
                }
                spin_lock_irqsave(&zone_iter.lru_lock,flags);
                //total_page_num += zone_iter.nr_inactive;
                list_iter=zone_iter.inactive_list;
                if(list_empty(&list_iter))
                {
                        spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
			return data;
                }

                tmp1=list_iter.next;
                tmp2=tmp1->next;
                while(tmp1!=tmp2)
                {
                        sh=list_entry(tmp2,struct page,lru);
                        if(sh==NULL)
                        {
                                printk(KERN_ALERT "page pointer is NULL\n");
                                spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
                                return data;
                        }
                        if(mem_page_discardable(sh))
                        {
                                avail_page_num_inactive++;
                        }

                        tmp2=tmp2->next;
                        total_page_num_inactive++;
                }
                spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
                msleep(5);
        }

	data.reclaim = avail_page_num_inactive;
	data.total = total_page_num_inactive;
	return data;
}

struct data_ret mem_reclaim_active(void)
{
	int i;
	struct zone zone_iter;
	struct list_head list_iter;
	struct list_head *tmp1=NULL ;
	struct list_head *tmp2=NULL ;
	struct page *sh=NULL;
	unsigned long avail_page_num_active=0;
	unsigned long total_page_num_active=0;
	unsigned long flags;
	struct data_ret data = {0,0};

        for(i=0;i<contig_page_data.nr_zones;i++)
        {
                zone_iter = contig_page_data.node_zones[i];
retry3:         if(spin_is_locked(&zone_iter.lru_lock))
                {
                        msleep(5);
                        goto retry3;
                }
                spin_lock_irqsave(&zone_iter.lru_lock,flags);
                //total_page_num += zone_iter.nr_active;
                list_iter=zone_iter.active_list;
                if(list_empty(&list_iter))
                {
                        spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
			return data;
                }

                tmp1=list_iter.next;
                tmp2=tmp1->next;
                while(tmp1!=tmp2)
                {
                        sh=list_entry(tmp2,struct page,lru);
                        if(sh==NULL)
                        {
                                printk(KERN_ALERT "page pointer is NULL\n");
                                spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
                                return data;
                        }
                        if(mem_page_discardable(sh))
                        {
                                avail_page_num_active++;
                        }

                        tmp2=tmp2->next;
                        total_page_num_active++;
                }
                spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
                msleep(5);
        }
	
	data.reclaim = avail_page_num_active;
	data.total = total_page_num_active;
	return data;
}

unsigned long mem_reclaim_noneed_action(void)
{
	int i;
	struct zone zone_iter;
	struct list_head list_iter;
	struct list_head *tmp1=NULL ;
	struct list_head *tmp2=NULL ;
	struct page *sh=NULL;
	unsigned long avail_page_num_noneed_actions=0;
	unsigned long flags;
	//struct buffer_head *bh=NULL;
	//unsigned int page_buffer_size=0;

        for(i=0;i<contig_page_data.nr_zones;i++)
        {
                zone_iter = contig_page_data.node_zones[i];
retry4:         if(spin_is_locked(&zone_iter.lru_lock))
                {
                        msleep(5);
                        goto retry4;
                }
                spin_lock_irqsave(&zone_iter.lru_lock,flags);
                //total_page_num += zone_iter.nr_inactive;
                list_iter=zone_iter.inactive_list;
                if(list_empty(&list_iter))
                {
                        spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
                        return 0;
                }

                tmp1=list_iter.next;
                tmp2=tmp1->next;
                while(tmp1!=tmp2)
                {
                        sh=list_entry(tmp2,struct page,lru);
                        if(sh==NULL)
                        {
                                printk(KERN_ALERT "page pointer is NULL\n");
                                spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
				return 0;
                        }
                        if(PageLocked(sh) || PageWriteback(sh) || PageReserved(sh) || PageDirty(sh) ||
                                //PageReferenced(sh) ||
                                //(PagePrivate(sh) && PageWriteback(sh)) ||
                                //(PageMappedToDisk(sh) && PageDirty(sh)) ||
                                page_mapped(sh) ||
                                (page_count(sh) !=2) /*caller's ref + pagecache ref*/ )
                        {
                                ;
                        }
                        else
                        {
                                avail_page_num_noneed_actions++;
                                //if page has buffers,and account buffers size
                                /*if(page_has_buffers(sh))
                                {
                                        bh=page_buffers(sh);
                                        //page's buffers is a circular list
                                        struct buffer_head *bh_iter=bh->b_this_page;
                                        do{
                                                page_buffer_size += bh_iter->b_size;
                                                bh_iter = bh_iter->b_this_page;
                                        }while(bh_iter != bh);
                                }*/
                        }

                        tmp2=tmp2->next;
                        //total_page_num++;
                }
                spin_unlock_irqrestore(&zone_iter.lru_lock,flags);
                msleep(5);
        }
	
	return avail_page_num_noneed_actions;
}


//avail_memory = free pages + pages can be reclaimed
static int avail_memory(void)
{
	unsigned long free_page_num=0;
	unsigned long avail_page_num_inactive=0;
	unsigned long avail_page_num_active=0;
	unsigned long avail_page_num_noneed_actions=0;
	unsigned long total_page_num_inactive=0;
	unsigned long total_page_num_active=0;

	//1.get free pages from buddy
	free_page_num = mem_free_pages();

	//2.check inactive list,and get available(reclaim-able) pages number
	avail_page_num_inactive = mem_reclaim_inactive().reclaim;
	total_page_num_inactive = mem_reclaim_inactive().total;

	//3.check active list,get reclaim-able page number: (is it needed?for it's in active)!
	avail_page_num_active = mem_reclaim_active().reclaim;
	total_page_num_active = mem_reclaim_active().total;

	//4.check inactive (& active list),get reclaim-able pages,
	//that no need more actions(for example: write page to disk;unmap pages;no need swap)
	//according to  page's status flags(page's life cycle)
	//if page has buffers,also account buffers size(in PFRA,
	//if page has buffers,it will drop buffers first,then free pages,but the memory is superposed)
	avail_page_num_noneed_actions = mem_reclaim_noneed_action();

	printk(KERN_INFO "free-> %lu|inactive-> %lu:%lu|active-> %lu:%lu|avail(no_action)-> %lu\n", \
		free_page_num,total_page_num_inactive,avail_page_num_inactive,\
		total_page_num_active,avail_page_num_active,avail_page_num_noneed_actions);

	return 0;
}

static int mc_init(void)
{
	/*
	 * Module entry point.  Only the slab-cache walker (cc) is
	 * currently enabled; the LRU walker (mc) and the available-memory
	 * report (avail_memory) are compiled in but left uninvoked, as in
	 * the original (their calls were commented out).
	 */
	cc();

	return 0;
}
static void mc_exit(void)
{
	/* Nothing to tear down: the walkers hold no persistent resources. */
}

/* Register the load/unload handlers with the module loader. */
module_init(mc_init);
module_exit(mc_exit);
