#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/fs.h>
#include <asm-i386/atomic.h>
#include <asm-i386/types.h>
#include <asm-i386/bitops.h>
#include <linux/list.h>
#include <linux/mmzone.h>
/*
 * Bit numbers within page->flags.  They are queried and updated through
 * the bitops pulled in from <asm-i386/bitops.h> (test_bit, set_bit,
 * test_and_set_bit, ...) via the Page* wrapper macros in this header.
 * The bits at and above ZONE_SHIFT are reserved for the page's zone
 * index (see page_zone()/set_page_zone() below), so flag bits must
 * stay below that boundary.
 */
#define PG_locked		 0	/* Page is locked. Don't touch. */
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_dirty		 4
#define PG_unused		 5
#define PG_lru			 6
#define PG_active		 7
#define PG_slab			 8
#define PG_skip			10	/* note: bit 9 is deliberately skipped here */
#define PG_highmem		11
#define PG_checked		12	/* kill me in 2.5.<early>. */
#define PG_arch_1		13
#define PG_reserved		14
#define PG_launder		15	/* written out by VM pressure.. */
#define PG_fs_1			16	/* Filesystem specific */

/* LRU/active list flag accessors.  The TestSet/TestClear forms return
 * the previous bit value, letting callers detect a lost race. */
#define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
#define PageActive(page)	test_bit(PG_active, &(page)->flags)
#define TestSetPageLRU(page)	test_and_set_bit(PG_lru, &(page)->flags)
#define TestClearPageLRU(page)	test_and_clear_bit(PG_lru, &(page)->flags)
/*
 * GFP bitmasks..
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
#define __GFP_DMA	0x01
#define __GFP_HIGHMEM	0x02

/* Action modifiers - doesn't change the zoning */
#define __GFP_WAIT	0x10	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20	/* Should access emergency pools? */
#define __GFP_IO	0x40	/* Can start low memory physical IO? */
#define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
#define __GFP_FS	0x100	/* Can call down to low-level FS? */

/*
 * Composite masks passed to the page allocator.  Each is a union of
 * the action modifiers above: GFP_ATOMIC carries no __GFP_WAIT (the
 * caller cannot reschedule), the GFP_NO* variants progressively drop
 * __GFP_FS / __GFP_HIGHIO / __GFP_IO to avoid recursing into those
 * layers, and GFP_KERNEL/GFP_NFS are identical bit patterns here.
 */
#define GFP_NOHIGHIO	(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_NOIO	(__GFP_HIGH | __GFP_WAIT)
#define GFP_NOFS	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* Page reference-count helpers: page->count is an atomic_t. */
#define set_page_count(p,v) 	atomic_set(&(p)->count, v)
#define put_page_testzero(p) 	atomic_dec_and_test(&(p)->count)

/* Per-flag accessors built on the atomic bitops (<asm-i386/bitops.h>). */
#define PageLocked(page)	test_bit(PG_locked, &(page)->flags)
#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)
#define SetPageReserved(page)		set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page)		clear_bit(PG_reserved, &(page)->flags)
#define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)
#define PageSetSlab(page)	set_bit(PG_slab, &(page)->flags)
#define PageClearSlab(page)	clear_bit(PG_slab, &(page)->flags)

/* page->flags bits at and above ZONE_SHIFT hold the page's zone index
 * (8 bits); see page_zone()/set_page_zone() below. */
#define ZONE_SHIFT (BITS_PER_LONG - 8)

/* Record a page's kernel virtual address.
 * NOTE(review): this writes (page)->virtual, but the struct page
 * declared below has no 'virtual' field — presumably config-dependent;
 * confirm against this macro's users before relying on it. */
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)

/* Duplicate definitions of GFP_DMA, __free_pages() and __free_page()
 * formerly here were removed; the canonical copies live elsewhere in
 * this header. */
extern struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order);

/*
 * Allocate pages at the given order, deferring to _alloc_pages().
 * Orders the buddy allocator cannot satisfy (order >= MAX_ORDER)
 * yield NULL.  With a constant 'order' the bounds check is folded
 * away by the compiler.
 */
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
    return (order < MAX_ORDER) ? _alloc_pages(gfp_mask, order) : NULL;
}

/* Free pages by descriptor (__free_pages) or by kernel virtual
 * address (free_pages); implemented in the allocator elsewhere. */
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

/* Single-page (order 0) convenience wrappers. */
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

/* Same as __get_free_pages() but constrained to the DMA-capable zone. */
#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

/*
 * Kernel virtual address of a page: its index within the owning
 * zone's mem_map gives the physical offset from the zone's start
 * address, which __va() converts to a virtual address.
 */
#define page_address(page)						\
	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
			+ page_zone(page)->zone_start_paddr)

/*
 * One descriptor per physical page frame; the global array of these is
 * mem_map (hence the mem_map_t typedef).  The high bits of 'flags'
 * (at and above ZONE_SHIFT) encode the owning zone's index.
 */
typedef struct page {
    struct list_head list;		/* ->mapping has some page lists. */
    struct address_space *mapping;	/* The inode (or ...) we belong to. */
    unsigned long index;		/* Our offset within mapping. */
    struct page *next_hash;		/* Next page sharing our hash bucket in
					   the pagecache hash table. */
    atomic_t count;			/* Usage count, see below. */
    unsigned long flags;		/* atomic flags, some possibly
					   updated asynchronously */
    struct list_head lru;		/* Pageout list, eg. active_list;
					   protected by pagemap_lru_lock !! */
    struct page **pprev_hash;	/* Complement to *next_hash. */
    struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
    /* NOTE(review): set_page_address() above writes (page)->virtual,
       but no such field is declared here — confirm whether a
       config-dependent member is missing from this copy. */
} mem_map_t;

extern struct zone_struct *zone_table[];

/*
 * Map a page back to its zone: the zone's index into zone_table is
 * stored in the top bits of page->flags (see ZONE_SHIFT and
 * set_page_zone()).
 */
static inline zone_t *page_zone(struct page *page)
{
    unsigned long zone_idx = page->flags >> ZONE_SHIFT;

    return zone_table[zone_idx];
}
/* Globals describing physical memory; defined by arch/mm init code
 * elsewhere. */
extern mem_map_t * mem_map;		/* array of page descriptors, one per frame */
extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern unsigned long num_mappedpages;
extern void * high_memory;		/* first address beyond directly-mapped RAM
					   — presumably; confirm against arch code */

/*
 * Store a zone index in the top bits of page->flags, preserving the
 * low ZONE_SHIFT flag bits.  Inverse of the lookup in page_zone().
 */
static inline void set_page_zone(struct page *page, unsigned long zone_num)
{
    unsigned long flag_bits = page->flags & ((1UL << ZONE_SHIFT) - 1);

    page->flags = flag_bits | (zone_num << ZONE_SHIFT);
}

/* Boot-time zone / mem_map initialisation entry points and the basic
 * page allocator; implemented elsewhere (allocator / arch init code). */
extern void free_area_init(unsigned long *zones_size);
extern void free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
                                unsigned long *zones_size, unsigned long zone_start_paddr,
                                unsigned long *zholes_size, struct page *lmem_map);
extern void mem_init(void);
extern unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order);
#endif /* _LINUX_MM_H */
