#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/page-flags.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/log2.h>

#include <lib/div64.h>

#include <asm-generic/memory_model.h>
#include <asm/processor.h>

/* GFP flags permitted during early boot (restricted allocation context). */
gfp_t gfp_allowed_mask = GFP_BOOT_MASK;

/* Total number of RAM pages known to the allocator. */
unsigned long totalram_pages;

/* Human-readable zone names, indexed by zone number; entries follow config. */
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

static unsigned long nr_kernel_pages;	/* pages in non-highmem zones */
static unsigned long nr_all_pages;	/* pages in all zones */
static unsigned long dma_reserve;	/* pages reserved out of zone 0 */

/* Flat-memory node descriptor, backed by bootmem node 0. */
struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };

/* Defined elsewhere; updated by memmap_init_zone() below. */
extern unsigned long highest_memmap_pfn;

/* Fill one zoneref slot with @zone and its index within its node. */
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone_idx = zone_idx(zone);
	zoneref->zone = zone;
}

/*
 * Append the populated zones of @pgdat to @zonelist, walking from
 * @zone_type down to zone 0.  @nr_zones is the next free slot index;
 * the updated slot index is returned.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	int zt;

	BUG_ON(zone_type >= MAX_NR_ZONES);

	/* Highest zone first, so allocation falls back downwards. */
	for (zt = zone_type; zt >= 0; zt--) {
		struct zone *z = pgdat->node_zones + zt;

		if (populated_zone(z))
			zoneref_set_zone(z, &zonelist->_zonerefs[nr_zones++]);
	}
	return nr_zones;
}

/*
 * Build the single zonelist of @pgdat: the local node's zones first
 * (highest zone index first), then the zones of every other online
 * node, starting with node local_node+1 to spread pressure.  The list
 * is NULL-terminated.
 */
static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type j;	/* next free slot in the zoneref array */
	struct zonelist *zonelist;

	local_node = pgdat->node_id;

	zonelist = &pgdat->node_zonelists[0];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}

	/* Terminate the list so iterators know where to stop. */
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

static int __build_all_zonelists(void *dummy)
{
	int nid;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		build_zonelists(pgdat);
		// build_zonelist_cache(pgdat);
	}
	return 0;
}

/*
 * Public entry point: (re)build the zonelists of all nodes.  In this
 * port only the boot-time path is implemented; the hotplug path
 * (stop_machine) and mobility-grouping bookkeeping remain disabled.
 */
void build_all_zonelists(void)
{
	// set_zonelist_order();

	// if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		// mminit_verify_zonelist();
		// cpuset_init_current_mems_allowed();
	// } else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
		// stop_machine(__build_all_zonelists, NULL, NULL);
		/* cpuset refresh routine should be here */
	// }
	// vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later
	 */
// 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
// 		page_group_by_mobility_disabled = 1;
// 	else
// 		page_group_by_mobility_disabled = 0;

// 	// printf("Built %i zonelists in %s order, mobility grouping %s.  "
// 		"Total pages: %ld\n",
// 			nr_online_nodes,
// 			zonelist_order_name[current_zonelist_order],
// 			page_group_by_mobility_disabled ? "off" : "on",
// 			vm_total_pages);
// #ifdef CONFIG_NUMA
// 	// printf("Policy zone: %s\n", zone_names[policy_zone]);
// #endif
}

/*
 * Number of pages spanned by zone @zone_type on node @nid.  With the
 * flat memory model the architecture supplies a per-zone size table,
 * so this is a direct lookup; @nid is unused here.
 */
static inline unsigned long zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	unsigned long pages = zones_size[zone_type];

	return pages;
}

/*
 * Number of hole pages in zone @zone_type on node @nid.  A NULL hole
 * table means the node has no holes; @nid is unused here.
 */
static inline unsigned long zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	return zholes_size ? zholes_size[zone_type] : 0;
}

static void calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printf("On node %d totalpages: %lu\r\n", pgdat->node_id,
							realtotalpages);
}

#if 0	/* compiled out: per-zone page waitqueues are not supported yet */
/*
 * zone_wait_table_init - allocate and initialise the hashed waitqueue
 * table a zone uses for wait_on_page-style sleeping.  Kept for when
 * the waitqueue machinery is ported; only the bootmem branch is live.
 */
static int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		 wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	// if (!slab_is_available()) {
		zone->wait_table = (wait_queue_head_t *)alloc_bootmem_node(pgdat, alloc_size);
	// } else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		// zone->wait_table = vmalloc(alloc_size);
	// }
	if (!zone->wait_table)
		return -ENOMEM;

	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}
#endif

/*
 * memmap_init_zone - initialise the struct pages backing one zone.
 * @size:      number of pages in the zone
 * @nid:       owning node id
 * @zone:      zone index within the node
 * @start_pfn: first pfn of the zone
 * @context:   MEMMAP_EARLY for boot-time init (pfn holes possible)
 *
 * Walks [start_pfn, start_pfn + size) and links each valid page to its
 * zone/node.  Also records the highest pfn that has a memmap entry.
 */
void memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;
	/* NOTE(review): z is set but unused while the migratetype code
	 * below remains commented out. */
	struct zone *z;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

	z = &NODE_DATA(nid)->node_zones[zone];
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s
		 * handed to this function.  They do not
		 * exist on hotplugged memory.
		 */
		if (context == MEMMAP_EARLY) {
			if (!early_pfn_valid(pfn))
				continue;
			if (!early_pfn_in_nid(pfn, nid))
				continue;
		}
		page = pfn_to_page(pfn);
		/* Encode zone/node placement into page->flags. */
		set_page_links(page, zone, nid, pfn);
		// mminit_verify_page_links(page, zone, nid, pfn);
		// init_page_count(page);
		// reset_page_mapcount(page);
		// SetPageReserved(page);
		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made. Later some blocks near
		 * the start are marked MIGRATE_RESERVE by
		 * setup_zone_migrate_reserve()
		 *
		 * bitmap is created for zone's valid pfn range. but memmap
		 * can be created for invalid pages (for alignment)
		 * check here not to call set_pageblock_migratetype() against
		 * pfn out of zone.
		 */
		// if ((z->zone_start_pfn <= pfn)
		//     && (pfn < z->zone_start_pfn + z->spanned_pages)
		//     && !(pfn & (pageblock_nr_pages - 1)))
		// 	set_pageblock_migratetype(page, MIGRATE_MOVABLE);

		// INIT_LIST_HEAD(&page->lru);
// #ifdef WANT_PAGE_VIRTUAL
// 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
// 		if (!is_highmem_idx(zone))
// 			set_page_address(page, __va(pfn << PAGE_SHIFT));
// #endif
	}
}

/* Start every (order, migratetype) free list of @zone out empty. */
static void zone_init_free_lists(struct zone *zone)
{
	int o, mt;

	for_each_migratetype_order(o, mt) {
		struct free_area *fa = &zone->free_area[o];

		INIT_LIST_HEAD(&fa->free_list[mt]);
		fa->nr_free = 0;
	}
}

/*
 * init_currently_empty_zone - set up an (initially page-less) zone.
 * @zone:           zone to initialise
 * @zone_start_pfn: first pfn the zone will span
 * @size:           number of pages the zone will span
 * @context:        unused here (kept for interface compatibility)
 *
 * Records the zone's start pfn, bumps the node's zone count, and
 * empties the zone's buddy free lists.  The wait-table setup from the
 * original kernel is still disabled.  Always returns 0.
 *
 * Fix: removed the unused local `ret`; its only assignment lived in
 * the commented-out wait-table call, so it triggered an
 * unused-variable warning.
 */
int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size,
					enum memmap_context context)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	// ret = zone_wait_table_init(zone, size);
	// if (ret)
	// 	return ret;

	/* Zones up to and including this one are now initialised. */
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	printf("memmap_init: Initialising map node %d zone %lu pfns %lu -> %lu\r\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	return 0;
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
/* Default memmap initialiser: boot-time init via memmap_init_zone(). */
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif

/*
 * free_area_init_core - initialise every zone of one node.
 *
 * For each zone: compute its spanned and real (hole- and
 * memmap-adjusted) sizes, account reserved DMA pages, fill in the
 * zone descriptor, and initialise the buddy free lists and memmap.
 *
 * Fix: removed the unused local `enum lru_list l` — its only user,
 * the for_each_lru() loop, is commented out, so the declaration only
 * produced an unused-variable warning.
 */
static void free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	// pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;
	// pgdat_page_cgroup_init(pgdat);

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages =
			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			if (memmap_pages)
				printf("  %s zone: %lu pages used for memmap\r\n",
				       zone_names[j], memmap_pages);
		} else
			printf("  %s zone: %lu pages exceeds realsize %lu\r\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved pages */
		if (j == 0 && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printf("  %s zone: %lu pages reserved\r\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
// #ifdef CONFIG_NUMA
// 		zone->node = nid;
// 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
// 						/ 100;
// 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
// #endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		// zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;

		zone->prev_priority = DEF_PRIORITY;

		// zone_pcp_init(zone);
		// for_each_lru(l) {
		// 	INIT_LIST_HEAD(&zone->lru[l].list);
		// 	zone->reclaim_stat.nr_saved_scan[l] = 0;
		// }
		// zone->reclaim_stat.recent_rotated[0] = 0;
		// zone->reclaim_stat.recent_rotated[1] = 0;
		// zone->reclaim_stat.recent_scanned[0] = 0;
		// zone->reclaim_stat.recent_scanned[1] = 0;
		zap_zone_vm_stats(zone);
		zone->flags = 0;
		/* Empty zones get descriptors but no free lists or memmap. */
		if (!size)
			continue;

		// set_pageblock_order(pageblock_default_order());
		// setup_usemap(pgdat, zone, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		memmap_init(size, nid, j, zone_start_pfn);
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		zone_start_pfn += size;
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
}

/*
 * alloc_node_mem_map - allocate the flat struct-page array for one node.
 *
 * Both ends of the map are rounded to MAX_ORDER alignment so the buddy
 * allocator can always locate a page's buddy within the array.  Node
 * 0's map also becomes the global mem_map.
 *
 * Fixes: the debug printfs used `%d` for an unsigned long (`size`) and
 * `%lx` for raw pointers without a cast — both undefined behavior per
 * the C printf contract.  Now `%lu` and explicit casts are used.
 */
static void alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

// #ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size =  (end - start) * sizeof(struct page);
		printf("this is %s(): %d >> size=%lu\r\n", __func__, __LINE__, size);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
		printf("this is %s(): %d >> map=%lx, pgdat->node_mem_map=%lx\r\n",
		       __func__, __LINE__, (unsigned long)map,
		       (unsigned long)pgdat->node_mem_map);
	}
// #ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		printf("this is %s(): %d >> mem_map=%lx\r\n",
		       __func__, __LINE__, (unsigned long)mem_map);
// #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
// 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
// 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
// #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
	}
// #endif
// #endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

/*
 * free_area_init_node - top-level per-node memory initialisation.
 * @nid:            node id
 * @zones_size:     per-zone spanned page counts
 * @node_start_pfn: first pfn of the node
 * @zholes_size:    per-zone hole page counts (may be NULL)
 *
 * Computes the node's totals, allocates its memmap, then initialises
 * all of its zones.
 */
void free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long node_start_pfn, unsigned long *zholes_size)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);
// #ifdef CONFIG_FLAT_NODE_MEM_MAP
	// printf("free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\r\n",
		// nid, (unsigned long)pgdat, (unsigned long)pgdat->node_mem_map);
// #endif

	free_area_init_core(pgdat, zones_size, zholes_size);
}










/*
 * Mark @page as the head of a free buddy block of the given order:
 * the order is stashed in page->private and the PageBuddy flag set.
 */
static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

/* Undo set_page_order(): clear PageBuddy and the stored order. */
static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the buddy of @page at @order.  Flipping bit `order` of the
 * page index yields the buddy's index; the pointer is adjusted by the
 * (possibly negative) index difference.
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

/*
 * Index of the merged block formed by a page and its buddy at @order:
 * clearing bit `order` selects the lower of the two halves.
 */
static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return page_idx & ~(1UL << order);
}

/* Order of a free buddy block, as stored by set_page_order(). */
static inline unsigned long page_order(struct page *page)
{
	// VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}

/*
 * Can @page merge with @buddy at @order?  True only when @buddy is a
 * valid pfn, is itself free (PageBuddy), and is free at exactly the
 * same order.  The same-zone check from the original kernel is
 * currently disabled.
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	// if (page_zone_id(page) != page_zone_id(buddy))
	// 	return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		// VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * expand - split a block of order @high down to order @low.
 *
 * Each iteration halves the block and returns the upper half to the
 * free list one order below, leaving the caller with the lower half
 * at the requested order.
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
		area--;
		high--;
		size >>= 1;
		// VM_BUG_ON(bad_range(zone, &page[size]));
		/* Hand the upper half back to the order-`high` free list. */
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * __rmqueue_smallest - take a block of at least @order from @zone.
 *
 * Scans free lists from @order upward; the first non-empty list of
 * the wanted migratetype supplies a block, which is split down to
 * @order via expand().  Returns NULL if nothing suitable is free.
 * Caller must hold zone->lock.
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		/* Return any excess halves to the lower-order lists. */
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}

/*
 * __rmqueue - allocate a block from @zone.  The migratetype-fallback
 * and MIGRATE_RESERVE retry paths of the original kernel are disabled,
 * so this is currently a thin wrapper around __rmqueue_smallest().
 * Caller must hold zone->lock.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	page = __rmqueue_smallest(zone, order, migratetype);

	// if (!page && migratetype != MIGRATE_RESERVE) {
	// 	page = __rmqueue_fallback(zone, order, migratetype);

	// 	/*
	// 	 * Use MIGRATE_RESERVE rather than fail an allocation. goto
	// 	 * is used because __rmqueue_smallest is an inline function
	// 	 * and we want just one call site
	// 	 */
	// 	if (!page) {
	// 		migratetype = MIGRATE_RESERVE;
	// 		goto retry_reserve;
	// 	}
	// }

	// trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * buffered_rmqueue - allocate a 2^order block from @zone.
 *
 * In the original kernel, order-0 allocations are served from a
 * per-cpu page (pcp) cache.  That cache is not implemented in this
 * port (all of its code was commented out), which left the old
 * `order == 0` branch returning an UNINITIALIZED `page` and calling
 * local_irq_restore() with an uninitialized `flags` — undefined
 * behavior.  Fix: every order, including 0, now takes the buddy
 * allocator path under zone->lock.
 *
 * Returns the allocated page, or NULL if the zone has no suitable
 * free block.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	/* Hot/cold hint for the pcp cache; unused until pcp is ported. */
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

	cpu = get_cpu();
	(void)cold;
	(void)cpu;

	spin_lock_irqsave(&zone->lock, flags);
	page = __rmqueue(zone, order, migratetype);
	// __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
	spin_unlock(&zone->lock);
	if (!page)
		goto failed;

	// __count_zone_vm_events(PGALLOC, zone, 1 << order);
	// zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	// VM_BUG_ON(bad_range(zone, page));
	// if (prep_new_page(page, order, gfp_flags))
	// 	goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/* Allocation-behaviour modifier bits, OR-ed into alloc_flags. */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 *
 * NOTE(review): the watermark checks, zone-reclaim and zonelist-cache
 * (zlc) logic are all commented out, so every eligible zone is simply
 * tried in order until buffered_rmqueue() succeeds.  The zlc locals
 * (allowednodes, zlc_active, did_zlc_setup) and classzone_idx are kept
 * for when that code returns, but are currently unused.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		// if (NUMA_BUILD && zlc_active &&
		// 	!zlc_zone_worth_trying(zonelist, z, allowednodes))
		// 		continue;
		// if ((alloc_flags & ALLOC_CPUSET) &&
		// 	!cpuset_zone_allowed_softwall(zone, gfp_mask))
		// 		goto try_next_zone;

		// BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
		// if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
		// 	unsigned long mark;
		// 	int ret;

		// 	mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		// 	if (zone_watermark_ok(zone, order, mark,
		// 		    classzone_idx, alloc_flags))
		// 		goto try_this_zone;

		// 	if (zone_reclaim_mode == 0)
		// 		goto this_zone_full;

		// 	ret = zone_reclaim(zone, gfp_mask, order);
		// 	switch (ret) {
		// 	case ZONE_RECLAIM_NOSCAN:
		// 		/* did not scan */
		// 		goto try_next_zone;
		// 	case ZONE_RECLAIM_FULL:
		// 		/* scanned but unreclaimable */
		// 		goto this_zone_full;
		// 	default:
		// 		/* did we reclaim enough */
		// 		if (!zone_watermark_ok(zone, order, mark,
		// 				classzone_idx, alloc_flags))
		// 			goto this_zone_full;
		// 	}
		// }

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
// this_zone_full:
// 		if (NUMA_BUILD)
// 			zlc_mark_zone_full(zonelist, z);
// try_next_zone:
// 		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
// 			/*
// 			 * we do zlc_setup after the first zone is tried but only
// 			 * if there are multiple nodes make it worthwhile
// 			 */
// 			allowednodes = zlc_setup(zonelist, alloc_flags);
// 			zlc_active = 1;
// 			did_zlc_setup = 1;
// 		}
	}

	// if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
	// 	/* Disable zlc cache for second zonelist scan */
	// 	zlc_active = 0;
	// 	goto zonelist_scan;
	// }
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
/*
 * This is the 'heart' of the zoned buddy allocator.
 *
 * Validates the zonelist, picks the preferred (statistics) zone, and
 * performs the first-attempt allocation.  The slow path (reclaim,
 * compaction, OOM) is not ported; on failure NULL is returned.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zone *preferred_zone;
	struct page *page;
	// int migratetype = allocflags_to_migratetype(gfp_mask);
	/* NOTE(review): hard-coded 2 — presumably MIGRATE_MOVABLE; confirm
	 * against the migratetype enum before relying on it. */
	int migratetype = 2;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	gfp_mask &= gfp_allowed_mask;

	// lockdep_trace_alloc(gfp_mask);

	// might_sleep_if(gfp_mask & __GFP_WAIT);

	// if (should_fail_alloc_page(gfp_mask, order))
		// return NULL;

	/*
	 * Check the zones suitable for the gfp_mask contain at least one
	 * valid zone. It's possible to have an empty zonelist as a result
	 * of GFP_THISNODE and a memoryless node
	 */
	if (!zonelist->_zonerefs->zone)
		return NULL;

	/* The preferred zone is used for statistics later */
	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
	if (!preferred_zone)
		return NULL;

	/* First allocation attempt */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
			preferred_zone, migratetype);
	// if (!page)
	// 	page = __alloc_pages_slowpath(gfp_mask, order,
	// 			zonelist, high_zoneidx, nodemask,
	// 			preferred_zone, migratetype);

	// trace_mm_page_alloc(page, order, gfp_mask, migratetype);
	return page;
}

/*
 * Allocate 2^order pages and return their kernel virtual address, or
 * 0 on failure.  Highmem pages have no permanent mapping, so callers
 * must not pass __GFP_HIGHMEM.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page = alloc_pages(gfp_mask, order);

	return page ? (unsigned long) page_address(page) : 0;
}










/*
 * Return the migratetype bitmap covering @pfn: the per-section bitmap
 * under sparsemem, otherwise the zone-wide bitmap.
 */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

/*
 * Bit index of @pfn's pageblock within the pageblock-flags bitmap:
 * each pageblock occupies NR_PAGEBLOCK_BITS consecutive bits.  Under
 * sparsemem the pfn is relative to its section, otherwise to the
 * zone start.
 */
static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/*
 * Read bits [start_bitidx, end_bitidx] of @page's pageblock flags and
 * return them packed into the low bits of the result (bit 0 of the
 * result corresponds to start_bitidx).
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/*
 * __free_one_page - free a 2^order block into the buddy system.
 *
 * Repeatedly merges the block with its free buddy, doubling the order
 * each time, until the buddy is busy or MAX_ORDER-1 is reached; the
 * final block is placed on the matching free list.  Caller must hold
 * zone->lock.
 */
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	// printf("this is %s(): %d >> migratetype = %d\r\n", __func__, __LINE__, migratetype);

	// if (PageCompound(page))
	// 	if (destroy_compound_page(page, order))
	// 		return;

	// VM_BUG_ON(migratetype == -1);

	/* Index within the MAX_ORDER-aligned region containing the page. */
	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	// VM_BUG_ON(page_idx & ((1 << order) - 1));
	// VM_BUG_ON(bad_range(zone, page));
	// printf("this is %s(): %d >> migratetype = %d\r\n", __func__, __LINE__, migratetype);

	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;
	// printf("this is %s(): %d >> migratetype = %d\r\n", __func__, __LINE__, migratetype);

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order)) {
			break;
		}
	// printf("this is %s(): %d >> migratetype = %d\r\n", __func__, __LINE__, migratetype);

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
	// printf("this is %s(): %d >> migratetype = %d\r\n", __func__, __LINE__, migratetype);
		zone->free_area[order].nr_free--;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
		rmv_page_order(buddy);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	set_page_order(page, order);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

/*
 * free_one_page - return one block to the buddy system under zone->lock.
 * Also resets reclaim bookkeeping since the zone just gained memory.
 */
static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	// __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	__free_one_page(page, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

/*
 * __free_pages_ok - free a 2^order block (slow path, no pcp cache).
 *
 * Sanity checks, debug hooks and migratetype lookup from the original
 * kernel are disabled; the block is freed with migratetype 2
 * (NOTE(review): presumably MIGRATE_MOVABLE — confirm against the
 * migratetype enum) under disabled interrupts.
 *
 * Fix: removed the unused local `int i` — its only user, the
 * free_pages_check() loop, is commented out.
 */
static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int bad = 0;
	// int wasMlocked = __TestClearPageMlocked(page);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	// kmemcheck_free_shadow(page, order);

	// for (i = 0 ; i < (1 << order) ; ++i)
	// 	bad += free_pages_check(page + i);
	if (bad)
		return;

	// if (!PageHighMem(page)) {
	// 	debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
	// 	debug_check_no_obj_freed(page_address(page),
	// 				   PAGE_SIZE << order);
	// }
	// arch_free_page(page, order);
	// kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	// if (wasMlocked)
	// 	free_page_mlock(page);
	// __count_vm_events(PGFREE, 1 << order);
	// free_one_page(page_zone(page), page, order,
	// 				get_pageblock_migratetype(page));
	free_one_page(page_zone(page), page, order, 2);
	local_irq_restore(flags);
}

/*
 * Free a 2^order block.  NOTE(review): put_page_testzero() is
 * commented out, so every call frees unconditionally regardless of
 * the page refcount — confirm this is intended.
 */
void __free_pages(struct page *page, unsigned int order)
{
	// if (put_page_testzero(page)) {
		// trace_mm_page_free_direct(page, order);
		// if (order == 0)
		// 	free_hot_page(page);
		// else
			__free_pages_ok(page, order);
	// }
}

/* Set the page refcount to @v (atomic store, no read-modify-write). */
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/* Initialise a fresh page's refcount to 1 (first reference). */
static inline void set_page_refcounted(struct page *page)
{
	// VM_BUG_ON(PageTail(page));
	// VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

/*
 * __free_pages_bootmem - hand pages from bootmem to the buddy allocator.
 *
 * Order 0: release a single page.  Higher orders: clear the reserved
 * flag of BITS_PER_LONG consecutive pages before freeing the block.
 * NOTE(review): the loop covers BITS_PER_LONG pages regardless of
 * @order — this assumes bootmem only frees blocks where
 * 1 << order == BITS_PER_LONG; confirm against the bootmem caller.
 */
void __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		// printf("this is %s(): %d\r\n", __func__, __LINE__);
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;
		// printf("this is %s(): %d\r\n", __func__, __LINE__);

		// prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		// printf("this is %s(): %d\r\n", __func__, __LINE__);
		__free_pages(page, order);
	}
}

/*
 * Free the 2^order pages at kernel virtual address @addr.
 * Freeing address 0 is a no-op.
 */
void free_pages(unsigned long addr, unsigned int order)
{
	if (!addr)
		return;

	// VM_BUG_ON(!virt_addr_valid((void *)addr));
	__free_pages(virt_to_page((void *)addr), order);
}

/*
 * get_order - smallest allocation order whose block covers @size bytes.
 * Equivalent to ceil(log2(ceil(size / PAGE_SIZE))), with size <=
 * PAGE_SIZE mapping to order 0.
 */
static inline int get_order(unsigned long size)
{
	unsigned long pages = (size - 1) >> PAGE_SHIFT;
	int order = 0;

	while (pages) {
		pages >>= 1;
		order++;
	}
	return order;
}

/*
 * alloc_large_system_hash - allocate a large power-of-two hash table.
 * @tablename:   name used in boot messages
 * @bucketsize:  bytes per bucket
 * @numentries:  requested entry count (rounded up to a power of two)
 * @scale:       unused in this port (auto-sizing is disabled)
 * @flags:       unused in this port
 * @_hash_shift: out: log2 of the entry count (may be NULL)
 * @_hash_mask:  out: entry-index mask (may be NULL)
 * @limit:       max entries; 0 means 1/16 of total memory / bucketsize
 *
 * Halves the table size until __get_free_pages() succeeds.  On total
 * failure it prints a message and spins forever (boot-time only).
 *
 * Fixes: cast the unsigned long returned by __get_free_pages() to
 * void * explicitly, and use %u for the unsigned entry count in the
 * report printf (was %d).
 */
void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	// if (!numentries) {
	// 	/* round applicable memory size up to nearest megabyte */
	// 	numentries = nr_kernel_pages;
	// 	numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
	// 	numentries >>= 20 - PAGE_SHIFT;
	// 	numentries <<= 20 - PAGE_SHIFT;

	// 	/* limit to 1 bucket per 2^scale bytes of low memory */
	// 	if (scale > PAGE_SHIFT)
	// 		numentries >>= (scale - PAGE_SHIFT);
	// 	else
	// 		numentries <<= (PAGE_SHIFT - scale);

	// 	/* Make sure we've got at least a 0-order allocation.. */
	// 	if (unlikely(flags & HASH_SMALL)) {
	// 		/* Makes no sense without HASH_EARLY */
	// 		WARN_ON(!(flags & HASH_EARLY));
	// 		if (!(numentries >> *_hash_shift)) {
	// 			numentries = 1UL << *_hash_shift;
	// 			BUG_ON(!numentries);
	// 		}
	// 	} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
	// 		numentries = PAGE_SIZE / bucketsize;
	// }
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		// if (flags & HASH_EARLY)
		// 	table = alloc_bootmem_nopanic(size);
		// else if (hashdist)
		// 	table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		// else {
		// 	/*
		// 	 * If bucketsize is not a power-of-two, we may free
		// 	 * some pages at the end of hash table which
		// 	 * alloc_pages_exact() automatically does
		// 	 */
		// 	if (get_order(size) < MAX_ORDER) {
		// 		table = alloc_pages_exact(size, GFP_ATOMIC);
		// 		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
		// 	}
		// }
		table = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table) {
		printf("Failed to allocate %s hash table\r\n", tablename);
		while (1);
	}

	printf("%s hash table entries: %u (order: %d, %lu bytes)\r\n",
	       tablename,
	       (1U << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
