#include <linux/types.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/map.h>
#include <asm/memory.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/domain.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm-generic/sections.h>

#include "mm.h"

/* Kernel-mode page protection bits.  Not initialised in this file;
 * presumably set up by the CPU/arch init code before mappings are
 * created -- TODO confirm against the caller. */
pgprot_t pgprot_kernel;

/* Hard-wired for this port: exception vectors always live at the
 * high address (0xffff0000). */
#define vectors_high()	(1)

/*
 * Walk the registered memory banks, compacting the bank array in
 * place and tagging each bank's highmem flag.  Since no banks are
 * dropped and highmem stays 0 throughout, every bank is currently
 * copied onto itself with highmem cleared.
 */
static void sanity_check_meminfo(void)
{
	int src, dst;
	int highmem = 0;

	for (src = 0, dst = 0; src < meminfo.nr_banks; src++) {
		struct membank *out = &meminfo.bank[dst];

		*out = meminfo.bank[src];
		out->highmem = highmem;
		dst++;
	}

	meminfo.nr_banks = dst;
}

/*
 * Common protection bits for device mappings (PTE and section level).
 * Fully parenthesised so the macros expand safely inside larger
 * expressions regardless of the surrounding operators' precedence.
 */
#define PROT_PTE_DEVICE		(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE)
#define PROT_SECT_DEVICE	(PMD_TYPE_SECT | PMD_SECT_AP_WRITE)

/*
 * Memory-type table: per-type PTE protection bits, L1 descriptor type,
 * section protection bits, and the ARM domain each mapping belongs to.
 * Indexed by the MT_* constants used in struct map_desc.
 */
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		/* NOTE(review): upstream uses PMD_SECT_XN here; this port
		 * grants RW access instead -- confirm this is intentional. */
		// .prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {	/* cache-flush region: section-only, XN */
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {	/* minicache-flush region: section-only, XN */
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {	/* vector page mapped at address 0 */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {	/* vector page mapped at 0xffff0000 */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {		/* normal RAM */
		/* NOTE(review): this port adds AP_READ and write-back
		 * caching relative to the upstream default. */
		// .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE | PMD_SECT_WB,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {		/* read-only: no AP_WRITE bit */
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
};

/* Virtual base of the exception vectors: 0xffff0000 when high vectors
 * are enabled (always true in this port), otherwise 0. */
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Fill in second-level page table entries covering [addr, end),
 * mapping them to consecutive physical pages starting at @pfn with
 * the PTE protections from @type.
 *
 * If the PMD slot is empty, a new L2 table is allocated from bootmem
 * first.  2 * PTRS_PER_PTE entries are allocated because the ARM
 * Linux port keeps a hardware PTE table alongside the Linux-view
 * table in the same allocation.
 *
 * NOTE(review): the bootmem allocation is not checked for NULL --
 * alloc_bootmem_low_pages() is assumed to panic on failure; confirm
 * that holds in this environment.
 */
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

/*
 * Map [addr, end) to physical address @phys under the given PGD.
 * Uses 1MB section mappings when virtual address, physical address
 * and end are all section-aligned; otherwise falls back to
 * second-level page tables via alloc_init_pte().
 */
static void alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		/* Odd section within the 2MB PGD entry: use the second
		 * of the two L1 slots. */
		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

/*
 * Create the kernel mapping described by @md: section mappings where
 * everything is 1MB-aligned, page mappings otherwise.  Warnings are
 * printed for suspicious requests, but only unmappable ones are
 * actually rejected.
 */
void create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	/*
	 * NOTE(review): the upstream sanity check rejecting mappings in
	 * the user region (md->virtual < TASK_SIZE, except the vector
	 * base) has been disabled in this port -- confirm intentional.
	 */

	/* Device/ROM mappings should not live in the kernel linear map
	 * or vmalloc area; warn but proceed. */
	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printf("BUG: mapping for 0x%08llx at 0x%08lx overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses: supersection support is not wired up
	 * here, so such mappings are silently dropped.
	 */
	if (md->pfn >= 0x100000) {
		// create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	/* Types with no L1 table descriptor can only be section-mapped;
	 * reject unaligned requests for them. */
	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printf("BUG: map for 0x%08lx at 0x%08lx can not be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Wipe the boot-time page tables so only the mappings created later
 * in paging_init() remain.  Two physical windows are deliberately
 * left mapped (presumably loader/boot data still in use -- TODO
 * confirm which hardware/firmware regions these are):
 *   0x00A00000 - 0x00C00000 and 0x02000000 - 0x02500000.
 */
static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image, except
	 * for the preserved windows above.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) {
		int preserved = (addr >= 0x02000000 && addr <= 0x02500000) ||
				(addr >= 0xA00000 && addr <= 0xC00000);

		if (!preserved)
			pmd_clear(pmd_off_k(addr));
	}

	/* Clear the module area up to the start of the kernel mapping.
	 * (XIP-kernel skip-over from upstream is not needed here.) */
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * NOTE(review): upstream also clears kernel-space mappings from
	 * the end of the first memory bank up to VMALLOC_END; that step
	 * is disabled in this port -- confirm intentional.
	 */
}

/*
 * Reserve the various regions of node 0
 */
void reserve_node_zero(pg_data_t *pgdat)
{
	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
			BOOTMEM_DEFAULT);
#else
	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
			BOOTMEM_DEFAULT);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
}

/*
 * Set up the exception-vector mappings: allocate the vector page and
 * map it at the high-vectors address (0xffff0000), plus at address 0
 * if low vectors were in use, then flush caches/TLBs so the new
 * tables take effect.
 *
 * NOTE(review): the upstream machine-specific static device mappings
 * (mdesc->map_io) and the vmalloc-area clear are disabled in this
 * port -- confirm intentional.
 */
static void devicemaps_init(void)
{
	struct map_desc map;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	/* %p: printing a pointer with %x is a format mismatch. */
	printf("%s: vector page allocated at %p\r\n", __func__, vectors);

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		/* pfn and length carry over from the high-vectors map. */
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

/*
 * Top-level MMU setup: validate/compact the memory-bank table, wipe
 * stale boot mappings, initialise the bootmem allocator, then create
 * the vector mappings.  Order matters: bootmem must be up before
 * devicemaps_init() allocates the vector page.
 */
void paging_init(void)
{
	sanity_check_meminfo();
	prepare_page_table();
	bootmem_init();
	devicemaps_init();
}
