#include <um.h>
#include <pmm.h>
#include <mmu.h>
#include <memlayout.h>
#include <error.h>
#include <buddy_pmm.h>
#include <stdio.h>
#include <sync.h>
#include <types.h>
#include <stub.h>
#include <slab.h>
#include <devices.h>

// size of memory above 0x100000
size_t mem_size = 0;

#define TEST_PAGE 0x80000000


/**************************************************
 * Page table operations
 **************************************************/

/**
 * Get @la's corresponding entry in the table.
 * @param pgdir page directory
 * @param la logical address
 * @param create alloc table if not present
 * @return the entry if found or created, or NULL if not present
 */
/**
 * Find the page-table entry that maps @la, optionally allocating the
 * page table on demand.
 * @param pgdir  page directory
 * @param la     logical address to look up
 * @param create when true, allocate and install a page table if the
 *               directory entry is not present
 * @return pointer to the PTE, or NULL if absent and not created
 */
pte_t *
get_pte(pde_t *pgdir, uintptr_t la, bool create) {
	pde_t *pdep = pgdir + PDX(la);
	if (!(*pdep & PTE_P)) {
		/* directory slot empty: bail out unless asked to populate it */
		if (!create) {
			return NULL;
		}
		struct Page *page = alloc_page();
		if (page == NULL) {
			return NULL;
		}
		/* the PDE itself holds the only reference to this table page */
		set_page_ref(page, 1);
		uintptr_t pa = page2pa(page);
		memset(KADDR(pa), 0, PGSIZE);
		*pdep = pa | PTE_U | PTE_W | PTE_P;
	}
	/* index into the (kernel-mapped) page table */
	return ((pte_t *)KADDR(PDE_ADDR(*pdep))) + PTX(la);
}

/**
 * Get the entry and page descriptor of @la
 * @param pgdir page directory
 * @param la logical address
 * @param ptep_store table entry stored if not NULL
 * @return @la's corresponding page descriptor
 */
/**
 * Look up the page descriptor mapped at @la.
 * @param pgdir      page directory
 * @param la         logical address
 * @param ptep_store if non-NULL, receives the PTE pointer (possibly NULL)
 * @return the page descriptor, or NULL when no present mapping exists
 */
struct Page *
get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store) {
	pte_t *entry = get_pte(pgdir, la, 0);
	if (ptep_store != NULL) {
		*ptep_store = entry;
	}
	if (entry == NULL || !(*entry & PTE_P)) {
		return NULL;
	}
	return pa2page(*entry);
}

/**
 * Re-establish the host-process mapping for the page at @la so it matches
 * the PTE's current permission bits ("TLB flush" in the UM port means
 * re-mmap-ing the page in the host address space).
 * Panics if @la has no present PTE.
 * @param pgdir page directory
 * @param la logical address of the page to re-map
 */
void
tlb_flush (pde_t *pgdir, uintptr_t la) 
{
	pte_t* pte = get_pte (pgdir, la, 0);
	if (pte == 0 || (*pte & PTE_P) == 0)
		panic ("invalid tlb flushing\n");
	uint32_t pa = PDE_ADDR(*pte);
	/* derive host mmap protections from the PTE flag bits */
	int r = 1, w = 1, x = 1;
	/* writable only if both the write and dirty bits are set — presumably so
	 * the first write faults and software can set PTE_D; TODO confirm */
	if (Get_PTE_W(pte) == 0 || Get_PTE_D(pte) == 0)
		w = 0;
	/* not yet accessed: map with no permissions so the first access faults */
	if (Get_PTE_A(pte) == 0)
		r = x = 0;
	
	/* drop any stale host mapping before installing the new one */
	tlb_invalidate (pgdir, la);
	
	/* MAP_SHARED over ginfo->mem_fd at offset @pa makes the host page back
	 * the emulated physical frame */
	struct mmap_arg_struct args = {
		.addr = la,
		.len = PGSIZE,
		.prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | (x ? PROT_EXEC : 0),
		.flags = MAP_SHARED,
		.fd = ginfo->mem_fd,
		.offset = pa,
	};
	cprintf ("map: 0x%x\n",  la);
	syscall1 (__NR_mmap, (long)&args);
}

/**
 * flush 'tlb' (which is process map here)
 * @param pgdir page directory
 * @param la the logical address of the page to be flushed
 */
/**
 * Invalidate one 'TLB' entry — under UM this simply munmap()s the page
 * from the host process's address space.
 * @param pgdir page directory (unused; kept for interface symmetry)
 * @param la logical address of the page to be flushed
 */
void
tlb_invalidate (pde_t *pgdir, uintptr_t la) {
	cprintf ("invalid: 0x%x\n", la);
	syscall2 (__NR_munmap, la, PGSIZE);
}

/**
 * remove a page and its entry
 * @param pgdir page directory (not used)
 * @param la logical address of the page to be removed
 * @param page table entry of the page to be removed
 */
/**
 * Tear down one present mapping: drop the page's reference (freeing it on
 * zero), clear the entry, and invalidate the stale translation.
 * @param pgdir page directory (forwarded to tlb_invalidate only)
 * @param la    logical address of the mapping
 * @param ptep  the page-table entry to clear
 */
static inline void
page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep) {
	if (*ptep & PTE_P) {
		struct Page *page = pte2page(*ptep);
		if (page_ref_dec(page) == 0) {
			free_page(page);
		}
		*ptep = 0;
		tlb_invalidate(pgdir, la);
	}
}

/**
 * remove a page and its entry in page table
 * @param pgdir page directory
 * @param la logical address of the page to be removed
 */
/**
 * Remove the page mapped at @la (no-op when nothing is mapped there).
 * @param pgdir page directory
 * @param la logical address of the page to be removed
 */
void
page_remove(pde_t *pgdir, uintptr_t la) {
	pte_t *entry = get_pte(pgdir, la, 0);
	if (entry == NULL) {
		return;
	}
	page_remove_pte(pgdir, la, entry);
}

/**
 * insert a page into the page table
 * @param pgdir page directory
 * @param page the page descriptor of the page to be inserted
 * @param la logical address of the page
 * @param perm permission of the page
 * @return 0 on success and error code when failed
 */
/**
 * Map @page at @la with permission @perm, replacing any existing mapping.
 * Reference counts are adjusted so that re-inserting the same page at the
 * same address is a no-op for the count.
 * @param pgdir page directory
 * @param page  page descriptor to map
 * @param la    logical address
 * @param perm  PTE permission bits
 * @return 0 on success, -E_NO_MEM if the page table cannot be allocated
 */
int
page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) {
	pte_t *ptep = get_pte(pgdir, la, 1);
	if (ptep == NULL) {
		return -E_NO_MEM;
	}
	/* take the new reference first so replacing a page with itself is safe */
	page_ref_inc(page);
	if (*ptep & PTE_P) {
		struct Page *old = pte2page(*ptep);
		if (old == page) {
			/* same page re-inserted: undo the extra reference */
			page_ref_dec(page);
		} else {
			page_remove_pte(pgdir, la, ptep);
		}
	}
	*ptep = page2pa(page) | PTE_P | perm;
	tlb_flush(pgdir, la);
	return 0;
}

/**
 * alloc a page and map it in the table
 * @param pgdir page directory
 * @param la logical address for the page to be allocated
 * @param perm permission of the page
 */
/**
 * Allocate one page and map it at @la.
 * @param pgdir page directory
 * @param la    logical address for the new page
 * @param perm  PTE permission bits
 * @return the new page descriptor, or NULL on allocation/mapping failure
 */
struct Page *
pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm) {
	struct Page *page = alloc_page();
	if (page == NULL) {
		return NULL;
	}
	if (page_insert(pgdir, page, la, perm) != 0) {
		/* mapping failed: give the frame back */
		free_page(page);
		return NULL;
	}
	return page;
}


/**************************************************
 * Memory tests
 **************************************************/

/**
 * Check whether the virtual physical memory created works as designed.
 */
/**
 * Check whether the virtual physical memory created works as designed:
 * write and read back an int at the start, middle, and end of the last
 * (free) e820 region.
 */
static void
check_vpm (void) 
{
	int* p;
	/* the last e820 entry is the free region set up by the loader */
	uint32_t freemem = e820map.nr_map-1;
	uint32_t freemem_start = (uint32_t)e820map.map[freemem].addr;
	uint32_t freemem_size = (uint32_t)e820map.map[freemem].size;
	
	/* write at the beginning */
	p = (int*)freemem_start;
	*p = 55;
	assert (*p == 55);

	/* write at the middle */
	p = (int*)(freemem_start + freemem_size / 2);
	*p = 1111;
	assert (*p == 1111);

	/* write at the end (last int-sized slot of the region) */
	p = (int*)((uint32_t)(freemem_size + freemem_start - sizeof(int)));
	*p = 101;
	/* bug fix: was `*p = 101` (assignment, always true) — compare instead */
	assert (*p == 101);

	cprintf ("check_vpm() succeeded.\n");
}


/**
 * Check the correctness of the system.
 */
/**
 * Run the active physical memory manager's self-test.
 */
static void
check_alloc_page(void) {
	pmm_manager->check();
	cprintf("check_alloc_page() succeeded.\n");
}


/**
 * Check page table
 */
/**
 * Check page table operations (get_pte/get_page/page_insert/page_remove)
 * against boot_pgdir using two freshly allocated pages mapped at TEST_PAGE
 * and TEST_PAGE + PGSIZE. Leaves boot_pgdir unchanged on success.
 */
static void
check_pgdir(void) {
	assert(npage <= KMEMSIZE / PGSIZE);
	assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0);
	/* TEST_PAGE must start out unmapped */
	assert(get_page(boot_pgdir, TEST_PAGE, NULL) == NULL);

	struct Page *p1, *p2;
	p1 = alloc_page();
	assert(page_insert(boot_pgdir, p1, TEST_PAGE, 0) == 0);

	/* inserting must have created the PTE and taken one reference */
	pte_t *ptep;
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE, 0)) != NULL);
	assert(pa2page(*ptep) == p1);
	assert(page_ref(p1) == 1);

	/* the PTE for TEST_PAGE + PGSIZE is the next slot in the same table */
	ptep = &((pte_t *)KADDR(PTE_ADDR(boot_pgdir[PDX(TEST_PAGE)])))[1];
	assert(get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0) == ptep);

	/* a second page with user/write permission propagates PTE_U to the PDE */
	p2 = alloc_page();
	assert(page_insert(boot_pgdir, p2, TEST_PAGE + PGSIZE, PTE_U | PTE_W) == 0);
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0)) != NULL);
	assert(*ptep & PTE_U);
	assert(*ptep & PTE_W);
	assert(boot_pgdir[PDX(TEST_PAGE)] & PTE_U);
	assert(page_ref(p2) == 1);

	/* replacing p2 with p1 drops p2's ref and bumps p1's; perm resets to 0 */
	assert(page_insert(boot_pgdir, p1, TEST_PAGE + PGSIZE, 0) == 0);
	assert(page_ref(p1) == 2);
	assert(page_ref(p2) == 0);
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0)) != NULL);
	assert(pa2page(*ptep) == p1);
	assert((*ptep & PTE_U) == 0);

	/* removing each mapping decrements p1's refcount back to zero */
	page_remove(boot_pgdir, TEST_PAGE);
	assert(page_ref(p1) == 1);
	assert(page_ref(p2) == 0);

	page_remove(boot_pgdir, TEST_PAGE + PGSIZE);
	assert(page_ref(p1) == 0);
	assert(page_ref(p2) == 0);

	/* finally release the page table page created by get_pte(..., 1) */
	assert(page_ref(pa2page(boot_pgdir[PDX(TEST_PAGE)])) == 1);
	free_page(pa2page(boot_pgdir[PDX(TEST_PAGE)]));
	boot_pgdir[PDX(TEST_PAGE)] = 0;

	cprintf("check_pgdir() succeeded.\n");
}

/**
 * Check whether page directory for boot lives well.
 *     NOTE: we don't have mm_struct at present.
 *           as write to a clean page also raises SIGSEGV, we're not able to deal with it now.
 *           so just mark all page inserted to be accessed and dirty.
 */
static void
check_boot_pgdir(void) {
    pte_t *ptep;
    int i;
    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    //assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[PDX(TEST_PAGE)] == 0);
	
    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, TEST_PAGE, PTE_W | PTE_D | PTE_A) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, TEST_PAGE + PGSIZE, PTE_W | PTE_D | PTE_A) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)TEST_PAGE, str);
    assert(strcmp((void *)TEST_PAGE, (void *)(TEST_PAGE + PGSIZE)) == 0);

    *(char *)(page2kva(p)) = '\0';
    assert(strlen((const char *)TEST_PAGE) == 0);

	/*
	 * in um architecture clear page table doesn't mean
	 *     the linear address is invalid
	 * so remove then by hand
	 */
	tlb_invalidate (boot_pgdir, TEST_PAGE);
	tlb_invalidate (boot_pgdir, TEST_PAGE + PGSIZE);
	
    free_page(p);
    free_page(pa2page(PDE_ADDR(boot_pgdir[PDX(TEST_PAGE)])));
    boot_pgdir[PDX(TEST_PAGE)] = 0;

    cprintf("check_boot_pgdir() succeeded.\n");
}

/**************************************************
 * Memory manager wrappers.
 **************************************************/

/**
 * Initialize mm tragedy.
 */
/**
 * Select the buddy allocator as the physical memory manager and
 * initialize it.
 */
static void
init_pmm_manager(void) {
	pmm_manager = &buddy_pmm_manager;
	cprintf("memory managment: %s\n", pmm_manager->name);
	pmm_manager->init();
}

/**
 * Initialize memory bitmap.
 */
/**
 * Hand a range of @n free pages starting at @base to the active manager.
 */
static void
init_memmap(struct Page *base, size_t n) {
	pmm_manager->init_memmap(base, n);
}


/**************************************************
 * Page allocation wrappers.
 **************************************************/
/**
 * Allocate @n contiguous pages with interrupts disabled around the
 * manager call.
 * @return the first page descriptor, or NULL on failure
 */
struct Page *
alloc_pages(size_t n) {
	struct Page *result;
	bool saved_flag;
	local_intr_save(saved_flag);
	result = pmm_manager->alloc_pages(n);
	local_intr_restore(saved_flag);
	return result;
}

/**
 * Allocate one page during boot and return its kernel virtual address.
 * Panics on failure — there is no way to recover this early.
 */
static void *
boot_alloc_page(void) {
	struct Page *page = alloc_page();
	if (page == NULL) {
		panic("boot_alloc_page failed.\n");
	}
	return page2kva(page);
}

/**
 * Return @n contiguous pages starting at @base to the manager, with
 * interrupts disabled around the call.
 */
void
free_pages(struct Page *base, size_t n) {
	bool saved_flag;
	local_intr_save(saved_flag);
	pmm_manager->free_pages(base, n);
	local_intr_restore(saved_flag);
}


/**
 * Query the number of free pages, with interrupts disabled around the
 * manager call.
 */
size_t
nr_free_pages(void) {
	size_t count;
	bool saved_flag;
	local_intr_save(saved_flag);
	count = pmm_manager->nr_free_pages();
	local_intr_restore(saved_flag);
	return count;
}

/**************************************************
 * Page operation wrappers end.
 **************************************************/

/**
 * Examine address space, create virtual physical memory and map it.
 */
static void
page_init (void)
{
	int i;
	int freemem_size = 0;
	
	/* Construct page descriptor table.
	 * mem => memory not reserved or occupied by kernel code
	 * freemem => memory available after page descriptor table is built
	 */
	
	/* all pages from 0x100000 to the top should have an entry in page descriptor table */
	for (i = 0; i < e820map.nr_map; i++) {
		mem_size += (uint32_t)(e820map.map[i].size);
		if (e820map.map[i].type == E820_ARM)
			freemem_size += e820map.map[i].size;
	}
	
	pages = (struct Page *)(uint32_t)(e820map.map[e820map.nr_map-1].addr);
	npage = (mem_size) / PGSIZE;
	for (i = 0; i < npage; i++) {
		SetPageReserved (pages + i);
	}

	uintptr_t freemem = PADDR(ROUNDUP((uintptr_t)pages + sizeof(struct Page) * npage, PGSIZE));
	uint32_t freemem_npage = freemem_size / PGSIZE - npage * sizeof (struct Page) / PGSIZE;
	init_memmap(pa2page(freemem), freemem_npage);
}

/**
 * 
 */
/**
 * Map the linear range [la, la + size) to the physical range starting at
 * @pa in @pgdir with permission @perm. @la and @pa must share the same
 * in-page offset.
 */
static void
boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm) {
	assert(PGOFF(la) == PGOFF(pa));
	size_t npages = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
	la = ROUNDDOWN(la, PGSIZE);
	pa = ROUNDDOWN(pa, PGSIZE);
	while (npages > 0) {
		pte_t *ptep = get_pte(pgdir, la, 1);
		assert(ptep != NULL);
		*ptep = pa | PTE_P | perm;
		npages--;
		la += PGSIZE;
		pa += PGSIZE;
	}
}

/**
 * Render PTE permission bits as a short "urw"-style string.
 * Returns a pointer to a static buffer (not reentrant).
 */
static const char *
perm2str(int perm) {
	static char buf[4];
	buf[0] = (perm & PTE_U) ? 'u' : '-';
	buf[1] = 'r';  /* present entries are always readable */
	buf[2] = (perm & PTE_W) ? 'w' : '-';
	buf[3] = '\0';
	return buf;
}

/**
 *
 */
/**
 * Scan @table in [start, right) for the next run of present entries that
 * share identical PTE_USER permission bits.
 * @param left        unused (kept for interface compatibility)
 * @param right       exclusive upper bound of the scan
 * @param start       index to begin scanning from
 * @param table       the directory/table to scan
 * @param left_store  if non-NULL, receives the run's first index
 * @param right_store if non-NULL, receives one past the run's last index
 * @return the run's permission bits, or 0 if no present entry remains
 */
static int
get_pgtable_items(size_t left, size_t right, size_t start, uintptr_t *table, size_t *left_store, size_t *right_store) {
	/* skip non-present entries */
	while (start < right && !(table[start] & PTE_P)) {
		start++;
	}
	if (start >= right) {
		return 0;
	}
	if (left_store != NULL) {
		*left_store = start;
	}
	/* extend the run while the permission bits stay the same */
	int perm = table[start++] & PTE_USER;
	while (start < right && (table[start] & PTE_USER) == perm) {
		start++;
	}
	if (right_store != NULL) {
		*right_store = start;
	}
	return perm;
}

/**
 *
 */
/**
 * Dump boot_pgdir as runs of same-permission PDEs, and for each run dump
 * the runs of same-permission PTEs in every covered page table.
 */
void
print_pgdir(void) {
    cprintf("-------------------- BEGIN --------------------\n");
    size_t left, right = 0, perm;
    /* outer scan: runs of present PDEs with identical permissions */
    while ((perm = get_pgtable_items(0, NPDEENTRY, right, boot_pgdir, &left, &right)) != 0) {
        cprintf("PDE(%03x) %08x-%08x %08x %s\n", right - left,
                left * PTSIZE, right * PTSIZE, (right - left) * PTSIZE, perm2str(perm));
        //size_t l, r = left * NPTEENTRY;
		size_t l = 0, r = 0, i = left;
/* kernel-VA pointer to the page table referenced by a PDE */
#define PT(pde)									\
		((pte_t*)(KADDR(PDE_ADDR(pde))))
		/* inner scan: runs of present PTEs within each table of the run */
		for (; i < right; i++) {
			r = 0;
			while ((perm = get_pgtable_items(0, NPTEENTRY, r, PT(boot_pgdir[i]), &l, &r)) != 0) {
				cprintf("  |-- PTE(%05x) %08x-%08x %08x %s\n", r - l,
						l * PGSIZE, r * PGSIZE, (r - l) * PGSIZE, perm2str(perm));
			}
		}
    }
    cprintf("--------------------- END ---------------------\n");
}


/**
 * Initialize physical memory management. The steps are strictly ordered:
 * the virtual-physical memory must exist before the manager runs, the
 * descriptor table must exist before any page is allocated, and the boot
 * page directory must exist before kernel memory is mapped into it.
 */
void
pmm_init (void) 
{
	/* sanity-check the loader-provided memory window first */
	check_vpm ();
	
	init_pmm_manager ();

	/* build the page descriptor table and seed the free lists */
	page_init ();

	check_alloc_page ();

	/* create an empty boot page directory and exercise table ops */
	boot_pgdir = boot_alloc_page();
	memset(boot_pgdir, 0, PGSIZE);
	check_pgdir();

	/* register kernel code and data pages in the table so that it won't raise bad segv. */
	boot_map_segment (boot_pgdir, KERNBASE, mem_size, 0, PTE_W);

	check_boot_pgdir ();
	print_pgdir ();

	/* slab allocator sits on top of the page allocator */
	slab_init ();
}
