#include "page.h"
#include "riscv.h"
#include "string.h" // memset()
#include "panic.h"
#include "pmm.h" // alloc_page()
#include "list.h"
#include "stdio.h" // puts()
#include "io.h"

extern char etext[];
extern char ekernel[];

// create a l3 page table for kernel
struct pgtable_l3 kpgtable;

/* @brief: check if it's a valid pde
 * @param pde: a level 2 or 3 page table entry
 */
/* @brief: check if it's a valid pde
 * @param pde: a level 2 or 3 page table entry
 * @return: nonzero when the entry has PTE_V set, 0 otherwise
 * @note: panics if the entry carries any leaf permission bit (R/W/X),
 *        which is illegal on a non-leaf Sv39 entry
 */
int check_pde(uint64 pde) {
	// a non-leaf entry must have R, W and X all cleared
	if (pde & (PTE_W | PTE_R | PTE_X)) {
		panic("check_pde: wrong pde format.");
	}
	return pde & PTE_V;
}

/* @brief: start paging mechanism and create kernel page table
 */
void page_init() {
	// create kernel page table
	memset((char *)(&kpgtable.entries[0]), 0, PGSIZE);
	smode_map_pages(&kpgtable);
	csrw(satp, (SV39 << 60) | ((uint64)&kpgtable.entries[0] >> 12));
	asm volatile("sfence.vma");
}

/* @brief: map physical address (K210_KERN_START,K210_MAX_ADDR) to the same virtual address
 * @note: kernel and user's supervisor mode need this
 */
void smode_map_pages(struct pgtable_l3 *pgtable) {
	// .text section, executable and readable
	map_pages(pgtable, K210_KERN_START, K210_KERN_START, (uint64)etext - K210_KERN_START, PTE_X | PTE_R);
	// other section of kernel, writable and readable
	map_pages(pgtable, (uint64)etext, (uint64)etext, (uint64)ekernel - (uint64)etext, PTE_W | PTE_R);
	// other RAM area
	map_pages(pgtable, (uint64)ekernel, (uint64)ekernel, K210_MAX_ADDR - (uint64)ekernel, PTE_U | PTE_W | PTE_R);
	// io peripheral mapping area
	map_pages(pgtable, IO_REGION_START0, IO_REGION_START0, (IO_REGION_END0 - IO_REGION_START0), PTE_W | PTE_R);
	map_pages(pgtable, IO_REGION_START1, IO_REGION_START1, (IO_REGION_END1 - IO_REGION_START1), PTE_W | PTE_R);
	map_pages(pgtable, IO_REGION_START2, IO_REGION_START2, (IO_REGION_END2 - IO_REGION_START2), PTE_W | PTE_R);
}

/* @brief: map virtual address [va, va + PGSIZE) to physical address [pa, pa + PGSIZE)
 * @param pgtable_l3: level 3 page table
 * @param perm: PTE flags
 * @note: create necessary level 2 and 1 page table when mapping
 */
/* @brief: map virtual address [va, va + PGSIZE) to physical address [pa, pa + PGSIZE)
 * @param table_ptr3: level 3 (root) page table
 * @param va: virtual address of the page to map
 * @param pa: physical address to map it to
 * @param perm: PTE flags (PTE_V is added automatically)
 * @note: allocates and zeroes intermediate level 2/1 page tables on demand;
 *        panics on a NULL root table or when kalloc() fails
 */
void map_a_page(struct pgtable_l3 *table_ptr3, uint64 va, uint64 pa, uint64 perm) {
	struct pgtable_l1 *table_ptr1;
	struct pgtable_l2 *table_ptr2;
	pte_l1 *entry_ptr1;
	pde_l2 *entry_ptr2;
	pde_l3 *entry_ptr3;
	uint64 newpage;
	// level 3
	if(table_ptr3 == NULL) {
		panic("map_a_page: table_ptr3 is NULL\n");
	}
	entry_ptr3 = &(table_ptr3->entries[VA2PGTABLE_INDEX(va, 3)]);
	if (check_pde(*entry_ptr3) == 0) {
		// allocate a level 2 table; check for OOM instead of silently
		// installing a PTE that points at physical address 0
		newpage = (uint64)kalloc(PGSIZE);
		if (newpage == 0) {
			panic("map_a_page: kalloc failed\n");
		}
		// zero the new table so leftover bytes aren't read as valid PTEs
		memset((char *)newpage, 0, PGSIZE);
		*entry_ptr3 = PA2PTE(newpage, PTE_V);
	}
	// level 2
	table_ptr2 = (struct pgtable_l2 *)(PTE2PA(*entry_ptr3));
	entry_ptr2 = &(table_ptr2->entries[VA2PGTABLE_INDEX(va, 2)]);
	if (check_pde(*entry_ptr2) == 0) {
		// same: allocate and zero a level 1 table
		newpage = (uint64)kalloc(PGSIZE);
		if (newpage == 0) {
			panic("map_a_page: kalloc failed\n");
		}
		memset((char *)newpage, 0, PGSIZE);
		*entry_ptr2 = PA2PTE(newpage, PTE_V);
	}
	// level 1: install the leaf entry
	table_ptr1 = (struct pgtable_l1 *)(PTE2PA(*entry_ptr2));
	entry_ptr1 = &(table_ptr1->entries[VA2PGTABLE_INDEX(va, 1)]);
	*entry_ptr1 = PA2PTE(pa, perm | PTE_V);
}

/* @brief: map virtual address [va, va + size) to physical address [pa, pa + size)
 * @param pgtable_l3: level 3 page table
 * @param perm: PTE flags
 */
/* @brief: map virtual address [va, va + size) to physical address [pa, pa + size)
 * @param table_ptr3: level 3 (root) page table
 * @param va: start virtual address (need not be page-aligned)
 * @param pa: start physical address (need not be page-aligned)
 * @param size: number of bytes to map; size == 0 maps nothing
 * @param perm: PTE flags
 * @note: BUG FIX — the old loop rounded va down but still stepped only
 *        `size` bytes, so for an unaligned va (e.g. etext) the page holding
 *        the last byte of the range was never mapped. We now iterate up to
 *        the page containing the last byte, inclusive.
 */
void map_pages(struct pgtable_l3 *table_ptr3, uint64 va, uint64 pa, uint64 size, uint64 perm) {
	uint64 cur, last;
	if (size == 0) {
		return; // nothing to map (matches previous behavior for size 0)
	}
	cur = ROUNDDOWN(va, PGSIZE);
	last = ROUNDDOWN(va + size - 1, PGSIZE);
	pa = ROUNDDOWN(pa, PGSIZE);
	for (;;) {
		map_a_page(table_ptr3, cur, pa, perm);
		if (cur == last) {
			break;
		}
		cur += PGSIZE;
		pa += PGSIZE;
	}
}

/* @brief: transform virtual address to physical address
 * @param va: virtual address
 * @param table_ptr3: level 3 page table
 * @return: 0 if there is no mapping pa
 */
/* @brief: transform virtual address to physical address
 * @param va: virtual address
 * @param table_ptr3: level 3 (root) page table
 * @return: 0 if there is no mapping pa
 * @note: NOTE(review): returns the base of the mapped page; a caller that
 *        wants the exact byte must add (va % PGSIZE) — confirm intended
 *        contract before changing it.
 */
uint64 va2pa(uint64 va, struct pgtable_l3 *table_ptr3) {
	struct pgtable_l1 *table_ptr1;
	struct pgtable_l2 *table_ptr2;
	pte_l1 *entry_ptr1;
	pde_l2 *entry_ptr2;
	pde_l3 *entry_ptr3;
	// level 3
	entry_ptr3 = &(table_ptr3->entries[VA2PGTABLE_INDEX(va, 3)]);
	if (check_pde(*entry_ptr3) == 0) {
		goto not_mapping;
	}
	// level 2
	table_ptr2 = (struct pgtable_l2 *)(PTE2PA(*entry_ptr3));
	entry_ptr2 = &(table_ptr2->entries[VA2PGTABLE_INDEX(va, 2)]);
	if (check_pde(*entry_ptr2) == 0) {
		goto not_mapping;
	}
	// level 1
	table_ptr1 = (struct pgtable_l1 *)(PTE2PA(*entry_ptr2));
	entry_ptr1 = &(table_ptr1->entries[VA2PGTABLE_INDEX(va, 1)]);
	// BUG FIX: the leaf entry was never validated, so an unmapped va
	// returned PTE2PA of garbage instead of 0. check_pde() can't be used
	// here — a leaf legitimately carries R/W/X and it would panic —
	// so test PTE_V directly.
	if ((*entry_ptr1 & PTE_V) == 0) {
		goto not_mapping;
	}

	return ((uint64)PTE2PA(*entry_ptr1));

	not_mapping:
		return 0;
}
