#include <types.h>
#include <arch/pgtable.h>
#include <mem_map.h>
#include <util.h>
#include <errno.h>
#include <arch/arm.h>
#include <config.h>

/*
 * Cache-policy selectors: indices into cache_policies[] below.  The
 * selected policy supplies the cacheability bits that build_mem_types()
 * folds into every section/page protection template.
 */
#define CPOLICY_UNCACHED		0
#define CPOLICY_BUFFERED		1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK		3

/*
 * Cacheability attribute bits for one policy, split by descriptor kind:
 * prot_sect is OR-ed into first-level section descriptors, prot_pte
 * into second-level page descriptors.  The MMU_* values come from
 * <arch/pgtable.h>.
 */
struct cachepolicy {
	pgd_t prot_sect;	/* cache/buffer bits for section entries */
	pte_t prot_pte;		/* cache/buffer bits for page entries */
};

/*
 * Per-memory-type protection template.  The values below are the base
 * access-permission bits; build_mem_types() later ORs in the global
 * cache policy and the domain field for each entry of mem_types[].
 */
struct mem_type {
	pgd_t prot_pgd;		/* bits for a first-level table descriptor */
	pgd_t prot_sect;	/* bits for a first-level section descriptor */
	pte_t prot_pte;		/* bits for a second-level page descriptor */
	unsigned int domain;	/* MMU domain this mapping belongs to */
};                

/*
 * Boot page-table region: the first-level table (PGD) occupies the
 * start; second-level tables are carved off the end by pgtable_alloc(),
 * which advances __mboot_pgtable_end as a bump-allocation cursor.
 */
void *__mboot_pgtable_start;	/* base of the whole page-table region */
void *__mboot_pgtable_end;	/* first free byte behind allocated tables */
pgd_t *__mboot_pgd_start;	/* first-level table (== region base) */

/* Cache policy applied to all mappings; selects a cache_policies[] entry. */
static unsigned int cachepolicy = CPOLICY_WRITEBACK;

/*
 * One entry per CPOLICY_* selector.  Each MMU_* constant (from
 * <arch/pgtable.h>) carries the cacheable/bufferable descriptor bits
 * for the named policy; the same value is used for both section and
 * page descriptors here.
 */
static struct cachepolicy cache_policies[] = {
	[CPOLICY_UNCACHED] = {
		.prot_sect = MMU_UNCACHED,
		.prot_pte  = MMU_UNCACHED,
	}, 
	[CPOLICY_BUFFERED] = {
		.prot_sect = MMU_BUFFERED,
		.prot_pte  = MMU_BUFFERED,
	},
	[CPOLICY_WRITETHROUGH] = {
		.prot_sect = MMU_WRITETHROUGH,
		.prot_pte  = MMU_WRITETHROUGH,
	},
	[CPOLICY_WRITEBACK] = {
		.prot_sect = MMU_WRITEBACK,
		.prot_pte  = MMU_WRITEBACK,
	},
};

/*
 * Base protection templates, indexed by MT_* memory type (see
 * mem_map.h).  All entries live in the kernel domain; read/write
 * permission differs per type: ROM and the vector region are mapped
 * read-only, everything else read/write.  build_mem_types() completes
 * these values with cache-policy and domain bits before first use.
 */
static struct mem_type mem_types[] = {
    [MT_DEVICE] = {
        .prot_pgd  = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_WRITE | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ | MMU_SMALL_AP_WRITE,
		.domain = MMU_DOMAIN_KERNEL, 
    },  
    [MT_SRAM] = { 
        .prot_pgd  = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_WRITE | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ | MMU_SMALL_AP_WRITE,
		.domain = MMU_DOMAIN_KERNEL, 
    },
    [MT_ROM] = {
        /* read-only: no AP_WRITE bits */
        .prot_pgd  = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ,
		.domain = MMU_DOMAIN_KERNEL, 
    },
	[MT_SDRAM] = {
        .prot_pgd  = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_WRITE | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ | MMU_SMALL_AP_WRITE,
		.domain = MMU_DOMAIN_KERNEL, 
	},
	[MT_VECTORS] = {
        /* read-only: no AP_WRITE bits */
        .prot_pgd  = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ,
		.domain = MMU_DOMAIN_KERNEL, 
	},
};

static void build_mem_types(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pgd  |= MMU_DOMAIN(mem_types[i].domain) | MMU_L1_BIT4;
		mem_types[i].prot_sect |= cache_policies[cachepolicy].prot_sect | MMU_DOMAIN(mem_types[i].domain) | MMU_L1_BIT4;
		mem_types[i].prot_pte  |= cache_policies[cachepolicy].prot_pte;
	}
}

/*
 * Initialise the boot page-table region at pgtable_start.
 *
 * The first-level table (PGD) is placed at the very start of the
 * region and fully cleared; the allocation cursor __mboot_pgtable_end
 * is set just behind it so second-level tables can later be carved off
 * by pgtable_alloc().  Finally the region base is programmed as the
 * translation table base (TTB).
 *
 * Returns 0 on success, -EINVAL if pgtable_start is not aligned to
 * PGD_TABLE_SIZE (the hardware requires natural alignment of the
 * first-level table).
 */
static int prepare_page_table(uint32_t pgtable_start)
{
	unsigned int i;
	pgd_t *pgd;

	if (pgtable_start & (PGD_TABLE_SIZE - 1)) {
		return -EINVAL;
	}

	__mboot_pgtable_start = (void *)pgtable_start;
	__mboot_pgd_start = (pgd_t *)__mboot_pgtable_start;
	__mboot_pgtable_end = (void *)(__mboot_pgd_start + PTRS_PER_PGD);

	/* Start from an empty first-level table: no stale translations. */
	pgd = __mboot_pgd_start;
	for (i = 0; i < PTRS_PER_PGD; i++) {
		pgd_clear(pgd);
		pgd++;
	}

	set_ttb((uint32_t)__mboot_pgtable_start);

	/* BUG FIX: the function previously fell off the end of a non-void
	 * function, so callers inspecting the result read an indeterminate
	 * value (undefined behaviour). */
	return 0;
}

/*
 * Bump allocator for second-level page tables: hand out the next
 * `size` bytes of the boot page-table region and advance the cursor.
 * There is no free(); the region only grows during boot.
 */
static void *pgtable_alloc(unsigned int size)
{
	uint8_t *chunk = (uint8_t *)__mboot_pgtable_end;

	__mboot_pgtable_end = chunk + size;
	return chunk;
}

static pte_t *alloc_init_pte(uint32_t virt_addr, struct mem_type *type)
{
	pgd_t *pgd;
	pte_t *pte;
	unsigned int i;

	pgd = pgd_offset_k(virt_addr);
	if (pgd_none(*pgd)) {
		pte = (pte_t *)pgtable_alloc(PTE_TABLE_SIZE);
		set_pgd(pgd, type->prot_pgd | ((uint32_t)pte & ~(PTE_TABLE_SIZE - 1)));
		for (i = 0; i < PTRS_PER_PTE; i++) {
			pte_clear(pte);
			pte++;
		}
	}
	pte = pte_offset_k(virt_addr);
	if (!pte_none(*pte)) {
		return ERR_PTR(-EBUSY);
	}

	return pte;
}

/*
 * Establish the virtual-to-physical mapping described by md.
 *
 * Section-aligned stretches are mapped with first-level section
 * descriptors; any remainder (or the whole range, when unaligned) is
 * mapped page by page through second-level tables allocated on demand.
 *
 * Returns 0 on success, -ENOTSUPP for an unknown memory type, -EINVAL
 * for page-unaligned addresses or length, or -EBUSY (via
 * alloc_init_pte) when a page is already mapped.
 */
int create_mapping(struct mem_map_desc *md)
{
	struct mem_type *type;
	pgd_t *pgd;
	pte_t *pte;
	uint32_t virt_addr, phys_addr, length;

	if (md->type >= ARRAY_SIZE(mem_types)) {
		return -ENOTSUPP;
	}

	/* In this code base PAGE_MASK selects the in-page offset bits, so
	 * a nonzero result means "not page aligned". */
	if ((md->virt_addr & PAGE_MASK) || (md->phys_addr & PAGE_MASK) || (md->length & PAGE_MASK)) {
		return -EINVAL;
	}

	virt_addr = md->virt_addr;
	phys_addr = md->phys_addr;
	length    = md->length;
	type      = &mem_types[md->type];

	/*
	 * BUG FIX: the old test `(virt & PGD_SIZE) && (phys & PGD_SIZE)`
	 * probed a single address bit instead of checking alignment, so a
	 * mapping at e.g. 0x0 or 2*PGD_SIZE never used section entries
	 * while 3*PGD_SIZE qualified only by coincidence.  Section mapping
	 * requires both addresses to have no bits below the section
	 * boundary set.
	 */
	if (((virt_addr | phys_addr) & (PGD_SIZE - 1)) == 0) {
		while (length >= PGD_SIZE) {
			pgd = pgd_offset_k(virt_addr);
			*pgd = __pgd(phys_addr | type->prot_sect);
			virt_addr += PGD_SIZE;
			phys_addr += PGD_SIZE;
			length -= PGD_SIZE;
		}
	}

	/* Map whatever is left with second-level small pages. */
	while (length) {
		pte = alloc_init_pte(virt_addr, type);
		if (IS_ERR(pte)) {
			return PTR_ERR(pte);
		}
		/* ~PAGE_MASK keeps the page-frame bits; prot_pte supplies the
		 * descriptor type, access-permission and cache attributes. */
		*pte = __pte((phys_addr & ~PAGE_MASK) | type->prot_pte);
		virt_addr += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	return 0;
}

/*
 * One-time architecture page-table setup: validate the CPU and the
 * configured table base, adjust the control register (S set, R
 * cleared), grant client access for the kernel domain, finalise the
 * mem_types[] templates and prepare an empty PGD at
 * CONFIG_MBOOT_PGTABLE_START.
 *
 * Returns 0 on success, -ENOTSUPP for an unknown CPU architecture,
 * -EINVAL when the configured base is not PGD_TABLE_SIZE aligned.
 * The same alignment is checked here before calling
 * prepare_page_table(), so that call cannot fail.
 */
int pgtable_arch_init(void)
{
	uint32_t ctrl;
	int arch = get_cpu_architecture();

	if (arch == CPU_ARCH_UNKNOWN) {
		return -ENOTSUPP;
	}

	if (CONFIG_MBOOT_PGTABLE_START & (PGD_TABLE_SIZE - 1)) {
		return -EINVAL;
	}

	/* S bit on, R bit off in the system control register. */
	ctrl = (get_cr() & ~CR_R) | CR_S;
	set_cr(ctrl);

	/* Client mode: domain accesses are checked against permissions. */
	set_dac(MMU_DOMAIN_CLIENT);

	build_mem_types();
	prepare_page_table(CONFIG_MBOOT_PGTABLE_START);

	return 0;
}

