#include <memory.h>
#include <util.h>
#include <errno.h>
#include <machine.h>
#include <arch/mmu.h>
#include <arch/cache.h>

/* Cache policy selectors; used to index cache_policies[] below. */
#define CPOLICY_UNCACHED		0
#define CPOLICY_BUFFERED		1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK		3

/* MMU attribute bits a cache policy contributes to a descriptor. */
struct cachepolicy {
	pgde_t prot_sect;	/* bits OR'ed into first-level section entries */
	pte_t prot_pte;		/* bits OR'ed into second-level page entries */
};

/* Protection/attribute template for one memory-map type (MMT_*). */
struct mem_type {
	pgde_t prot_pgde;	/* bits for a first-level page-table descriptor */
	pgde_t prot_sect;	/* bits for a first-level section descriptor */
	pte_t prot_pte;		/* bits for a second-level page entry */
	unsigned int domain;	/* MMU domain this mapping belongs to */
};                

/* Policy applied to normal memory; device/DMA regions are forced uncached. */
static unsigned int cachepolicy = CPOLICY_WRITEBACK;

/*
 * Descriptor attribute bits for each cache policy, indexed by CPOLICY_*.
 * Merged into mem_types[] entries by build_mem_types().
 */
static struct cachepolicy cache_policies[] = {
	[CPOLICY_UNCACHED] = {
		.prot_sect = MMU_UNCACHED,
		.prot_pte  = MMU_UNCACHED,
	}, 
	[CPOLICY_BUFFERED] = {
		.prot_sect = MMU_BUFFERED,
		.prot_pte  = MMU_BUFFERED,
	},
	[CPOLICY_WRITETHROUGH] = {
		.prot_sect = MMU_WRITETHROUGH,
		.prot_pte  = MMU_WRITETHROUGH,
	},
	[CPOLICY_WRITEBACK] = {
		.prot_sect = MMU_WRITEBACK,
		.prot_pte  = MMU_WRITEBACK,
	},
};

/*
 * Base protection templates per memory-map type, indexed by MMT_*.
 * Domain and cache-policy bits are OR'ed in later by build_mem_types().
 * DEVICE/RAM/DMA are read/write; ROM is read-only.
 */
static struct mem_type mem_types[] = {
    [MMT_DEVICE] = {
        .prot_pgde = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_WRITE | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ | MMU_SMALL_AP_WRITE,
		.domain = MMU_DOMAIN_KERNEL, 
    },  
    [MMT_ROM] = {
        .prot_pgde = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ,
		.domain = MMU_DOMAIN_KERNEL, 
    },
	[MMT_RAM] = {
        .prot_pgde = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_WRITE | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ | MMU_SMALL_AP_WRITE,
		.domain = MMU_DOMAIN_KERNEL, 
	},
	[MMT_DMA] = {
        .prot_pgde = MMU_TYPE_TABLE,
        .prot_sect = MMU_TYPE_SECT | MMU_SECT_AP_WRITE | MMU_SECT_AP_READ,
        .prot_pte  = MMU_TYPE_SMALL | MMU_SMALL_AP_READ | MMU_SMALL_AP_WRITE,
		.domain = MMU_DOMAIN_KERNEL, 
	},	
};

/* Linker-provided image start; the page directory is placed just below it. */
extern pgde_t __start[];

/* First-level page directory (PTRS_PER_PGD entries below __start). */
static pgde_t *__pgd_start;

/* Per-entry software flags array, placed below the page directory. */
static pgdf_t *__pgd_flags_start;

static void build_mem_types(void)
{
	int i;
	unsigned int cp;

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		if ((i == MMT_DEVICE) || (i == MMT_DMA)) {
			cp = CPOLICY_UNCACHED;
		} else {
			cp = cachepolicy;
		}

		mem_types[i].prot_pgde |= MMU_DOMAIN(mem_types[i].domain) | MMU_L1_BIT4;
		mem_types[i].prot_sect |= cache_policies[cp].prot_sect | MMU_DOMAIN(mem_types[i].domain) | MMU_L1_BIT4;
		mem_types[i].prot_pte  |= cache_policies[cp].prot_pte;
	}
}

static void page_table_init(void)
{
	unsigned int i;
	pgde_t *pgde;
	pgdf_t *pgdf;

	__pgd_start = __start - PTRS_PER_PGD;
	__pgd_flags_start = (pgdf_t *)__pgd_start - PTRS_PER_PGD;

	pgde = __pgd_start;
	for (i = 0; i < PTRS_PER_PGD; i++) {
		clear_pgde(pgde);
		pgde++;
	}

	pgdf = __pgd_flags_start;
	for (i = 0; i < PTRS_PER_PGD; i++) {
		clear_pgdf(pgdf);
		pgdf++;
	}

	set_ttb((uint32_t)__pgd_start);
}

/*
 * Install first-level section mappings for one memory_map descriptor.
 *
 * Both addresses and the length must be page aligned; the mapping itself
 * is written in whole 1MB sections (sub-section remainders are ignored).
 * Each mapped section also records its MMT_* type in the flags array.
 *
 * Returns 0 on success, -EINVAL on a bad descriptor.
 */
static int memory_map_set(struct memory_map *map)
{
	uint32_t va, pa, len;
	struct mem_type *type;
	pgde_t *pgde;
	pgdf_t *pgdf;

	if (!map || IS_ERR(map))
		return -EINVAL;

	if (map->type >= ARRAY_SIZE(mem_types))
		return -EINVAL;

	/* All three quantities must be page aligned. */
	if ((map->virt_addr | map->phys_addr | map->length) & PTI_MASK)
		return -EINVAL;

	va   = map->virt_addr;
	pa   = map->phys_addr;
	len  = map->length;
	type = &mem_types[map->type];

	for (; len >= (1 << PGDI_SHIFT); len -= (1 << PGDI_SHIFT)) {
		pgde  = get_pgde(va, __pgd_start);
		*pgde = to_pgde(pa | type->prot_sect);
		pgdf  = get_pgdf(va, __pgd_flags_start);
		*pgdf = to_pgdf(map->type);
		va += (1 << PGDI_SHIFT);
		pa += (1 << PGDI_SHIFT);
	}

	return 0;
}

/*
 * Architecture-level MMU preparation: verify the CPU is recognized,
 * clear the ROM-protection bit and set the system-protection bit in
 * the control register, and grant client access to all MMU domains.
 *
 * Returns 0 on success, -ENOTSUPP for an unknown CPU architecture.
 */
static int page_table_arch_init(void)
{
	uint32_t cr;

	if (get_cpu_architecture() == CPU_ARCH_UNKNOWN)
		return -ENOTSUPP;

	/* R bit off, S bit on: "system" protection mode. */
	cr = (get_cr() & ~CR_R) | CR_S;
	set_cr(cr);

	set_dac(MMU_DOMAIN_CLIENT);

	return 0;
}

/*
 * Build identity mappings for every registered memory bank, one 1MB
 * section at a time.  A zone overlapping a section overrides the bank's
 * default mapping type (currently only MZ_DMA -> MMT_DMA).
 *
 * Fixes vs. previous revision:
 *  - the zone table was filled by walking the BANK list while casting the
 *    nodes to struct memory_zone (container_of on n_zone_list); it now
 *    walks the zone list.  NOTE(review): assumes a for_each_memory_zone
 *    iterator exists alongside for_each_memory_bank -- confirm.
 *  - mp.virt_addr was self-assigned (read of an uninitialized field); the
 *    identity mapping now copies phys_addr.
 *  - a zone that is neither MZ_NORMAL nor MZ_DMA left mp.type
 *    uninitialized; such sections are now skipped.
 *  - the last-section computation dropped the final section when a
 *    region's last byte fell exactly on a section boundary; the end index
 *    is now (last >> PGDI_SHIFT) + 1, clamped to the table size.
 *
 * Returns 0 on success, or the error from memory_map_set().
 */
static int memory_map_banks_zones(void)
{
	struct list_head *p;
	struct memory_bank *mb;
	struct memory_zone *mz;
	struct memory_bank *bank_table[PTRS_PER_PGD];
	struct memory_zone *zone_table[PTRS_PER_PGD];
	struct memory_map mp;
	unsigned int start, end, i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(bank_table); i++) {
		bank_table[i] = NULL;
		zone_table[i] = NULL;
	}

	/* Record which bank covers each 1MB section. */
	for_each_memory_bank(p) {
		mb = container_of(p, struct memory_bank, n_bank_list);
		start = mb->start_addr >> PGDI_SHIFT;
		end = ((mb->start_addr + mb->length - 1) >> PGDI_SHIFT) + 1;
		if (end > ARRAY_SIZE(bank_table))
			end = ARRAY_SIZE(bank_table);
		for (i = start; i < end; i++)
			bank_table[i] = mb;
	}

	/* Record which zone (if any) covers each 1MB section. */
	for_each_memory_zone(p) {
		mz = container_of(p, struct memory_zone, n_zone_list);
		start = mz->start_addr >> PGDI_SHIFT;
		end = ((mz->start_addr + mz->length - 1) >> PGDI_SHIFT) + 1;
		if (end > ARRAY_SIZE(zone_table))
			end = ARRAY_SIZE(zone_table);
		for (i = start; i < end; i++)
			zone_table[i] = mz;
	}

	/* Identity-map every section that belongs to a bank. */
	for (i = 0; i < ARRAY_SIZE(bank_table); i++) {
		if (!bank_table[i])
			continue;

		mp.phys_addr = i << PGDI_SHIFT;
		mp.virt_addr = mp.phys_addr;	/* identity mapping */
		mp.length    = 1 << PGDI_SHIFT;

		if (zone_table[i] && (zone_table[i]->type != MZ_NORMAL)) {
			if (zone_table[i]->type != MZ_DMA)
				continue;	/* unknown zone type: skip */
			mp.type = MMT_DMA;
		} else {
			switch (bank_table[i]->type) {
			case MT_DEVICE:
				mp.type = MMT_DEVICE;
				break;
			case MT_ROM:
				mp.type = MMT_ROM;
				break;
			case MT_SRAM:
			case MT_SDRAM:
				mp.type = MMT_RAM;
				break;
			default:
				continue;
			}
		}

		ret = memory_map_set(&mp);
		if (IS_ERR(ret))
			return ret;
	}

	return 0;
}

/*
 * Top-level MMU bring-up: prepare the architecture state, build the
 * protection templates, set up and populate the page directory, flush
 * caches so the tables are visible to the hardware walker, then enable
 * the MMU.
 *
 * Returns 0 on success, or the first error encountered.
 */
int memory_map_init(void)
{
	int ret = page_table_arch_init();

	if (IS_ERR(ret))
		return ret;

	build_mem_types();
	page_table_init();

	ret = memory_map_banks_zones();
	if (IS_ERR(ret))
		return ret;

	/* Push the freshly written tables out to memory. */
	if (dcache_is_enabled()) {
		dcache_flush_and_invalidate_all();
		write_buffer_drain();
	}

	tlbs_invalidate_all();
	mmu_enable();

	return 0;
}

