/*
	Copyright (C) 2014 Salil Bhagurkar

	This file is part of illusion

	illusion is free software: you can redistribute it and/or modify
	it under the terms of the GNU Lesser General Public License as
	published by the Free Software Foundation, either version 3 of
	the License, or (at your option) any later version.

	illusion is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU Lesser General Public License for more details.

	You should have received a copy of the GNU Lesser General Public
	License along with illusion.
	If not, see <http://www.gnu.org/licenses/>.
*/

//
// File Visibility: KERNEL
// Subsystem: VMPAGE
// File Description: This includes extern declarations for everything defined
// in assembly for the architecture.
//

#include <klib/lib.h>
#include <klib/bitmap.h>
#include <kernel/errors.h>
#include <kernel/page.h>
#include <kernel/vmpage.h>
#include <kernel/config.h>
#include <arch/page.h>
#include <kernel/console.h>
#include <arch/power.h>
#include <arch/asmdecl.h>
#include <kernel/ilapp.h>
#include <kernel/syscall.h>

/*
 * Current page-level transaction mode applied by vm_map_address().
 * The default (MAP_CREATE) makes the flags of a PTE propagate to its
 * parent PDE: if a page table is currently not present, mapping an
 * entry inside it will also make it present in the parent page
 * directory. If we make a PTE of a kernel PDE user mode, the PDE also
 * turns into a user-mode PDE. Doing the reverse, however, would turn
 * a user-mode PDE into a kernel PDE, and the user-mode PTEs under it
 * would stop working. Hence a PDE must always hold either all
 * user-mode or all kernel-mode PTEs.
 */
static uint_t current_trans = MAP_CREATE;

/*
 * Select the mapping transaction mode used by subsequent
 * vm_map_address() calls (see the comment on current_trans).
 */
void vm_set_map_trans(uint_t trans)
{
	current_trans = trans;
}

/*
 * Return the mapping transaction mode currently in effect.
 */
uint_t vm_get_map_trans(void)
{
	//(void) gives this a proper prototype; the old empty parameter
	//list declared an unprototyped K&R-style function.
	return current_trans;
}


/*
 * Use the base address of the vmmap to compute the correct bit
 */
static uint_t bit_no(struct vmmap *vmmap, void *vaddr)
{
	assert((ptr_t)vaddr >= (ptr_t)(vmmap->vm_pagemap_start));
	void *offset_addr = (void *)(vaddr - vmmap->vm_pagemap_start);
	return (uint_t)offset_addr / PAGE_SIZE;
}

/*
 * Inverse of bit_no(): translate a bitmap bit index back into the
 * virtual address of the page it tracks.
 */
static void *address_from_bit(struct vmmap *vmmap, uint_t bit)
{
	return vmmap->vm_pagemap_start + (bit * PAGE_SIZE);
}


//TODO Need to improve error handling here



/*
 * Wrapper over the x86 map_address: translate the generic PAGE_*
 * flags into the architecture's PDE/PTE flag bits, then map 'phys'
 * at 'vaddr' under the current transaction mode.
 */
void vm_map_address(struct vmmap *vmmap, void *phys, void *vaddr, u32 page_flags)
{
	u32 pde = 0, pte = 0;
	pde |= (page_flags & PAGE_USER) ? PDE_USER : 0;
	pte |= (page_flags & PAGE_USER) ? PTE_USER : 0;
	pde |= (page_flags & PAGE_WRITE) ? PDE_WRITE : 0;
	pte |= (page_flags & PAGE_WRITE) ? PTE_WRITE : 0;
	pde |= (page_flags & PAGE_PRESENT) ? PDE_PRESENT : 0;
	pte |= (page_flags & PAGE_PRESENT) ? PTE_PRESENT : 0;
	map_address(&vmmap->page_sys, phys, vaddr, pde, pte, current_trans);
}

/*
 * Wrapper over the x86 unmap_address: remove the mapping for 'vaddr'
 * and report the physical address it was mapped to via *was_phys.
 */
void vm_unmap_address(struct vmmap *vmmap, void **was_phys, void *vaddr)
{
	unmap_address(&vmmap->page_sys, was_phys, vaddr);
}

/*
 * Check whether the page containing 'vaddr' is reserved in the vmmap.
 * Returns 1 if the corresponding bitmap bit is set, 0 otherwise.
 */
int vm_is_page(struct vmmap *vmmap, void *vaddr)
{
	return get_bit(vmmap->vm_pagemap, bit_no(vmmap, vaddr)) ? 1 : 0;
}

/*
 * Number of virtual pages still unreserved in this vmmap's bitmap.
 */
uint_t vm_count_free(struct vmmap *vmmap)
{
	return count_clear_bit(vmmap->vm_pagemap, MAP_NR_PAGES);
}

/*
 * Total number of virtual pages covered by a vmmap's bitmap.
 * Every vmmap currently spans MAP_NR_PAGES pages, so the argument is
 * not consulted; it is kept for symmetry with vm_count_free().
 */
uint_t vm_total_count(struct vmmap *vmmap)
{
	(void)vmmap;	//silence unused-parameter warnings
	return MAP_NR_PAGES;
}

/*
 * Mark the page containing 'vaddr' as reserved. This only sets the
 * bitmap bit; it does not establish any mapping.
 */
void vm_reserve_page(struct vmmap *vmmap, void *vaddr)
{
	uint_t b = bit_no(vmmap, vaddr);
	//Reserving a page twice is a caller bug
	assert(!get_bit(vmmap->vm_pagemap, b));
	set_bit(vmmap->vm_pagemap, b);
}

/*
 * Reserve 'count' consecutive pages starting at 'vaddr' (bitmap only,
 * no mapping is created). Asserts if any page is already reserved.
 */
void vm_reserve_pages(struct vmmap *vmmap, void *vaddr, uint_t count)
{
	uint_t n;
	for(n = 0; n < count; n++)
		vm_reserve_page(vmmap, vaddr + n * PAGE_SIZE);
}


void *virt_to_phys(struct vmmap *vmmap, void *virt_address)
{
	u32 flags;
	void *phys;
	get_phys_address(&vmmap->page_sys, &phys, (unsigned long *)&flags, virt_address);
	return phys;
}

/*
 * Report an invalid free request. A kernel address trips an assertion
 * (the caller is buggy kernel code); a user address is merely logged
 * to the console. Either way the caller receives ESTINV.
 */
static int handle_free_trouble(void *addr, char *msg)
{
	if(is_kernel_address(addr))
		assertv(false, msg, addr);
	else
		console_printf(msg, addr);
	return ESTINV;
}

/*
 * Unmap and free 'count' pages starting at virt_address: release the
 * backing physical pages and clear the vmmap bitmap bits.
 * Returns 0 on success or ESTINV on a bad request.
 */
int vm_free_pages(struct vmmap *vmmap, void *virt_address, uint_t count)
{
	if(((ptr_t)virt_address & PAGE_ADDRESS_MASK) != ((ptr_t)virt_address))
		return handle_free_trouble(virt_address, "Address 0x%x is not page aligned\n");
	uint_t bit = bit_no(vmmap, virt_address);
	uint_t i, final = bit + count;
	//Reject ranges that wrap around or run past the end of the bitmap;
	//otherwise the get_bit/clear_bit calls below would go out of bounds.
	if(final < bit || final > MAP_NR_PAGES)
		return handle_free_trouble(virt_address, "Free of 0x%x runs past the page map\n");
	//Verify every page in the range is in use before touching anything,
	//so a bad request frees nothing at all
	for(i = bit; i < final; i++) {
		if(!get_bit(vmmap->vm_pagemap, i)) {
			void *addr = address_from_bit(vmmap, i);
			return handle_free_trouble(addr, "Page 0x%x is already free\n");
		}
	}
	for(i = bit; i < final; i++) {
		//Unmap to recover the physical address, then release it
		void *phys, *virt = address_from_bit(vmmap, i);
		vm_unmap_address(vmmap, &phys, virt);
		phys_free_pages(phys, 1);
		clear_bit(vmmap->vm_pagemap, i);
	}
	return 0;
}


//
// Reserve 'count' contiguous pages of virtual space without mapping
// them to any physical address. Returns the base virtual address of
// the run, or null when no large-enough free run exists.
//
void *vm_get_virt(struct vmmap *vmmap, uint_t count)
{
	uint_t run = 0, first = 0, i;
	for(i = 0; i < MAP_NR_PAGES; i++) {
		if(get_bit(vmmap->vm_pagemap, i)) {
			//A used page breaks the current free run
			run = 0;
			continue;
		}
		if(run == 0)
			first = i;	//start of a fresh free run
		run++;
		if(run == count) {
			//Found a big enough run: claim every page in it
			uint_t j;
			for(j = first; j <= i; j++)
				set_bit(vmmap->vm_pagemap, j);
			return address_from_bit(vmmap, first);
		}
	}
	return null;
}


//
// Release previously reserved virtual space: clear 'count' bitmap
// bits starting at the page containing 'addr'. Asserts if any page
// in the range was not reserved.
//
void vm_free_virt(struct vmmap *vmmap, void *addr, uint_t count)
{
	uint_t first = bit_no(vmmap, addr);
	uint_t n;
	for(n = 0; n < count; n++) {
		assert(get_bit(vmmap->vm_pagemap, first + n));
		clear_bit(vmmap->vm_pagemap, first + n);
	}
}

/*
 * Allocate 'count' pages of virtual space and back each page with a
 * freshly allocated physical page mapped under page_flags.
 * Returns the base virtual address, or null when no contiguous run of
 * virtual pages is available.
 */
void *vm_get_pages(struct vmmap *vmmap, uint_t count, u32 page_flags)
{
	void *vaddr = vm_get_virt(vmmap, count);
	//Previously a null here fell through and mapped pages starting at
	//the null address; bail out instead
	if(vaddr == null)
		return null;
	uint_t i;
	void *vaddr_i = vaddr;
	for(i = 0; i < count; i++) {
		void *phys = phys_get_pages(1);
		vm_map_address(vmmap, phys, vaddr_i, page_flags);
		vaddr_i += PAGE_SIZE;
	}
	return vaddr;
}

/*
 * When you know the physical pages and want access to them, this maps
 * them into freshly reserved virtual space and returns the virtual
 * base address, or null when no contiguous virtual run is available.
 */
void *vm_get_virt_for_phys(struct vmmap *vmmap, void *phys, uint_t count, u32 page_flags)
{
	void *vaddr = vm_get_virt(vmmap, count);
	//Do not map anything at the null page when virtual space runs out
	if(vaddr == null)
		return null;
	uint_t i;
	void *vaddr_i = vaddr;
	void *phys_i = phys;
	for(i = 0; i < count; i++) {
		//The physical range is deliberately not validated here: e.g.
		//VBE framebuffers (0xe0000000) lie outside the physical page
		//range managed by kernel/page.c, so a phys_is_page() check
		//would wrongly reject them.
		vm_map_address(vmmap, phys_i, vaddr_i, page_flags);
		vaddr_i += PAGE_SIZE;
		phys_i += PAGE_SIZE;
	}
	return vaddr;
}

/*
 * When you know the virtual address space and want physical pages to
 * store data when you access the space, this gets new physical pages
 * and maps them onto the specified virtual range.
 * No physical address is returned: it would be of no use, since
 * dereferencing it would really access whatever that address is
 * mapped to as a virtual address, with unpredictable results.
 */
void vm_make_phys_for_virt(struct vmmap *vmmap, void *vaddr, uint_t count, u32 page_flags)
{
	//Unlike contiguous virtual allocation, the backing physical pages
	//need not be contiguous, so grab them one at a time
	uint_t n;
	for(n = 0; n < count; n++) {
		void *page = phys_get_pages(1);
		vm_map_address(vmmap, page, vaddr + n * PAGE_SIZE, page_flags);
	}
}

/*
 * Switch the active vmmap: mark the outgoing vmmap (if any) offline,
 * install the new vmmap's page tables, and flag it online.
 */
void vm_set_online(struct vmmap *from_vmmap, struct vmmap *vmmap)
{
	if(from_vmmap)
		from_vmmap->is_online = 0;
	implement_page_sys(&vmmap->page_sys);
	vmmap->is_online = 1;
}

/*
 * Like vm_set_online(), but only prepares the page system and flips
 * the online flags; the actual translation switch happens later.
 */
void vm_prepare_online(struct vmmap *from_vmmap, struct vmmap *vmmap)
{
	if(from_vmmap)
		from_vmmap->is_online = 0;
	prepare_implement_page_sys(&vmmap->page_sys);
	vmmap->is_online = 1;
}
