/*
	kernel_memory_space.c

	contains functionality for managing the kernel's memory space and for allocating
	chunks of memory within it

	Author: Aidan Goddard 11/7/14
*/

#include "types.h"
#include "memory.h"
#include "memory_offsets.h"
#include "kernel_fixed_table.h"
#include "memory_tables.h"
#include "lock.h"

#include "printf.h"

////////////////////////////////////////////////////////////////////////////////////
// variables and things

// serializes every allocation/free operation in this file
static struct lock_table lock;
// windows into the kernel's paging structures (addresses from memory_offsets.h)
static uint64_t *PDP = (uint64_t*)VIRTUAL_PDP_KERNEL;        // kernel PDP: one entry per 1gb PD
static uint64_t *PD_map_PT = (uint64_t*)VIRTUAL_PD_AREA_PT;  // PT that maps PD pages into the PD window
static uint64_t *PD_map = (uint64_t*)VIRTUAL_PD_MAP_BASE;    // window over all kernel PDs: one entry per 2mb PT
static uint8_t *PT_bitmap = (uint8_t*)VIRTUAL_ALLOCATION_BITMAP_2MB; // NOTE(review): unused in this file; vir_bitmap_2mb is used instead

extern struct physical_allocation_control *phys_allocation_control;
extern struct virtual_allocation_control *vir_allocation_control;

// one bit per 2mb PT of kernel space: 1 = in use, 0 = free
extern uint8_t *vir_bitmap_2mb;
extern uint64_t vir_bitmap_2mb_length;	// bitmap length in bytes

// Per-allocation bookkeeping, stored in page 31 of every block.
// Block layout: first PT's entries 0..30 map the PT pages themselves
// (entry 0 self-maps the first PT), entry 31 maps this control page,
// and the caller's data pages start at entry 32.
struct control_block
{
	uint64_t PT_count;        // number of PT pages backing this block (at most 31)
	uint64_t data_page_count; // number of 4kb data pages mapped for the caller
	uint64_t MAGIC;           // block base virtual address; validated on free
};

////////////////////////////////////////////////////////////////////////////////////
// internal function to get a 4kb page
// Obtains one 4kb physical page for kernel use, preferring high memory and
// falling back to low memory. Updates the kernel allocation counter on
// success. Returns the page's physical address, or 0 if none is available.
static inline uint64_t get_4kb_page()
{
	uint64_t page = memory_get_page(PAGE_SIZE_4K, PAGE_TYPE_HIGH);
	if(page == 0)
		page = memory_get_page(PAGE_SIZE_4K, PAGE_TYPE_LOW);

	// account for the page only if one was actually obtained
	if(page != 0)
		phys_allocation_control->kernel_memory_allocated += 4096;

	return page;
}

////////////////////////////////////////////////////////////////////////////////////
// internal function to free a 4kb page
// Releases one 4kb physical page previously obtained via get_4kb_page and
// updates the kernel allocation counter if the free succeeded.
// Returns memory_free_page's result (nonzero on success).
static inline uint64_t free_4kb_page(uint64_t address)
{
	uint64_t result = memory_free_page(address, PAGE_SIZE_4K);

	// only adjust the counter when the page was actually released
	if(result != 0)
		phys_allocation_control->kernel_memory_allocated -= 4096;

	return result;
}

////////////////////////////////////////////////////////////////////////////////////
// internal function to allocate a new PD
// Grows the kernel's virtual space by one PD (1gb = 512 PTs of 2mb).
// Finds a free kernel PDP slot, backs it with a freshly zeroed 4kb page,
// marks its 512 PTs free in the bitmap and updates the availability counters.
// Returns the base virtual address of the new 1gb region, or 0 if the PDP
// is full or no physical page could be obtained.
static uint64_t alloc_new_PD()
{
	// first find a place to put it
	// search through the PDP
	uint64_t i;
	for(i = 0; i < 512; i++)
	{
		// check if the PD entry is free
		if(PDP[i] == 0)
			break;
	}

	// i should point to PDP entry
	// if not, error out
	if(i == 512)
		return 0;

	// i points to PDP entry to use
	// get a 4kb page
	uint64_t PD_page = get_4kb_page();
	if(!PD_page)
		return 0;

	// map it into the PD window so it can be written
	// (0x103 = present | writable | global in x86 PTE terms)
	PD_map_PT[i] = PD_page | 0x103;
	invlpg(VIRTUAL_PD_MAP_BASE + (i * 4096));

	// clear the new PD so no 2mb region appears mapped
	uint64_t *PD = (uint64_t*)(VIRTUAL_PD_MAP_BASE + (i * 4096));
	uint64_t k;
	for(k = 0; k < 512; k++)
		PD[k] = 0;

	// make PDP point to it (0x3 = present | writable), making the space live
	PDP[i] = PD_page | 0x3;

	// clear the 64 bitmap bytes (512 bits) covering this PD's PTs
	// so the allocator sees them as free
	for(k = 0; k < 64; k++)
	{
		vir_bitmap_2mb[(i * 64) + k] = 0;
	}

	// update allocation variables to reflect the new address space
	vir_allocation_control->available_2mb_PTs += 512;
	vir_allocation_control->available_virtual_memory += 512 * 2097152;

	// finished, return the base virtual address of the space added by the new PD
	return (i * 0x40000000ULL) + KOFFSET;
}

////////////////////////////////////////////////////////////////////////////////////
// Lazy invalidation stuff

// Posts a newly-allocated virtual range to every CPU's lazy invalidation
// table so each core flushes any stale TLB entries covering the range.
// base_address: base virtual address; page_count: range length in 4kb pages.
// Returns base_address. During boot nothing is posted (only one CPU runs).
// BUGFIX: the old header claimed "returns 0 on error" (no path returned 0),
// and the boot path returned page_count while the normal path returned
// base_address -- the return value is now base_address on both paths.
static uint64_t mark_range_allocated_for_invalidation(uint64_t base_address, uint16_t page_count)
{
	// during boot only the boot CPU runs, so no posting is needed
	if(KERNEL->system_state == SYSTEM_STATE_BOOT)
		return base_address;

	// announce
	printf("marking 0x%x at %u pages for invalidation (new)", base_address, page_count);

	// post the range to each CPU's table
	uint64_t i;
	uint64_t MAX_CPU = KERNEL->CPU_count;
	for(i = 0; i < MAX_CPU; i++)
	{
		// get pointer to the table for this CPU
		struct lazy_invalidation_table *table = &(KERNEL->lazy_invalidation_tables[i]);

		// lock
		lock_no_IRQ(&table->lock);

		// add a new invalidation entry
		uint64_t next_entry = table->next_entry_new;
		if(next_entry < table->max_entry_count)
		{
			table->new_entries[next_entry].virtual_base = base_address;
			table->new_entries[next_entry].page_count_4kb = page_count;
			table->next_entry_new++;
		}
		// NOTE(review): if the table is full the range is silently dropped
		// and that CPU may keep stale TLB entries -- consider handling overflow

		// unlock
		unlock_no_IRQ(&table->lock);
	}

	return base_address;
}


// Posts a freed virtual range to every CPU's lazy invalidation table and to
// the global free table. The global entry carries a reference count of
// CPU_count; invalidate_pages() decrements it and reclaims the range's 2mb
// PTs once every CPU has flushed its TLB.
// base_address: base virtual address of the whole block; page_count: TOTAL
// length in 4kb pages (32 overhead pages + data pages, see caller).
// Returns base_address. During boot nothing is posted (only one CPU runs).
static uint64_t mark_range_freed_for_invalidation(uint64_t base_address, uint64_t page_count)
{
	// during boot only the boot CPU runs, so no posting is needed
	// (returns base_address for consistency with the post-boot path;
	//  previously this path returned page_count)
	if(KERNEL->system_state == SYSTEM_STATE_BOOT)
		return base_address;

	// announce
	printf("marking 0x%x at %u pages for invalidation (free)", base_address, page_count);

	// post the range to each CPU's table
	uint64_t i;
	uint64_t MAX_CPU = KERNEL->CPU_count;
	for(i = 0; i < MAX_CPU; i++)
	{
		// get pointer to the table for this CPU
		struct lazy_invalidation_table *table = &(KERNEL->lazy_invalidation_tables[i]);

		// lock
		lock_no_IRQ(&table->lock);

		// add a new invalidation entry
		uint64_t next_entry = table->next_entry_free;
		if(next_entry < table->max_entry_count)
		{
			table->free_entries[next_entry].virtual_base = base_address;
			table->free_entries[next_entry].page_count_4kb = page_count;
			table->next_entry_free++;
		}
		// NOTE(review): if the table is full the range is silently dropped

		// unlock
		unlock_no_IRQ(&table->lock);
	}

	// post into the global table
	struct lazy_invalidation_global_free_table *global = (struct lazy_invalidation_global_free_table*)KERNEL->lazy_invalidation_global;

	// lock the global table
	lock_no_IRQ(&global->lock);

	if(global->next_entry < global->max_entry_count)
	{
		global->entries[global->next_entry].virtual_base = base_address;
		global->entries[global->next_entry].page_count_4kb = page_count;
		global->entries[global->next_entry].references_remaining = KERNEL->CPU_count;
		// BUGFIX: next_entry was never advanced, so every posted range
		// overwrote entry 0 and invalidate_pages() never saw any of them,
		// leaking the freed PTs forever
		global->next_entry++;
	}

	// unlock
	unlock_no_IRQ(&global->lock);

	return base_address;
}

// Flushes this CPU's pending lazy TLB invalidations.
// OSCPUID: index of the calling CPU into KERNEL->lazy_invalidation_tables.
// "new" entries are simply invlpg'd. "free" entries are additionally matched
// against the global free table: once the last referencing CPU has flushed a
// freed range, its 2mb PTs are marked available again in the virtual bitmap
// and the global entry is removed.
void invalidate_pages(uint64_t OSCPUID)
{
	// announce
	printf("INVALIDATING PAGES");

	// find base pointer of the invalidation table for this CPU
	struct lazy_invalidation_table *table = &(KERNEL->lazy_invalidation_tables[OSCPUID]);

	// lock the table
	lock_no_IRQ(&table->lock);

	// flush all newly-allocated ranges posted to this CPU
	uint32_t i;
	uint32_t next_entry_new = table->next_entry_new;
	for(i = 0; i < next_entry_new; i++)
	{
		// invalidate every page of this entry
		uint32_t k;
		uint64_t base_to_invalidate = table->new_entries[i].virtual_base;
		uint64_t count = table->new_entries[i].page_count_4kb;
		for(k = 0; k < count; k++)
		{
			invlpg(base_to_invalidate);
			base_to_invalidate += 4096;
		}
	}
	table->next_entry_new = 0;


	// check the global free entries table
	struct lazy_invalidation_global_free_table *global = (struct lazy_invalidation_global_free_table*)KERNEL->lazy_invalidation_global;

	// lock the global table as well (same table-then-global order as
	// mark_range_freed_for_invalidation, so no lock-order inversion)
	lock_no_IRQ(&global->lock);

	// get pointer to the global entry array
	struct lazy_invalidation_entry *entries = (struct lazy_invalidation_entry*)global->entries;

	// flush all freed ranges posted to this CPU
	uint32_t next_entry_free = table->next_entry_free;
	for(i = 0; i < next_entry_free; i++)
	{
		// invalidate every page of this entry
		uint64_t k;
		uint64_t base_to_invalidate = table->free_entries[i].virtual_base;
		uint64_t count = table->free_entries[i].page_count_4kb;
		for(k = 0; k < count; k++)
		{
			invlpg(base_to_invalidate);
			base_to_invalidate += 4096;
		}

		// find the matching entry in the global table
		uint64_t global_entry_count = global->next_entry;
		for(k = 0; k < global_entry_count; k++)
		{
			// match on the base address
			if(entries[k].virtual_base == table->free_entries[i].virtual_base)
			{
				if(entries[k].references_remaining == 1)
				{
					// this is the last CPU to flush the range --
					// mark the associated PTs as available again.
					// count is the block's TOTAL page count (32 overhead
					// pages + data pages, as posted by
					// memory_kernel_free_memory), so the PTs spanned are
					// simply ceil(count / 512).
					// BUGFIX: the old code applied the allocation-side
					// formula (threshold 480, which expects a DATA page
					// count) to the total count, over-counting PTs for
					// some sizes and freeing bitmap bits it didn't own.
					uint64_t PT_start = (table->free_entries[i].virtual_base - KOFFSET) / 0x200000;
					uint64_t PT_count = (count + 511) / 512;

					uint64_t j;
					for(j = PT_start; j < PT_start + PT_count; j++)
					{
						// clear the PT's bit in the bitmap (0 = free)
						uint64_t byte = j >> 3;
						uint64_t bit = j & 7;
						vir_bitmap_2mb[byte] &= ~(1 << bit);
					}

					// allow the allocator to rescan from this point
					if(PT_start < vir_allocation_control->next_available_2mb_PT)
						vir_allocation_control->next_available_2mb_PT = PT_start;

					// remove this entry from the global table by shifting
					// the later entries down one slot
					for(j = k + 1; j < global_entry_count; j++)
					{
						entries[j-1].virtual_base = entries[j].virtual_base;
						entries[j-1].page_count_4kb = entries[j].page_count_4kb;
						entries[j-1].references_remaining = entries[j].references_remaining;
						entries[j-1].reserved1 = entries[j].reserved1;
					}
					global->next_entry--;
				}
				else
				{
					// other CPUs still need to flush this range
					entries[k].references_remaining--;
				}

				// done
				break;
			}
		}

	}
	table->next_entry_free = 0;

	// unlock it
	unlock_no_IRQ(&global->lock);

	// finished
	unlock_no_IRQ(&table->lock);
	return;
}



////////////////////////////////////////////////////////////////////////////////////
// external function to allocate a chunk of kernel memory

// Allocates a chunk of kernel virtual memory of (page_size * page_count)
// bytes, backed by individually-allocated 4kb physical pages.
// Returns the base virtual address of the usable data area, or 0 on failure
// (request too large, virtual space exhausted, or out of physical memory).
//
// Block layout inside a run of consecutive 2mb PTs:
//   first PT, entries 0..30 -> the PT pages themselves (entry 0 self-maps)
//   first PT, entry 31      -> the control block (struct control_block)
//   entries 32 onward       -> the caller's data pages
// At most 31 PTs per block, so the first PT holds 480 data pages and each
// further PT holds 512.
uint64_t memory_kernel_get_memory(PAGE_SIZE page_size, uint64_t page_count)
{
	// lock
	lock_no_IRQ(&lock);

	// determine how many 4kb pages are required
	// NOTE(review): page_size * page_count is not checked for overflow;
	// callers are trusted to request sane sizes
	uint64_t total_memory_requested = page_size * page_count;
	uint64_t total_pages_requested = total_memory_requested >> 12;

	// determine how many PTs are required
	// (first PT holds only 480 data pages; 32 entries are overhead)
	uint64_t PTs_required = 0;
	if(total_pages_requested > 480)
	{
		// need multiple PTs
		PTs_required = (total_pages_requested - 480) / 512;
		if((total_pages_requested - 480) % 512 > 0)
			PTs_required++;
		PTs_required++;
	}
	else
	{
		// only need one
		PTs_required = 1;
	}

	// check if too many PTs (31)
	if(PTs_required > 31)
	{
		// can't have more than 31 PTs
		unlock_no_IRQ(&lock);
		return 0;
	}

	// now try to find a run of consecutive free PTs of the right length,
	// growing the address space with new PDs until found or out of memory
	uint64_t i;
	uint64_t length_so_far = 0;
	uint64_t start_PT = 0;

	while(1)
	{
		// restart the scan from scratch on every attempt
		// BUGFIX: start_PT/length_so_far used to carry over between
		// attempts, double-counting a partially-scanned run after
		// alloc_new_PD extended the space
		start_PT = 0;
		length_so_far = 0;

		uint64_t l = vir_bitmap_2mb_length * 8;
		for(i = vir_allocation_control->next_available_2mb_PT; i < l && length_so_far < PTs_required; i++)
		{
			// get this PT's bit in the bitmap
			uint64_t byte = i >> 3;
			uint64_t bit = i & 7;
			uint8_t value = vir_bitmap_2mb[byte] & (1 << bit);

			if(value == 0)
			{
				// PT is free -- record the start of the run
				// (PT index 0 can never start a run: 0 doubles as the
				//  "not found" sentinel)
				if(start_PT == 0)
					start_PT = i;

				// extend the run
				length_so_far++;
			}
			else
			{
				// PT is in use -- reset the run
				start_PT = 0;
				length_so_far = 0;
			}
		}

		// accept the run only if it is long enough
		// BUGFIX: previously any nonzero start_PT was accepted, so a
		// too-short run of free PTs at the end of the bitmap could be
		// used and later marked over neighbouring allocations
		if(start_PT > 0 && length_so_far >= PTs_required)
			break;

		// did not find a run -- allocate a new PD and try again
		uint64_t new_pd = alloc_new_PD();
		if(new_pd == 0)
		{
			// cannot grow the space; ensure failure is reported below
			start_PT = 0;
			break;
		}
	}

	// check if a PT chain was found
	if(start_PT == 0)
	{
		unlock_no_IRQ(&lock);
		return 0;
	}

	// base virtual address of the whole block (each PT covers 2mb)
	uint64_t block_base_virtual_address = (start_PT * 0x200000) + KOFFSET;

	// get a physical page for the first PT
	uint64_t first_PT_page = get_4kb_page();
	if(!first_PT_page)
	{
		unlock_no_IRQ(&lock);
		return 0;
	}

	// map in the temporary PT and use it to reach the first real PT so it
	// can be initialised before going live
	PD_map[start_PT] = PHYSICAL_TEMP_PT | 0x3;
	uint64_t *temp_pt_ptr = (uint64_t*)VIRTUAL_TEMP_PT;
	for(i = 0; i < 512; i++)
		temp_pt_ptr[i] = 0;
	temp_pt_ptr[0] = first_PT_page | 0x103;

	// the first PT is now reachable at the block base
	invlpg(block_base_virtual_address);

	// zero the new PT
	uint64_t *ptr = (uint64_t*)block_base_virtual_address;
	for(i = 0; i < 512; i++)
		ptr[i] = 0;

	// entry 0 self-maps the first PT so the block's tables stay editable
	ptr[0] = first_PT_page | 0x103;

	// remap the PD entry to the real first PT, dropping the temporary PT
	PD_map[start_PT] = first_PT_page | 0x3;
	invlpg(block_base_virtual_address);

	// attempt to get the control block page
	uint64_t data_block_page = get_4kb_page();
	if(!data_block_page)
	{
		// ran out of memory here -- undo mapping of first block
		PD_map[start_PT] = 0;
		invlpg(block_base_virtual_address);

		// free the 4kb page
		free_4kb_page(first_PT_page);

		// return
		unlock_no_IRQ(&lock);
		return 0;
	}

	// map the control block in as entry 31
	ptr[31] = data_block_page | 0x103;
	invlpg(block_base_virtual_address + (31 * 4096));

	// get pointer to the control block and set initial values
	struct control_block *control = (struct control_block*)(block_base_virtual_address + (31 * 4096));
	control->PT_count = 1;
	control->data_page_count = 0;

	// map pages until total requested pages are mapped;
	// the first PT starts handing out entries at 32 (after the overhead)
	uint64_t current_pt_entry = 32;
	uint64_t current_pt = 0;

	// for each page to map
	for(i = 0; i < total_pages_requested; i++)
	{
		// determine if a new PT page is required
		if(current_pt_entry == 512)
		{
			// try to allocate a new PT
			uint64_t new_pt = get_4kb_page();
			if(new_pt)
			{
				// new PT was allocated successfully --
				// record it in the first PT's bookkeeping entries
				current_pt_entry = 0;
				current_pt++;

				ptr[current_pt] = new_pt | 0x103;
				invlpg(block_base_virtual_address + (4096 * current_pt));

				// clear it through its self-map page
				int k;
				for(k = 0; k < 512; k++)
					ptr[(current_pt * 512) + k] = 0;

				// map it into the PD
				PD_map[start_PT + current_pt] = new_pt | 0x3;
				control->PT_count++;
			}
		}

		// check if new PT allocation worked (if necessary)
		if(current_pt_entry < 512)
		{
			// allocate the next data page
			uint64_t next_page = get_4kb_page();
			if(next_page)
			{
				// map it in
				ptr[(current_pt * 512) + current_pt_entry] = next_page | 0x103;
				invlpg(block_base_virtual_address + (4096 * ((current_pt * 512) + current_pt_entry)));
				control->data_page_count++;

				// advance to the next PT entry
				current_pt_entry++;

				// done this page
				continue;
			}
		}

		// a PT or data page could not be allocated -- unwind everything

		// read the page counts before the control block is freed
		uint64_t data_pages_mapped = control->data_page_count;
		uint64_t pt_pages_mapped = control->PT_count;

		// unmap and free the control block
		uint64_t control_block_page_address = ptr[31] & 0xfffffffffffff000;
		ptr[31] = 0;
		invlpg(block_base_virtual_address + (31 * 4096));
		free_4kb_page(control_block_page_address);

		// unmap and free every data page already allocated
		// (ptr[] indexes run contiguously across the block's PTs)
		uint64_t k;
		for(k = 0; k < data_pages_mapped; k++)
		{
			uint64_t page_to_unmap = ptr[32 + k] & 0xfffffffffffff000;
			ptr[32 + k] = 0;
			invlpg(block_base_virtual_address + ((32 + k) * 4096));
			free_4kb_page(page_to_unmap);
		}

		// then the PTs -- all but the first one, first
		for(k = 1; k < pt_pages_mapped; k++)
		{
			uint64_t page_to_unmap = ptr[k] & 0xfffffffffffff000;
			ptr[k] = 0;
			// BUGFIX: the per-PT entries live in the PD window (PD_map),
			// as mapped above and as memory_kernel_free_memory unmaps them;
			// the old code cleared PDP[start_PT + k], indexing the PDP with
			// a PT index -- out of range for start_PT >= 512 and able to
			// clobber unrelated 1gb regions
			PD_map[start_PT + k] = 0;
			invlpg(block_base_virtual_address + (k * 4096));
			free_4kb_page(page_to_unmap);
		}

		// and then the first PT
		uint64_t page_to_unmap = ptr[0] & 0xfffffffffffff000;
		ptr[0] = 0;
		PD_map[start_PT] = 0;	// BUGFIX: was PDP[start_PT]
		invlpg(block_base_virtual_address);
		free_4kb_page(page_to_unmap);

		unlock_no_IRQ(&lock);
		return 0;
	}

	// push this block to the lazy invalidation system
	// (total page count = 32 overhead pages + the data pages)
	mark_range_allocated_for_invalidation(block_base_virtual_address, 32 + control->data_page_count);

	// mark the PTs as used in the bitmap
	uint64_t end_PT = start_PT + PTs_required;
	for(i = start_PT; i < end_PT; i++)
	{
		// set this PT's bit (1 = in use)
		uint64_t byte = i >> 3;
		uint64_t bit = i & 7;
		vir_bitmap_2mb[byte] |= (1 << bit);
	}
	vir_allocation_control->next_available_2mb_PT = end_PT;
	vir_allocation_control->available_2mb_PTs -= PTs_required;
	vir_allocation_control->available_virtual_memory -= PTs_required * 2097152;

	// set the control block's magic value to the entire block's base address
	// (memory_kernel_free_memory validates this before freeing)
	control->MAGIC = block_base_virtual_address;

	// finished -- the caller's data starts after the 32 overhead pages
	uint64_t address = block_base_virtual_address + (32 * 4096);

	// unlock and return
	unlock_no_IRQ(&lock);
	return address;
}


////////////////////////////////////////////////////////////////////////////////////
// external function to free a chunk of kernel memory
// return 0 on error, the address of the freed block on success

// Frees a chunk of kernel memory previously returned by
// memory_kernel_get_memory.
// address: the data base address handed to the caller (block base + 32 pages).
// Returns 0 on error (misaligned address or bad magic), or the base virtual
// address of the whole block on success. The block's 2mb bitmap bits are
// reclaimed lazily by invalidate_pages() once every CPU has flushed the range.
uint64_t memory_kernel_free_memory(uint64_t address)
{
	// lock
	lock_no_IRQ(&lock);

	// negative offset address to get base of entire block (inc. control & mapping blocks)
	address -= (4096 * 32);

	// sanity-check before dereferencing: a valid block base was produced as
	// (start_PT * 0x200000) + KOFFSET, so it must be at/above KOFFSET and
	// 2mb-aligned relative to it
	if(address < KOFFSET || ((address - KOFFSET) & 0x1fffff) != 0)
	{
		unlock_no_IRQ(&lock);
		return 0;
	}

	// get pointer to base of block
	uint64_t *block_PT = (uint64_t*)address;
	struct control_block *block_control = (struct control_block*)(address + (31 * 4096));

	// check if the magic value is correct
	if(block_control->MAGIC != address)
	{
		// it is not correct
		unlock_no_IRQ(&lock);
		return 0;
	}

	// clear the magic straight away so a second free of the same address
	// fails the check above instead of double-freeing pages
	block_control->MAGIC = 0;

	// free the data pages
	uint64_t data_block_limit = block_control->data_page_count + 32;
	uint64_t i;
	for(i = 32; i < data_block_limit; i++)
	{
		// get base physical address of the data page
		uint64_t block_address = block_PT[i] & 0xfffffffffffff000ULL;

		// unmap it
		block_PT[i] = 0;

		// free it
		free_4kb_page(block_address);
	}

	// now free the PTs
	// get PT start and limit
	uint64_t start_PT = (address - KOFFSET) / 0x200000;
	uint64_t limit_PT = start_PT + block_control->PT_count;

	// free all but the first PT
	for(i = start_PT + 1; i < limit_PT; i++)
	{
		// unmap from the PD
		PD_map[i] = 0;

		// unmap from the first PT's bookkeeping entries
		uint64_t PT_address = block_PT[i - start_PT] & 0xfffffffffffff000ULL;
		block_PT[i - start_PT] = 0;

		// free it
		free_4kb_page(PT_address);
	}

	// total pages to invalidate (32 overhead pages + data pages);
	// read before the control block page goes away
	uint64_t total_pages = block_control->data_page_count + 32;

	// unmap and free the control block
	uint64_t control_block_page_address = block_PT[31] & 0xfffffffffffff000ULL;
	block_PT[31] = 0;
	free_4kb_page(control_block_page_address);

	// unmap and free PT 0
	uint64_t PT0_address = block_PT[0] & 0xfffffffffffff000ULL;
	block_PT[0] = 0;
	free_4kb_page(PT0_address);
	PD_map[start_PT] = 0;

	// invalidate the entire range on this CPU
	for(i = 0; i < total_pages; i++)
	{
		invlpg(address + (i * 4096));
	}

	// push the range for lazy invalidation on the other CPUs; this also
	// triggers eventual reclamation of the PT bitmap bits
	mark_range_freed_for_invalidation(address, total_pages);

	// finished
	unlock_no_IRQ(&lock);
	return address;
}