/**
 * \file Memory.cc
 * \brief Memory management facilities
 * \author Corey Tabaka
 */

/*
   Copyright 2006 Corey Tabaka

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#include <platform/Memory.h>
#include <platform/Interrupt.h>
#include <platform/PageMap.h>
#include <platform/processor.h>
#include <platform/RegUtil.h>
#include <vm/Page.h>
#include <vm/Cache.h>
#include <vm/Store.h>
#include <Process.h>
#include <Queue.h>
#include <Log.h>
#include <syscall.h>
#include <panic.h>

#include <i386/console.h>
#include <i386/mem.h>
#include <malloc.h>
#include <stdio.h>
#include <new.h>

/** Points to a buffer containing a copy of the first 1M of system memory before
 * the system initialization. This is useful for creating vm86 mode VMs based on
 * the initial configuration of the system.
 */
uint8 *lowMemImage;

/*
 * Allocation space (heap)
 */ 
// defined in kernel.ld
extern uint8 heap_base[];
extern uint8 code[];
extern uint8 code_end[];
extern uint8 end[];
extern uint8 services[];
extern uint8 data_end[];

extern uint32 maxPhysicalMemAddr;

static uint32 heap_break = (uint32) heap_base;

/*
 * Physical memory page map
 */
static PageMap physicalPageMap((uint32) heap_base, MEM_IDENTITY_MAP_SIZE);
static bool usePageMap = false;

/*
 * Morecore using freeable memory pools
 */
#define HEAP_POOL_SIZE		(256*1024)
#define HEAP_POOL_MASK		0x3ffff
#define HEAP_POOL_ENTRIES	(1024)

static int nextHeapPool;
static uint32 heapPoolBreak;
static uint32 heapPools[HEAP_POOL_ENTRIES];

/*
 * Page management for VMs
 */
enum {
	QUEUE_FREE=0,
	QUEUE_CLEAR,
	QUEUE_ACTIVE,
	QUEUE_MODIFIED,
	QUEUE_RESERVED,
	
	NUM_QUEUES,
};

typedef struct {
	QList list;
	uint32 count;
	InterruptLock lock;
} page_queue_t;

static page_queue_t queues[NUM_QUEUES];

/*
 * Page queue macros
 */
#define enqueue(page, queue) queues[queue].list.append(&page.queueNode)
#define dequeue(page) page.queueNode.remove()

/*
 * Array of all physical pages in the system
 */
static Page *pages;

/*
 * Cache for kernel heap pages
 */
static Cache heapCache(RefStore(0), 0);

/*
 * VM stats
 */
static struct {
	uint32 total;
	uint32 free;
	uint32 active;
	uint32 clear;
	uint32 reserved;
} vm_info;

static void printUsage(void) {
	struct mallinfo mi = mallinfo();
		
	printf_xy(71, 10, WHITE, "Peak");
	printf_xy(71, 11, LIGHTGRAY, "%7u K", mi.usmblks / 1024);
	printf_xy(71, 12, WHITE, "Reserved");
	printf_xy(71, 13, LIGHTGRAY, "%7u K", mi.arena / 1024);
	printf_xy(71, 14, WHITE, "Used");
	printf_xy(71, 15, LIGHTGRAY, "%7u K", mi.uordblks / 1024);
	printf_xy(71, 16, WHITE, "Free");
	printf_xy(71, 17, LIGHTGRAY, "%7u K", mi.fordblks / 1024);
	
	printf_xy(71, 18, WHITE, "Sys Used");
	printf_xy(71, 19, LIGHTGRAY, "%7u K",
		physicalPageMap.getAllocatedPages() * 4);
	printf_xy(71, 20, WHITE, "Sys Free");
	printf_xy(71, 21, LIGHTGRAY, "%7u K", physicalPageMap.getFreePages() * 4);
}

void Memory::initialize(void) {
	printf("        %u MB system memory\n", maxPhysicalMemAddr / 0x100000);
	
	// reset the page map settings if the size info was found
	if (maxPhysicalMemAddr > 0) {
		physicalPageMap.initialize((uint32) heap_base, maxPhysicalMemAddr);
	}
	
	// switch from using the temporary heap to the page map
	if (heap_break > (uint32) heap_base) {
		physicalPageMap.reserve((uint32) heap_base,
			heap_break - (uint32) heap_base);
	}
	usePageMap = true;
	
	lowMemImage = new uint8[0x100000];
	memcpy(lowMemImage, 0, 0x100000);
	
	// init the VM page management structures
	uint32 numObjs = maxPhysicalMemAddr / PAGE_SIZE;
	uint32 numPages = numObjs * sizeof(Page) / PAGE_SIZE;
	
	vm_info.total = numObjs;
	
	new (&heapCache) Cache(RefStore(0), numPages, CACHE_OPTIONS_NOSCAN); 
	
	pages = (Page *) Memory::alloc(numPages);
	
	for (uint32 i=0; i < numObjs; i++) {
		memset(pages + i, 0, sizeof(Page));
		
		uint32 addr = i << PAGE_SHIFT;
		
		if (physicalPageMap.isFree(addr)) {
			new (pages + i) Page(i, PAGE_STATE_FREE);
			enqueue(pages[i], QUEUE_FREE);
			vm_info.free++;
		} else if (addr >= (uint32) heap_base) {
			new (pages + i) Page(i, PAGE_STATE_ACTIVE, i);
			enqueue(pages[i], QUEUE_ACTIVE);
			heapCache.insert(pages[i]);
			vm_info.active++;
		} else {
			new (pages + i) Page(i, PAGE_STATE_WIRED, i);
			enqueue(pages[i], QUEUE_RESERVED);
			vm_info.reserved++;
		}
	}
	
	// now is a good time to enable the caches
	clearInCR0(X86_CR0_CD);
	clearInCR0(X86_CR0_NW);
	
	RefProcess proc = Process::getSystemProcess();
	proc->getPageTable().makeCurrent();
	setInCR0(X86_CR0_PG | X86_CR0_WP);
	
	proc->getPageTable().acquire(code, code, code_end - code, false,
		ACQUIRE_MODE_KEEP_PDE
		| ACQUIRE_MODE_REPLACE_PTE
		| ACQUIRE_MODE_READ_ONLY);
	
	Interrupt::registerHandler(INT_SYSCALL, syscallHandler);
	
	//printUsage();
}

void *Memory::alloc(uint32 len) {
	uint32 where;
	
	if (len && physicalPageMap.alloc(&where, len)) {
		return (void *) where;
	} else {
		return 0;
	}
}

void Memory::free(void *base, uint32 len) {
	physicalPageMap.free((uint32) base, len);
}

PageMap &Memory::getPhysicalPageMap(void) {
	return physicalPageMap;
}

/*
 * Morecore and memory pool functions
 */

static uint32 heapPoolAlloc(int num) {
	uint32 base = (uint32) Memory::alloc(HEAP_POOL_SIZE * num);
	for (int i=0; i < num; i++) {
		heapPools[nextHeapPool] = base + HEAP_POOL_SIZE * i;
		nextHeapPool++;
	}
	
	return base;
}

static void heapPoolFree(int num) {
	for (int i = 0; i < num; i++) {
		nextHeapPool--;
		Memory::free((void *) heapPools[nextHeapPool], HEAP_POOL_SIZE);
		heapPools[nextHeapPool] = 0;
	}
}

extern "C"
void *morecore(long size) {
	uint32 addr;
	
	if (usePageMap) {
		if (size > 0) {
			size = (size + HEAP_POOL_MASK) & ~HEAP_POOL_MASK;
			
			addr = heapPoolAlloc(size / HEAP_POOL_SIZE);
			
			return (void *) addr;
		} else if (size == 0) {
			addr = heapPools[nextHeapPool - 1];
			return (void *) (addr + HEAP_POOL_SIZE);
		} else {
			addr = heapPools[nextHeapPool - 1];
			heapPoolFree(-size / HEAP_POOL_SIZE);
			
			return (void *) addr;
		}
	} else {
		if (size > 0) {
			size = (size + HEAP_POOL_MASK) & ~HEAP_POOL_MASK;
			
			addr = heap_break;
			for (int i=0; i < size / HEAP_POOL_SIZE; i++) {
				heapPools[nextHeapPool] = heap_break + HEAP_POOL_SIZE * i;
				nextHeapPool++;
			}
			heap_break += size;
			
			return (void *) addr;
		} else if (size == 0) {
			addr = heapPools[nextHeapPool - 1];
			return (void *) (addr + HEAP_POOL_SIZE);
		} else {
			addr = heapPools[nextHeapPool - 1];
			for (int pools = -size / HEAP_POOL_SIZE; pools; pools--) {
				nextHeapPool--;
				heapPools[nextHeapPool] = 0;
				heap_break -= HEAP_POOL_SIZE;
			}
			
			return (void *) addr;
		}
	}
}

#define _CLASS (syscallParam(regs, 0))
#define _FUNC (syscallParam(regs, 1))
#define _ARG(n) (syscallParam(regs, (n)+2))

bool Memory::syscallHandler(regs_t *regs) {
	bool handled = false;
	
	if (_CLASS == SYSCALL_MEMORY) {
		switch(_FUNC) {
			case SYSCALL_MEMORY_MALLINFO: {
				struct mallinfo mi = mallinfo();
				
				memcpy((void *) _ARG(0), &mi, sizeof(mi));
			}
			break;
			
			case SYSCALL_MEMORY_PAGEINFO: {
				uint32 *values = (uint32 *) _ARG(0);
				
				values[0] = physicalPageMap.getPeakPages();
				values[1] = physicalPageMap.getAllocatedPages();
				values[2] = physicalPageMap.getFreePages();
			}
			break;
			
			default:
				Log::printf(LOG_MEDIUM, "Unrecognized SYSCALL_MEMORY function "
					"%u\n", _FUNC);
				break;
		}
		
		handled = true;
	}
	
	return handled;
}

Page *Page::lookup(uint32 page) {
	if (page >= (uint32) heap_base) {
	}
}

void Page::dump(void) {
	Log::printf(LOG_LOW, "Page::initialize: %u pages, %u free, %u active, "
		"%u reserved\n",
		vm_info.total, vm_info.free, vm_info.active, vm_info.reserved);
}

// Page

Page::Page(uint32 page, uint32 state) {
	this->page = page;
	this->state = state;
}

Page::Page(uint32 page, uint32 state, offset_t offset) {
	this->page = page;
	this->state = state;
	this->offset = offset;
}

uint32 Page::getPage(void) {
	return page;
}

uint32 Page::getState(void) {
	return state;
}

offset_t Page::getOffset(void) {
	return state;
}

void Page::setState(uint32 state) {
	// TODO: handle page state operations
	
	switch (state) {
	}	
}
