/*
Copyright (C) 2011 Salil Bhagurkar

This file is part of illusion

illusion is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

illusion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with illusion.  If not, see <http://www.gnu.org/licenses/>.
*/


#include <kernel/process.h>
#include <kernel/vmpage.h>
#include <klib/lib.h>
#include <klib/string.h>
#include <klib/memory.h>
#include <klib/format.h>
#include <kernel/init.h>
#include <kernel/kmalloc.h>
#include <arch/arch.h>
#include <arch/power.h>
#include <arch/asmdecl.h>
#include <arch/page.h>
#include <kernel/console.h>
#include <kernel/vfs.h>
#include <kernel/module.h>
#include <kernel/ilapp.h>
#include <kernel/errors.h>
#include <apimod/apimod.h>
#include <kernel/lfault.h>
#include <kernel/sched.h>
#include <kernel/init.h>
#include <kernel/list.h>

//VFS directory node under which running process instances are published
//(populated by process_init() below; entries added via list_instance())
struct vfs_node *vfs_process;

/*
 * Process hashing for PID lookups.
 * Each process owns one process_list entry, chained into the hash bucket
 * selected by pid % PIDHASH_SIZE.
 */
struct process_list {
	struct process *process;		//process owning this hash entry
	struct process_list *next, *prev;	//bucket chain links (klib list macros)
};

//Number of hash buckets for PID lookups
#define PIDHASH_SIZE 20
//Head and tail arrays for each bucket chain (list_attach/list_detach need both)
static struct process_list *pid_hash[PIDHASH_SIZE] = {0}, *pid_hash_tail[PIDHASH_SIZE] = {0};

/*
 * Insert a process into the PID hash so get_process_by_pid() can find it.
 * The hash entry is allocated here and stored in process->pl so that
 * process_list_remove() can free it later.
 */
static void process_list_add(struct process *process)
{
	int bucket = process->pid % PIDHASH_SIZE;
	struct process_list *entry = (struct process_list *)kmalloc(
			sizeof(struct process_list), "procl");
	entry->process = process;
	process->pl = entry;
	list_attach(pid_hash[bucket], pid_hash_tail[bucket], entry);
}

/*
 * Remove a process from the PID hash and free its hash entry.
 * Clears process->pl so the stale pointer cannot be reused.
 */
static void process_list_remove(struct process *process)
{
	struct process_list *entry = process->pl;
	int bucket = process->pid % PIDHASH_SIZE;
	process->pl = null;
	list_detach(pid_hash[bucket], pid_hash_tail[bucket], entry);
	kfree(entry, sizeof(struct process_list), "procl");
}

/*
 * Look up a process by its PID via the hash table.
 * Returns the matching process, or null if no process with that PID exists.
 */
struct process *get_process_by_pid(uint_t pid)
{
	int hash_index = pid % PIDHASH_SIZE;
	struct process_list *pl;
	//Walk the bucket chain for this PID's hash slot
	list_for(pid_hash[hash_index], pl) {
		if(pl->process->pid == pid)
			return pl->process;
	}
	return null;
}



//Next PID to hand out; PIDs start at 1 (0 is never a valid PID)
static uint_t next_process_id = 1;

//The flags to be checked by the legal fault subsystem.
//Parenthesized so each expansion is a single expression regardless of the
//context it is pasted into (unparenthesized '|' chains are a precedence
//hazard if the macro ever appears next to a higher-precedence operator).
#define PIMG_CHECK_MASK (LF_ABSENT | LF_UNEXPECTED | LF_VIOLATION | LF_READ | LF_WRITE)
#define EXT_CHECK_MASK (LF_ABSENT | LF_UNEXPECTED | LF_VIOLATION)

//Bytes reserved at the top of a kernel stack for the extra frame data
//that -finstrument-functions generates
#define FINSTRUMENT_FUNCTIONS_GUARD 0x10

/*
 * Record a freshly created stack on the process.
 * kern selects whether this is the kernel-mode stack or the user stack.
 * 'stack' points just past the top of the mapped stack region.
 */
void assign_stack(struct process *process, void *stack, uint_t size, char kern)
{
	if(kern) {
		process->kernel_stack_size = size;
		//Reserve FINSTRUMENT_FUNCTIONS_GUARD bytes at the top for the
		//extra data emitted by -finstrument-functions
		process->kernel_stack = stack - FINSTRUMENT_FUNCTIONS_GUARD;
	} else {
		process->stack_size = size;
		process->stack = stack;
	}
}

/*
 * This creates a buffer around the stack of 2 pages, both not present.
 * This will prevent buffer underflows and overflows to some extent
 */
/*
 * Allocate and map a stack of 'size' bytes inside the process's vmmap,
 * surrounded by one not-present guard page on each side.
 * This will prevent buffer underflows and overflows to some extent.
 * 'kern' selects whether the result becomes the kernel stack or the user
 * stack (see assign_stack()).
 */
static void create_process_stack(struct process *process, uint_t size, char kern)
{
	//Round the requested size up to whole pages...
	uint_t nr_pages = size / PAGE_SIZE + !!(size % PAGE_SIZE);
	//...plus one guard page at each end
	nr_pages += 2;
	//Create a stack space with two pages
	//at the start and end to prevent buffer underflows and overflows to some extent
	void *stack_v = vm_get_virt(process->vmmap, nr_pages);

	debug("stack_v = 0x%x - 0x%x\n", stack_v, stack_v + nr_pages * PAGE_SIZE);

	//Here we first map in the "present" stack pages
	//and then set the guard pages to not-present.
	//The pages that are first mapped determine the flags that will
	//be set on the page directory. If we map the not-present pages first,
	//then a not-present flag will be set on the page directory itself.
	//This can be overridden by specifying the transaction, though.

	//Set first and last page to absent
	void *first_page = stack_v;

	//Map the inner (usable) pages as present+writable; user-accessible
	//only when this is not a kernel stack
	void *inner_pg = stack_v + PAGE_SIZE;
	uint_t i;
	uint_t flags = PAGE_PRESENT | PAGE_WRITE |
			((!kern) * PAGE_USER);
	for(i = 0; i < nr_pages - 2; i++) {
		vm_map_address(process->vmmap, phys_get_pages(1), inner_pg,
				flags);
		inner_pg += PAGE_SIZE;
	}
	//After the loop inner_pg points one page past the last mapped page,
	//i.e. at the upper guard page / top of the stack
	void *last_page = inner_pg;

	//Map the first and last page to not-present
	vm_map_address(process->vmmap, null, first_page, 0);
	vm_map_address(process->vmmap, null, last_page, 0);

	//Flag that indicates if we are creating a kernel stack or a user stack.
	//Note the stack top (inner_pg) is passed; stacks grow downward on this
	//architecture -- presumably x86, see arch headers.
	assign_stack(process, inner_pg, size, kern);
}

/*
 * We only need to destroy stack of kernel processes, as for user processes,
 * the entire vmmap can be simply destroyed.
 */
/*
 * We only need to destroy the stack of kernel processes, as for user
 * processes the entire vmmap can be simply destroyed.
 * Undoes the mappings made by create_process_stack(..., kern=1).
 */
static void destroy_kernel_process_stack(struct process *process)
{
	uint_t size = process->kernel_stack_size;
	//Undo the FINSTRUMENT_FUNCTIONS_GUARD offset applied in assign_stack()
	//to recover the stack top, then step back by 'size' to its base.
	//NOTE(review): this assumes size is a whole multiple of PAGE_SIZE so
	//that stack_start is page-aligned -- confirm PROCESS_KERNEL_STACK_SIZE.
	void *stack_start = (process->kernel_stack + FINSTRUMENT_FUNCTIONS_GUARD)
			- size;
	void *stack_end =stack_start + size;
	uint_t nr_pages = size / PAGE_SIZE + !!(size % PAGE_SIZE);
	//Free the mapped stack pages (physical and virtual)
	vm_free_pages(&kernel_vmmap, stack_start, nr_pages);
	//Free guard pages from virtual space
	vm_free_virt(&kernel_vmmap, stack_start - PAGE_SIZE, 1);
	vm_free_virt(&kernel_vmmap, stack_end, 1);
}


static void create_process_vmmap(struct process *process)
{
	process->vmmap = (struct vmmap *)kmalloc(sizeof(struct vmmap), "vmmap");
	memset(process->vmmap, 0, sizeof(struct vmmap));
	process->vmmap->is_online = 0;
	process->vmmap->vm_pagemap_start = (void *)APIMOD_DEFAULT_LOAD;
	create_page_sys(&(process->vmmap->page_sys), null, (void *)(APIMOD_DEFAULT_LOAD - PAGE_SIZE));
}

/*
 * Tear down a user process's vmmap: release every physical page still
 * mapped in its page map region, destroy the paging structures, and free
 * the vmmap itself.
 */
static void destroy_process_vmmap(struct process *process)
{
	void *vaddr_i = process->vmmap->vm_pagemap_start;
	uint_t i;
	//Walk every page slot in the process's page map region
	for(i = 0; i < MAP_NR_PAGES; i++) {
		if(vm_is_page(process->vmmap, vaddr_i)) {
			//See if it is mapped
			void *phys = virt_to_phys(process->vmmap, vaddr_i);
			if(phys) {
				//Only free the physical address.
				//The virtual reservation is only for this process
				//and the vmmap will be destroyed anyway
				phys_free_pages(phys, 1);
			}
		}
		vaddr_i += PAGE_SIZE;
	}
	destroy_page_sys(&process->vmmap->page_sys, null, (void *)(APIMOD_DEFAULT_LOAD - PAGE_SIZE));
	kfree(process->vmmap, sizeof(struct vmmap), "vmmap");
}


/*
 * List the process in the VFS ram disk
 */
static void list_instance(struct process *process)
{
	char fs_name[VFS_NAME_LEN];
	format_string(fs_name, VFS_NAME_LEN, "%s-%u", process->name, process->pid);
	struct vfs_node *node = vfs_create(vfs_process, fs_name);
	node->class = process;
	process->instance_fs = node;
}

/*
 * Remove the process's instance node from the VFS ram disk.
 * Delete, sync, then dispose -- the order mirrors the VFS teardown
 * convention used elsewhere in the kernel.
 */
static void unlist_instance(struct process *process)
{
	vfs_delete(process->instance_fs);
	vfs_sync(process->instance_fs);
	vfs_dispose_node(process->instance_fs, true);
}

/*
 * Build a user process from an ILAPP image already read into memory.
 *
 * ret_process: out parameter receiving the new process on success.
 * node: the VFS node of the executable (used for name and fault sourcing).
 * data/offset: buffer holding the image, and the byte offset of the ILAPP
 *              header within it.
 * length: total image length (currently unused here).
 * arguments/arguments_length: optional argument blob, copied if non-null.
 * inherit_wn: inherit the caller's working vfs_node instead of 'node'.
 *
 * Returns 0 on success, ESTINV if the image's load address is invalid.
 * The process image itself is NOT loaded here -- virtual space is reserved
 * and legal faults are registered so pages load lazily on page fault.
 */
int __create_process(struct process **ret_process, struct vfs_node *node, void *data, uint_t offset, uint_t length, char *arguments, uint_t arguments_length, bool inherit_wn)
{
	//Validate the ILAPP header's load address before allocating anything
	struct ilapp *ilapp = (struct ilapp *)(data + offset);
	if(ilapp->load != (void *)ILAPP_DEFAULT_LOAD) {
		console_printf("Invalid load address 0x%x for this process\n",
				ilapp->load);
		return ESTINV;
	}

	//Compute the offset at which the header will lie from the start of
	//the module after being loaded
	uint_t header_offset = ilapp->header - ilapp->load;

	//Get the real start of the module by subtracting this offset
	//from the actual header start
	void *real_module_addr = (void *)ilapp - header_offset;


	struct process *process = (struct process *)kmalloc(
			sizeof(struct process), "proc");

	//Read in the name of the node and use it for the process's name
	strcpy(process->name, node->name, PROCESS_NAME_LEN);

	create_process_vmmap(process);

	//Record the image layout from the ILAPP header
	process->load = ilapp->load;
	process->end = ilapp->end;
	process->main = ilapp->entry;

	//Number of pages from load to bss start; presumably these boundaries
	//are page-aligned in the ILAPP format -- confirm against ilapp.h
	uint_t image_pages =
			((ptr_t)(ilapp->bss) - (ptr_t)(ilapp->load)) / PAGE_SIZE;

	//Simply reserve some virtual space for the process
	//The process image will be loaded on page faults
	vm_reserve_pages(process->vmmap, process->load, image_pages);

	uint_t bss_pages =
			((ptr_t)(ilapp->end) - (ptr_t)(ilapp->bss)) / PAGE_SIZE;

	vm_reserve_pages(process->vmmap, ilapp->bss, bss_pages);


	//Offset of the module's code within the node's data, used to source
	//page-fault loads from the right file position
	uint_t img_code_offset = real_module_addr - data;

	process->legal_faults = process->legal_faults_tail = null;

	debug("ilapp->load = 0x%x\n", ilapp->load);
	debug("ilapp->bss = 0x%x\n", ilapp->bss);
	debug("ilapp->end = 0x%x\n", ilapp->end);

	debug("load = 0x%x\n", process->load);
	debug("end = 0x%x\n", process->load + PAGE_SIZE * image_pages);
	debug("bss_pages = 0x%x\n", bss_pages);

	//Register the fault region for the process image
	//Note that we allow a page fault to occur in kernel mode too
	register_legal_fault(process, "process_image_read",
			LF_ABSENT | LF_READ,
			PIMG_CHECK_MASK, process->load, image_pages,
			node, img_code_offset);

	register_legal_fault(process, "process_image_write",
			LF_ABSENT | LF_WRITE,
			PIMG_CHECK_MASK, process->load, image_pages,
			node, img_code_offset);


	//Not sure why I'm registering only an expected write fault on BSS.
	//Is this so that a BSS region is not read before it's written?
	register_legal_fault_no_source(process, "process_bss",
			LF_ABSENT | LF_WRITE,
			PIMG_CHECK_MASK, ilapp->bss, bss_pages);

	//Size (rounded up to pages) of each APIMOD section, so that the API
	//module can also be demand-loaded into this process's address space
	uint_t apimod_code_size = apimod_module.data - apimod_module.load;
	uint_t apimod_code_pages = (apimod_code_size / PAGE_SIZE) + !!(apimod_code_size % PAGE_SIZE);
	uint_t apimod_data_size = apimod_module.bss - apimod_module.data;
	uint_t apimod_data_pages = (apimod_data_size / PAGE_SIZE) + !!(apimod_data_size % PAGE_SIZE);
	uint_t apimod_bss_size = apimod_module.end - apimod_module.bss;
	uint_t apimod_bss_pages = (apimod_bss_size / PAGE_SIZE) + !!(apimod_bss_size % PAGE_SIZE);

	uint_t apimod_total_pages = apimod_code_pages + apimod_data_pages
			+ apimod_bss_pages;

	debug("apimod code = 0x%x = 0x%x\n", apimod_module.load, apimod_module.data);

	//Register the APIMOD legal faults
	register_legal_fault(process, "apimod_code_read",
			LF_ABSENT | LF_READ,
			PIMG_CHECK_MASK, apimod_module.load, apimod_code_pages,
			apimod_module_fs, 0);

	register_legal_fault(process, "apimod_data_read",
			LF_ABSENT | LF_READ,
			PIMG_CHECK_MASK, apimod_module.data, apimod_data_pages,
			apimod_module_fs, apimod_code_size);
	register_legal_fault(process, "apimod_data_write",
			LF_ABSENT | LF_WRITE,
			PIMG_CHECK_MASK, apimod_module.data, apimod_data_pages,
			apimod_module_fs, apimod_code_size);

	register_legal_fault_no_source(process, "apimod_bss_read",
			LF_ABSENT | LF_READ,
			PIMG_CHECK_MASK, apimod_module.bss, apimod_bss_pages);
	register_legal_fault_no_source(process, "apimod_bss_write",
			LF_ABSENT | LF_WRITE,
			PIMG_CHECK_MASK, apimod_module.bss, apimod_bss_pages);

	//Reserve all virt required by APIMOD
	vm_reserve_pages(process->vmmap, apimod_module.load, apimod_total_pages);


	debug("Process image @ 0x%x, %u pages\n", process->load, image_pages);

	create_process_stack(process, PROCESS_STACK_SIZE, 0);
	//The page directory was created as user mode
	//Hence plugging in the kernel stack keeps only the kernel
	//stack at kernel mode.
	create_process_stack(process, PROCESS_KERNEL_STACK_SIZE, 1);

	debug("Process stack @ 0x%x, %u bytes\n", process->stack,
			process->stack_size);

	//Mark for the scheduler that this process has never run yet
	process->first_run = 1;
	process->pid = next_process_id++;

	//Empty open-file list
	process->files = process->files_tail = null;

	//We don't want file_ids to start from zero
	process->next_fid = 1;

	//Set the vfs_node of the process (current location)
	//We inherit this if the inherit flag is set
	if(current_process != null && inherit_wn)
		process->vfs_node = current_process->vfs_node;
	else
		process->vfs_node = node;
	process->process_fs = node;

	process->is_kernel = 0;

	//Copy the argument blob into process-owned memory, if any
	if(arguments != null) {
		process->arguments = (char *)kmalloc(arguments_length, "pargs");
		memcpy(process->arguments, arguments, arguments_length);
		process->arguments_length = arguments_length;
	} else {
		process->arguments = null;
		process->arguments_length = 0;
	}

	//Add process to scheduler
	sched_add_process(process, PRIO_NORMAL);

	//Add to the PID hash
	process_list_add(process);

	//Add the instance to the illusion:/process node
	list_instance(process);

	process->redirect_out = null;

	//Create the process events
	process->evt_run = event_create("process_run");
	process->evt_sleep = event_create("process_sleep");
	process->evt_exit = event_create("process_exit");

	process->syscall_try = false;

	*ret_process = process;
	return 0;
}



/*
 * Create a user process from the executable stored at 'node'.
 * The node's data is read into a temporary buffer, scanned for the ILAPP
 * signature, and handed to __create_process(); the buffer is freed on
 * every path. Returns 0 on success, ESTINV if no ILAPP header is found
 * (or __create_process() rejects the image).
 */
int create_process(struct process **ret_process, struct vfs_node *node, char *arguments, uint_t arguments_length, bool inherit_wn)
{
	debug("Create process: %s\n", node->name);
	//Read in the module data
	char *img = (char *)kmalloc(node->length, "pimg");
	vfs_read(node, "data", img, 0, node->length);

	//Find the ILAPP header
	char *sign = img;
	uint_t i;
	for(i = 0; i < node->length; i++) {
		if(streq(&sign[i], ILAPP_SIGN)) {
			int err = __create_process(ret_process, node, img, i, node->length, arguments, arguments_length, inherit_wn);
			kfree(img, node->length, "pimg");
			return err;
		}
	}
	//Bug fix: the image buffer was previously leaked on this failure path
	kfree(img, node->length, "pimg");
	console_printf("No ILAPP header found for process\n");
	return ESTINV;
}



/*
 * Create a process that will run in the kernel mode
 */
/*
 * Create a process that will run in kernel mode.
 * Unlike user processes it shares the kernel vmmap, has no image to load
 * (main is a kernel function pointer), and registers no legal faults.
 * Stores the new process in *ret_process and returns 0.
 */
int create_kernel_process(struct process **ret_process, char *name, void (*main)())
{
	debug("Create kernel process: %s 0x%x\n", name, main);

	struct process *process = (struct process *)kmalloc(sizeof(struct process), "proc");
	//Zero the PCB so every field not set below starts out null/0
	memset(process, 0, sizeof(struct process));

	//Kernel processes run inside the shared kernel address space
	process->vmmap = &kernel_vmmap;

	strcpy(process->name, name, PROCESS_NAME_LEN);

	//This process has no load address
	process->load = null;

	//No legal faults possible here
	process->legal_faults = process->legal_faults_tail = null;

	process->main = main;

	//Only a kernel stack is needed (kern=1)
	create_process_stack(process, PROCESS_KERNEL_STACK_SIZE, 1);

	process->first_run = 1;
	process->pid = next_process_id++;

	process->files = process->files_tail = null;

	//We don't want file_ids to start from zero
	process->next_fid = 1;

	//Set the vfs_node of the process
	process->vfs_node = null;

	process->is_kernel = 1;

	//Add process to scheduler
	sched_add_process(process, PRIO_NORMAL);

	//Add to the PID hash
	process_list_add(process);

	//Add to the illusion:/process node
	list_instance(process);

	process->redirect_out = null;

	//Create the process events
	process->evt_run = event_create("process_run");
	process->evt_sleep = event_create("process_sleep");
	process->evt_exit = event_create("process_exit");

	*ret_process = process;
	return 0;
}

/*
 * Launch a process that has never run, with no previous context to save.
 * User processes start on their own stack and page directory; kernel
 * processes start on the kernel stack and the shared kernel page directory.
 */
static void start_new_process(struct process *process)
{
	debug("First Run: %s, stack = 0x%x\n", process->name,
			process->kernel_stack);

	if(!process->is_kernel) {
		_start_new_user_process(process->stack, process->main,
				process->vmmap->page_sys.pg_dir);
		return;
	}
	_start_new_kernel_process(process->kernel_stack, process->main,
			kernel_vmmap.page_sys.pg_dir);
}



/*
 * Switch from a running process to one that has never run, saving the
 * source process's context first.
 */
static void switch_to_new_process(struct process *from,
		struct process *to)
{
	debug("%s -> First Run: %s, stack = 0x%x\n", from->name, to->name,
			to->kernel_stack);

	if(!to->is_kernel) {
		_switch_to_new_user_process(&from->process_context.stack_ptr,
				to->stack, to->main, to->vmmap->page_sys.pg_dir);
		return;
	}
	_switch_to_new_kernel_process(&from->process_context.stack_ptr,
			to->kernel_stack, to->main, kernel_vmmap.page_sys.pg_dir);
}

/*
 * Switch between two processes that have both run before, saving the
 * source context and restoring the target's saved context.
 */
static void switch_to_process(struct process *from, struct process *to)
{
	debug("%s -> %s, stack = 0x%x\n", from->name, to->name,
			to->process_context.stack_ptr);
	//As against the first-run helpers above, the 'to' stack passed here
	//is the one from the saved process context. The only per-mode
	//difference is which page directory becomes active.
	_switch_to_process(&from->process_context.stack_ptr,
			to->process_context.stack_ptr,
			to->is_kernel ? kernel_vmmap.page_sys.pg_dir
					: to->vmmap->page_sys.pg_dir);
}

/*
 * Switch to a previously-run process without saving any source context
 * (used when the source process has been deleted).
 */
static void switch_to_process_no_save(struct process *to)
{
	debug("<deleted> -> %s, stack = 0x%x\n", to->name, to->process_context.stack_ptr);

	_switch_to_process_no_save(to->process_context.stack_ptr,
			to->is_kernel ? kernel_vmmap.page_sys.pg_dir
					: to->vmmap->page_sys.pg_dir);
}


/*
 * Save context and switch to a new process
 */
void process_switch(struct process *from, struct process *to)
{
	//Check if the target process is running for the first time
	if(to->first_run) {
		//Set the first_run flag to zero
		to->first_run = 0;
		//Set the current process in the system
		current_process = to;
		//Set the stack which must be loaded if there
		//is an interrupt/system call from this process
		set_current_kernel_stack((ptr_t)(
				to->kernel_stack));
		//Check if a source context exists
		if(from != null) {
			//Prepare to switch to the new vmmap
			vm_prepare_online(from->vmmap, to->vmmap);
			switch_to_new_process(from, to);
		} else {
			//Prepare to switch to the new vmmap
			vm_prepare_online(&kernel_vmmap, to->vmmap);
			start_new_process(to);
		}
	} else {
		//Set the current process in the system
		current_process = to;
		//Set the stack which must be loaded if there
		//is an interrupt/system call from this process
		set_current_kernel_stack((ptr_t)(
				to->kernel_stack));
		//Prepare to switch to the new vmmap
		if(from == null) {
			vm_prepare_online(null, to->vmmap);
			switch_to_process_no_save(to);
		} else {
			vm_prepare_online(from->vmmap, to->vmmap);
			switch_to_process(from, to);
		}
	}
}

/*
 * Dump a process's state (paths, arguments, image range, open files)
 * to the console for debugging.
 */
void process_debug(struct process *process)
{
	//Scratch buffer reused for each vfs_get_path() call below
	char path[256];
	console_printf("Process [ %s ]\n", process->name);
	console_printf("exe = %s\n", vfs_get_path(path, 256, process->process_fs));
	console_printf("working = %s\n", vfs_get_path(path, 256, process->vfs_node));
	console_printf("instance = %s\n", vfs_get_path(path, 256, process->instance_fs));
	console_printf("args = %s\n", process->arguments);
	console_printf("image: 0x%x - 0x%x\n", process->load, process->end);

	console_printf("Process files:\n");
	struct process_file *pf;
	list_for(process->files, pf) {
		console_printf("fid = %u, node = %s, state = %u\n", pf->fid,
				vfs_get_path(path, 256, pf->node), pf->state);
	}
}

/*
 * Called by the process who is looking after a process, once the process exits
 */
void process_kill(struct process *process)
{
	debug("Killing process: %s\n", process->name);

	//Free all events
	event_free(process->evt_exit);
	event_free(process->evt_sleep);
	event_free(process->evt_run);

	unlist_instance(process);
	sched_remove_process(process);
	process_list_remove(process);

	if(process->is_kernel) {
		debug("(kernel process) Removing stack / PCB..\n");
		destroy_kernel_process_stack(process);
		kfree(process, sizeof(struct process), "proc");
	} else {
		//Close all process_files
		struct process_file *pf;
		for(pf = process->files; pf; ) {
			debug("Closing process file %u\n", pf->fid);
			struct process_file *next = pf->next;
			vfs_close(pf->node, pf->state);
			kfree(pf, sizeof(struct process_file), "file");
			pf = next;
		}
		debug("Destroying vmmap..\n");
		destroy_process_vmmap(process);
		debug("Unregistering legal faults..\n");
		unregister_legal_faults(process);
		if(process->arguments) {
			kfree(process->arguments, process->arguments_length, "pargs");
		}

		kfree(process, sizeof(struct process), "proc");
	}
	debug("Process successfully killed\n");
}
