
/*
 *  Checkpoint logic and helpers
 */

#include <time.h>
#include <proc.h>
#include <slab.h>
#include <string.h>
#include <sync.h>
#include <pmm.h>
#include <error.h>
#include <sched.h>
#include <elf.h>
#include <vmm.h>
#include <trap.h>
#include <unistd.h>
#include <stdio.h>
#include <sched.h>
#include <stdlib.h>
#include <assert.h>
#include <elf.h>
#include <fs.h>
#include <vfs.h>
#include <sysfile.h>
#include <swap.h>
#include <mbox.h>
#include <kio.h>
#include <file.h>
#include <stdio.h>
#include <mp.h>
#include <checkpoint.h>
#include <clock.h>
#include <version.h>


// Helpers below are adapted from the process-creation path (proc.c),
// specialized for restoring tasks from a checkpoint image.

// restart_setup_kstack - allocate KSTACKPAGE pages and install them as the
// kernel stack of `proc`. Returns 0 on success, -E_NO_MEM on failure.
static int
restart_setup_kstack(struct proc_struct *proc) {
    struct Page *stack_page = alloc_pages(KSTACKPAGE);
    if (stack_page == NULL) {
        return -E_NO_MEM;
    }
    proc->kstack = (uintptr_t)page2kva(stack_page);
    return 0;
}

// restart_setup_pgdir - allocate one page to serve as the page directory
// (PDT) of `mm`, seed it from the kernel's init page directory, and
// self-map it. Returns 0 on success, -E_NO_MEM on failure.
static int
restart_setup_pgdir(struct mm_struct *mm) {
    struct Page *pgdir_page = alloc_page();
    if (pgdir_page == NULL) {
        return -E_NO_MEM;
    }
    pgd_t *pgdir = page2kva(pgdir_page);
    // inherit all kernel-space mappings from the init page directory
    memcpy(pgdir, init_pgdir_get(), PGSIZE);
    map_pgdir(pgdir);
    mm->pgdir = pgdir;
    return 0;
}

// restart_mm_create - allocate an mm_struct and reset every field to the
// "empty address space" state. `oldmm` is currently unused; it is kept in
// the signature so a future version can copy properties from the
// checkpointed mm. Returns NULL on allocation failure.
struct mm_struct *
restart_mm_create(struct mm_struct *oldmm) {
    struct mm_struct *mm = kmalloc(sizeof(struct mm_struct));
    if (mm == NULL) {
        return NULL;
    }
    list_init(&(mm->mmap_list));
    list_init(&(mm->proc_mm_link));
    mm->mmap_tree = NULL;
    mm->mmap_cache = NULL;
    mm->pgdir = NULL;
    mm->map_count = 0;
    mm->swap_address = 0;
    mm->locked_by = 0;
    mm->brk_start = 0;
    mm->brk = 0;
    set_mm_count(mm, 0);
    sem_init(&(mm->mm_sem), 1);
    mm->lapic = -1;
    return mm;
}

// restart_fs_create - allocate an fs_struct plus its file table
// (FS_STRUCT_BUFSIZE bytes placed directly behind the struct) and
// initialize it to an empty state. `oldfs` is currently unused.
// Returns NULL on allocation failure.
struct fs_struct *
restart_fs_create(struct fs_struct *oldfs) {
    struct fs_struct *fs =
        kmalloc(sizeof(struct fs_struct) + FS_STRUCT_BUFSIZE);
    if (fs == NULL) {
        return NULL;
    }
    fs->pwd = NULL;
    // the filemap lives in the extra bytes right after the struct itself
    fs->filemap = (void *)(fs + 1);
    atomic_set(&(fs->fs_count), 0);
    sem_init(&(fs->fs_sem), 1);
    filemap_init(fs->filemap);
    return fs;
}

// restartret - first-switch entry point of every restored task: its address
// is stored in context.eip by build_thread(), so the first context switch to
// the task jumps here and falls through forkrets() into the trapframe that
// build_thread() placed on the kernel stack.
static void
restartret(void) {
    forkrets(pls_read(current)->tf);
}

// restart_timer_init - fill in a pre-allocated timer: it fires after
// `expires` ticks on behalf of `proc`, and starts unlinked from any
// timer list. Returns the same timer for call chaining.
static inline timer_t *
restart_timer_init(timer_t *timer, struct proc_struct *proc, int expires) {
    list_init(&(timer->timer_link));
    timer->proc = proc;
    timer->expires = expires;
    return timer;
}






// set_task_links - wire the restored proc_structs into the live kernel:
// assign fresh pids, register each task globally (hash table, proc_list,
// process count), rebuild the parent/sibling/child pointers via the
// old->new maps, and attach each task to its restored mm and fs_struct.
// Returns 0.
static int set_task_links(struct ckpt_ctx *ctx)
{
	int i;
	bool intr_flag;

	// the restoring (current) process donates its remaining time slice,
	// split evenly between itself and the restored tasks
	int timeslice = current->time_slice;
	timeslice = timeslice / (ctx->nr_tasks +1);
	current->time_slice = timeslice;

	// set parent and thread links of all tasks
	for (i = 0; i < ctx->proc_map.len; i++)
	{
		// proc = the freshly allocated task, oldproc = its checkpointed image
		struct proc_struct *proc = (struct proc_struct*)ctx->proc_map.arr[i].second;
		struct proc_struct *oldproc = (struct proc_struct*)ctx->tasks_arr[i];

		// publish the task globally with interrupts off so the scheduler
		// never observes a half-linked proc_struct
	    local_intr_save(intr_flag);
	    {
	        proc->pid = get_pid();

			proc->time_slice = timeslice;
			// hash it
			hash_proc(proc);
			// add to list
			list_add(&proc_list, &(proc->list_link));
			// link father: index 0 is the root of the restored tree and
			// becomes a child of the restoring process itself
			if (i == 0) // for root task
				proc->parent = current;
			else
				proc->parent = map_getvalue(&ctx->proc_map, oldproc->parent);
			// link brother (siblings); the root keeps its default sibling links
			if (i != 0)
			{
				proc->optr = map_getvalue(&ctx->proc_map, oldproc->optr);
				proc->yptr = map_getvalue(&ctx->proc_map, oldproc->yptr);
			}
			// link child
			proc->cptr = map_getvalue(&ctx->proc_map, oldproc->cptr);
			// link thread.. TODO:
			// add total process count
			inc_nr_process();
	    }
	    local_intr_restore(intr_flag);

		// link mm: translate the old mm pointer to the restored mm
		struct mm_struct *mm = map_getvalue(&ctx->mm_map, oldproc->mm);
		proc->mm = mm;
		mm_count_inc(mm);
		set_pgdir(proc, mm->pgdir);

		// link fs: translate the old fs pointer to the restored fs_struct
		struct fs_struct *fs = map_getvalue(&ctx->fs_map, oldproc->fs_struct);
		proc->fs_struct = fs;
		fs_count_inc(fs);
	}
	return 0;
}

int build_tasks_tree(struct ckpt_ctx *ctx)
{
	int i;

	// read data
	struct ckpt_task_header task_header;
	read_chunk(ctx, CKPT_CHUNK_TYPE_TASK, &task_header);

	ctx->nr_tasks = task_header.task_count;
	ctx->tasks_arr = kmalloc(ctx->nr_tasks * sizeof(struct proc_struct*));
	ctx->tasks_arr[0] = kmalloc(ctx->nr_tasks * sizeof(struct proc_struct));
	for (i = 1; i < ctx->nr_tasks; i++)
		ctx->tasks_arr[i] = ctx->tasks_arr[i -1] +1;

	// read task map
	read_bits(ctx, &ctx->proc_map, sizeof(ctx->proc_map));

	// allocate new proc_struct
	for (i = 0; i < ctx->proc_map.len; i++)
	{
		struct proc_struct *newproc = alloc_proc();
		// TODO: naive solution
		ctx->proc_map.arr[i].second = newproc;
		// read old data
		struct proc_struct *oldproc = ctx->tasks_arr[i];
		read_bits(ctx, oldproc, sizeof(struct proc_struct));
		// set new data
		// set proc name
		memcpy(newproc->name, ctx->tasks_arr[i]->name, PROC_NAME_LEN +1);
		// set wait_timer
		newproc->wait_timer = oldproc->wait_timer;

	}

	return 0;
}

int build_restart_head(struct ckpt_ctx *ctx)
{
	struct ckpt_header header;
	read_chunk(ctx, CKPT_CHUNK_TYPE_HEADER, &header);
	// TODO : process parameters
	return 0;
}

// build_kernel_stacks - allocate a fresh kernel stack for every restored
// process in ctx->proc_map. Returns 0 on success, -1 if any stack
// allocation fails.
//
// BUG FIX: the original fell off the end of this non-void function without
// returning a value (undefined behavior); it now returns 0 on success.
int build_kernel_stacks(struct ckpt_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->proc_map.len; i++)
	{
		// setup each kernel stack of task
		if (restart_setup_kstack(ctx->proc_map.arr[i].second))
			return -1;
	}
	return 0;
}

// build_mm - restore every checkpointed address space.
//
// Reads the MM chunk, re-creates one mm_struct per saved mm (with a fresh
// page directory cloned from the init pgdir), re-creates each VMA and
// refills its pages from the image, then restores brk/brk_start and links
// the mm onto proc_mm_list. Finally consumes the shared-memory chunk header
// and map table (restoring shared memory itself is still TODO).
//
// NOTE: temporarily switches the active page table to each new mm so that
// read_bits() can write straight to the restored user addresses; the
// caller's page table is restored before returning.
//
// BUG FIX: the original fell off the end of this non-void function without
// returning a value (undefined behavior); it now returns 0.
int build_mm(struct ckpt_ctx *ctx)
{
	int i, j;

	// remember the caller's mm so we can switch back after filling pages
	struct mm_struct *curmm = current->mm;

	// read data
	struct ckpt_mm_header mm_header;
	read_chunk(ctx, CKPT_CHUNK_TYPE_MM, &mm_header);

	// read the old-mm -> new-mm map table
	read_bits(ctx, &ctx->mm_map, sizeof(ctx->mm_map));

	for (i = 0; i < ctx->mm_map.len; i++)
	{
		// read the stored mm image
		struct ckpt_mm_dump_struct mm_dump;
		struct mm_struct oldmm;
		read_bits(ctx, &mm_dump, sizeof(struct ckpt_mm_dump_struct));
		read_bits(ctx, &oldmm, sizeof(struct mm_struct));

		// create the replacement mm and its page directory
		// TODO: naive solution
		struct mm_struct *newmm = restart_mm_create(&oldmm);
		ctx->mm_map.arr[i].second = newmm;
		restart_setup_pgdir(newmm);
		// switch the page table to the new mm so the read_bits() calls
		// below can deposit page contents at their user addresses
		switchtopgdir(newmm);
		for (j = 0; j < mm_dump.vma_struct_count; j++)
		{
			// TODO: currently shared memory
			struct vma_struct vma;
			struct vma_struct *newvma;
			read_bits(ctx, &vma, sizeof(struct vma_struct));
			// insert the vma writable first so the fill below can fault
			// pages in; the real protection flags are restored afterwards
			mm_map(newmm, vma.vm_start, vma.vm_end - vma.vm_start, VM_WRITE | VM_READ, &newvma);
			// fill the region's data from the image
			read_bits(ctx, vma.vm_start, vma.vm_end - vma.vm_start);
			// set correct flag
			newvma->vm_flags = vma.vm_flags;
		}
		// restore heap bounds and publish the mm on the global list
		newmm->brk_start = oldmm.brk_start;
		newmm->brk = oldmm.brk;
		bool intr_flag;
		local_intr_save(intr_flag);
		{
			list_add(&(proc_mm_list), &(newmm->proc_mm_link));
		}
		local_intr_restore(intr_flag);
	}
	// switch back to the caller's page table
	switchtopgdir(curmm);

	// shared memory: consume the chunk header and map table; actual
	// restoration of shared regions is still TODO
	struct ckpt_shared_mm_header shared_header;
	read_chunk(ctx, CKPT_CHUNK_TYPE_SHAREDMM, &shared_header);
	read_bits(ctx, &ctx->shared_mm_map, sizeof(ctx->shared_mm_map));

	return 0;
}

// build_fs - restore every checkpointed fs_struct (file table state).
//
// For each saved fs_struct: allocate a fresh one, then replay its saved
// open files by re-opening each path with the recorded fd and access mode,
// and restore each file's position and open count. The saved pwd is not
// restored yet (TODO).
//
// NOTE: temporarily switches current->fs_struct so that file_open_fd()
// installs the fd into the fs being rebuilt; the caller's fs is restored
// before returning.
//
// BUG FIXES: the original fell off the end of this non-void function
// without returning a value (undefined behavior) — it now returns 0; and
// the assert(newfs != NULL) ran only AFTER newfs had been dereferenced —
// the check is now hoisted before first use.
int build_fs(struct ckpt_ctx *ctx)
{
	int i, j;

	// remember the caller's fs so we can switch back around each open
	struct fs_struct *bakfs = current->fs_struct;

	// read data
	struct ckpt_fs_header fs_header;
	read_chunk(ctx, CKPT_CHUNK_TYPE_FS, &fs_header);

	// read the old-fs -> new-fs map table
	read_bits(ctx, &ctx->fs_map, sizeof(ctx->fs_map));

	for (i = 0; i < ctx->fs_map.len; i++)
	{
		// read the stored fs image
		struct fs_struct oldfs;
		read_bits(ctx, &oldfs, sizeof(struct fs_struct));

		// create the replacement fs_struct
		// TODO: naive solution; oldfs.pwd is not restored yet
		struct fs_struct *newfs = restart_fs_create(&oldfs);
		assert(newfs != NULL);
		ctx->fs_map.arr[i].second = newfs;

		// pin the refcount while the filemap below is manipulated
		newfs->fs_count.counter = 1;

		// number of open files saved for this fs
		int sum;
		read_bits(ctx, &sum, sizeof(sum));

		ckprintf("Ckpt : file read %d\n", sum);

		struct file newfile;
		for (j = 0; j < sum; j++)
		{
			read_bits(ctx, &newfile, sizeof(struct file));

			ckprintf("CKpt : file : %d %d %d %d %s\n", newfile.fd, newfile.status, newfile.readable, newfile.writable, newfile.path);
			// rebuild the open(2)-style flags from the saved access bits
			uint32_t fflags = 0;
			if (newfile.readable && newfile.writable)
				fflags = O_RDWR;
			else if (newfile.readable)
				fflags = O_RDONLY;
			else if (newfile.writable)
				fflags = O_WRONLY;

			// open inside the fs being rebuilt so the fd lands there
			switchtofs(newfs);
			file_open_fd(newfile.fd, newfile.path, fflags);
			switchtofs(bakfs);

			// restore the file offset and reference count
			struct file *ff = newfs->filemap + newfile.fd;
			ff->open_count = newfile.open_count;
			ff->pos = newfile.pos;
		}

		// drop the temporary refcount pin
		newfs->fs_count.counter = 0;
	}

	switchtofs(bakfs);

	return 0;
}

// build_thread - restore each task's trapframe and kernel context.
//
// Reads one saved trapframe per task from the TRAPFRAME chunk, places it at
// the top of the task's new kernel stack, and points the saved context at
// restartret so the first context switch drops the task back into user
// mode through forkrets(). Returns 0.
int build_thread(struct ckpt_ctx *ctx)
{
	int i;

	// read data (the chunk header payload is discarded into i)
	read_chunk(ctx, CKPT_CHUNK_TYPE_TRAPFRAME, &i);

	for (i = 0; i < ctx->proc_map.len; i++)
	{
		struct proc_struct *proc = (struct proc_struct*)ctx->proc_map.arr[i].second;
		// setup trapframe
		struct trapframe oldtf;
		read_bits(ctx, &oldtf, sizeof(struct trapframe));

		// place the trapframe at the very top of the new kernel stack
	    proc->tf = (struct trapframe *)(proc->kstack + KSTACKSIZE) - 1;
	    *(proc->tf) = oldtf;
	    // force eax to 0 — presumably the return value the restored task
	    // sees from its interrupted syscall (TODO confirm; see do_restart)
	    proc->tf->tf_regs.reg_eax = 0;
//	    proc->tf->tf_esp = esp;
	    // make sure interrupts are enabled when the task resumes
	    proc->tf->tf_eflags |= FL_IF;

	    // first switch to this task jumps to restartret -> forkrets(tf)
	    proc->context.eip = (uintptr_t)restartret;
	    proc->context.esp = (uintptr_t)(proc->tf);
	}

	return 0;
}


// build_timers - re-create the kernel timers that were pending at
// checkpoint time and put their owning tasks back to sleep on them.
// Called by wakeup_tree() with interrupts already disabled. Returns 0.
int build_timers(struct ckpt_ctx *ctx)
{
	int i;

	// read data (the chunk header payload is discarded into i)
	read_chunk(ctx, CKPT_CHUNK_TYPE_TIMER, &i);

	// read hash table of timers
	read_bits(ctx, &ctx->timer_map, sizeof(struct map));

	for (i = 0; i < ctx->timer_map.len; i++)
	{
		ckprintf("Restart: setup a timer\n");

		// load old timer
		timer_t oldtimer;
		read_bits(ctx, &oldtimer, sizeof(timer_t));
		// map the saved owner to its restored proc_struct
		struct proc_struct *proc = map_getvalue(&ctx->proc_map, oldtimer.proc);

		// TODO(review): the timer is kmalloc'd here and never freed —
		// decide who releases it when it fires or the proc exits
		timer_t *newtimer = kmalloc(sizeof(timer_t));
		// TODO:
		restart_timer_init(newtimer, proc, oldtimer.expires);
		// the owner was blocked on this timer at checkpoint time:
		// restore it to the sleeping, waiting-on-timer state
		proc->state = PROC_SLEEPING;
		proc->wait_state = WT_TIMER;
		// arm the timer and replace the stale wait_timer pointer copied
		// over by build_tasks_tree()
		add_timer(newtimer);
		proc->wait_timer = newtimer;
	}

	return 0;
}

// wakeup_tree - final restore step: re-arm the checkpointed timers, then
// mark every task that was runnable at checkpoint time runnable again.
// Everything runs with interrupts disabled so the scheduler never sees
// half-restored state. Returns 0.
int wakeup_tree(struct ckpt_ctx *ctx)
{
	bool intr_flag;
	int idx;

	local_intr_save(intr_flag);
	{
		build_timers(ctx);
		for (idx = 0; idx < ctx->proc_map.len; idx++)
		{
			struct proc_struct *newproc =
			    (struct proc_struct *)ctx->proc_map.arr[idx].second;
			struct proc_struct *saved =
			    (struct proc_struct *)ctx->tasks_arr[idx];
			// only tasks that were runnable when checkpointed wake up;
			// sleeping tasks were parked on timers by build_timers()
			if (saved->state == PROC_RUNNABLE)
				wakeup_proc(newproc);
		}
	}
	local_intr_restore(intr_flag);

	return 0;
}

// do_self_restore - rebuild the entire checkpointed process tree from the
// image in ctx, then wake it up. The order mirrors process creation:
// image header, task structs, kernel stacks, address spaces, file state,
// trapframes, parent/child links, wakeup.
//
// NOTE(review): returns the pid from the OLD (checkpointed) root image in
// tasks_arr[0], but set_task_links() assigns fresh pids via get_pid() to
// the restored tasks — verify whether the caller expects the old pid or
// the new one (proc_map.arr[0].second->pid).
int do_self_restore(struct ckpt_ctx *ctx)
{
	// like fork
	ckprintf("Rst : head\n");
	build_restart_head(ctx);
	ckprintf("Rst : task tree\n");
	build_tasks_tree(ctx);
	ckprintf("Rst : kernel stack\n");
	build_kernel_stacks(ctx);

	// build_sems
	ckprintf("Rst : mm\n");
	build_mm(ctx);
	ckprintf("Rst : fs\n");
	build_fs(ctx);
	ckprintf("Rst : thread trap\n");
	build_thread(ctx);
	ckprintf("Rst : links\n");
	set_task_links(ctx);
	ckprintf("Rst : wakeup\n");
	wakeup_tree(ctx);
	ckprintf("Rst : restart over\n");
	//...

	return ctx->tasks_arr[0]->pid;
}

// do_restart - top-level restart entry point. pid and flags are currently
// unused: the coordinator/ghost-task paths below are not implemented yet,
// so every call performs a self-restore from ctx. Returns the value of
// do_self_restore() (the restored root task's pid on success).
int do_restart(struct ckpt_ctx *ctx, pid_t pid, unsigned long flags)
{
	int ret;

//	if (ctx)
//		ret = do_restore_coord(ctx, pid);
//	if (flags & RESTART_GHOST)
//		ret = do_ghost_task();
//	else
//		ret = do_restore_task();
	ret = do_self_restore(ctx);


	/*
	 * The retval from what we return to the caller when all goes
	 * well: this is either the retval from the original syscall
	 * that was interrupted during checkpoint, or the contents of
	 * (saved) eax if the task was in userspace.
	 *
	 * The coordinator (ctx!=NULL) is exempt: don't adjust its retval.
	 * But in self-restart (where RESTART_TASKSELF), the coordinator
	 * _itself_ is a restarting task.
	 */

//	if (!ctx || (ctx->uflags & RESTART_TASKSELF)) {
//		if (ret < 0) {
//			/* partial restore is undefined: terminate */
//			ckpt_debug("restart err %ld, exiting\n", ret);
//			force_sig(SIGKILL, current);
//		} else {
//			ret = restore_retval();
//		}
//	}

//	ckpt_debug("sys_restart returns %ld\n", ret);
	return ret;
}


