/*

  Time-stamp: <2009-11-14 20:35:58 xinhaoyuan>

  File: env.c env.h

  This file contains routines that manage environments, the
  scheduling granularity unit in the system.

  The environment structures are stored in a pre-allocated env array.

    - int env_idx_alloc() -- allocate a new env array element, return the idx
	- void env_idx_sfree(uint16_t idx)  -- deallocate an env array element
	
	- void fork(struct trapframe_t *tf) -- classical fork
	- void sfork(struct trapframe_t *tf) -- fork a new thread belonging to the same process
	
*/

#define _EKOS_KERNEL_C_
#include <kernel/kernel.h>

/* spinlock serializing env allocation (taken with sl_lock below) */
uint8_t envs_alloc_lock;

/* monotonically increasing env id counter (truncated to 16 bits on use) */
uint32_t env_id_count;
/* NOTE(review): vid_count is not referenced in this part of the file */
uint32_t vid_count;

/* head of the free env list, ENV_NULL when empty */
int env_free_head;
/* head of the "safe-freed" list; drained into env_free_head under
 * envs_switch_lock by env_idx_alloc() */
int env_sfree_head;
/* pre-allocated env array (see file header) */
struct env_t *envs;

/* Allocate an unused slot in the envs array.
 *
 * When the free list is exhausted, the "safe-freed" list (filled by
 * env_idx_sfree) is reclaimed under envs_switch_lock.  A fresh eid is
 * drawn from the global counter for the slot.
 *
 * Returns the allocated index, or -E_NO_ENV when no slot is available. */
KERNEL_EXPORT int
env_idx_alloc(void)
{
	 sl_lock(&envs_alloc_lock);

	 if (env_free_head == ENV_NULL)
	 {
		  /* try to refill from the safe-freed list */
		  sl_lock(&envs_switch_lock);
		  env_free_head = env_sfree_head;
		  env_sfree_head = ENV_NULL;
		  sl_release(&envs_switch_lock);

		  if (env_free_head == ENV_NULL)
		  {
			   /* still nothing to hand out */
			   sl_release(&envs_alloc_lock);
			   return -E_NO_ENV;
		  }
	 }

	 int idx = env_free_head;
	 env_free_head = envs[idx].free_next;

	 /* NOTE: the counter is 32-bit but the eid stored is truncated to
	  * 16 bits, exactly as in the original code */
	 envs[idx].eid = (uint16_t)(env_id_count ++);

	 sl_release(&envs_alloc_lock);
	 return idx;
}

KERNEL_EXPORT void
env_idx_sfree(uint16_t idx)
{
	 envs[idx].free_next = env_sfree_head;
	 envs[idx].env_status = ENV_FREE;
	 
	 env_sfree_head = idx;
}


/* This function is used by routine create new process */
/* Construct the new kernel dynamic mapping for the new address
 * space  */
/* TVPD, TVPT, TPAGE used */
/* Assume that TVPD is the new address space(L1 TABLE) */
static int
fork_kern_P(void)
{
     int err;
     uint32_t l;

	 pdir_entry_t *tvpd = get_local_tvpd();
	 ptab_entry_t *tvpt = get_local_tvpt();
	 
     ptab_entry_t *svpt;
	 
     uint32_t i, j;
     physaddr_t addr;

	 char *tpage = get_local_tpage();

     l = pcs_push_level();

	 /* 建立 KTMP, PCS */
	 {
		  physaddr_t ktmp_paddr, pcs_paddr;
		  if ((err = frame_alloc_push(&ktmp_paddr)) != 0)
		  {
			   pcs_clear_level(l);
			   return err;
		  }

		  if ((err = frame_alloc_push(&pcs_paddr)) != 0)
		  {
			   pcs_clear_level(l);
			   return err;
		  }

		  tvpd[PAGEDIR_IDX(KTMP)] = ktmp_paddr | PTE_W | PTE_P;
		  /* 大概不需要清理, 不过出于道德 {{{ */
		  
		  vpt[PAGE_NUM(tvpt)] = ktmp_paddr | PTE_W | PTE_P;
		  /* PCS */
		  vpt[PAGE_NUM(tpage)] = pcs_paddr | PTE_W | PTE_P;

		  tlbflush();
		  memset(tvpt, 0, PAGE_SIZE);
		  pcs_init_struct((volatile struct pcs_t *)tpage);
		  
		  /* IMPORTANT -- 假设新的 LID 是 0 */
		  tvpt[KTMP_PCS_PAGE_OFFSET] = pcs_paddr | PTE_W | PTE_P;
	 }

     /* 复制 kernel stack (包括 uxstack) */
     if ((err = frame_alloc_push(&addr)) != 0)
     {
		  pcs_clear_level(l);
		  return err;
     }
	 
	 // 危险!!! >.< ULIM 可能和 KSTACK 区域不临界 ...
	 // 这样的话或许应该是 PAGEDIR_IDX(KSTACK_BOT)
     tvpd[PAGEDIR_IDX(ULIM)] = addr | PTE_W | PTE_P;
     vpt[PAGE_NUM(tvpt)]     = addr | PTE_W | PTE_P;

     svpt = (ptab_entry_t *)vpt + (PAGEDIR_IDX(KSTACK_TOP - 1) << (PDX_SHIFT - PTX_SHIFT));

	 /* 0. initial the kstack_alloc */
	 if ((err = frame_alloc_push(&addr)) != 0)
	 {
		  pcs_clear_level(l);
		  return err;
	 }

	 vpt[PAGE_NUM(tpage)] = addr | PTE_W | PTE_P;
	 tlbflush();
	 
	 memset(tvpt, 0, PAGETAB_COUNT);

	 tvpt[PAGETAB_IDX(DYNINFO)] = addr | PTE_W | PTE_P;
	 
	 /* set the 0 is alloced */
	 struct dyninfo_t *tmp_dyninfo = (struct dyninfo_t *)tpage;
	 for (i = 1; i < KSTACK_COUNT; ++i)
	 {
		  tmp_dyninfo->kstack_alloc_tab[i] = i + 1;
	 }
	 
	 tmp_dyninfo->kstack_alloc_free = 1;
	 tmp_dyninfo->kstack_alloc_tab[0] = KSTACK_COUNT;
	 tmp_dyninfo->kstack_alloc_inuse = 0;
	 
	 mutex_init(&tmp_dyninfo->kstack_alloc_lock);
	 mutex_init(&tmp_dyninfo->uxstack_lock);

	 /* copying veflags */
     tmp_dyninfo->veflags = dyninfo.veflags;
	 /* and initialize the pfhandler */
	 tmp_dyninfo->upfhandler = dyninfo.upfhandler;
	 
	 /* 1. copy the current kernel stack */
	 // cprintf("[fork_kern] kernel stack copying\n");
	 int base_idx = PAGETAB_COUNT - 1 - envs[cur_env].lid * ((KSTACK_SIZE >> PAGE_SHIFT) + 1);
	 for (j = 0; j != (KSTACK_SIZE >> PAGE_SHIFT); ++j)
     {
		  if (svpt[base_idx - j] & PTE_P)
		  {
			   if ((err = frame_alloc_push(&addr)) != 0)
			   {
					pcs_clear_level(l);
					return err;
			   }
	       
			   vpt[PAGE_NUM(tpage)] = addr | PTE_W | PTE_P;
			   tlbflush();
			   tvpt[PAGETAB_COUNT - 1 - j] = addr;

			   // cprintf("[fork_kern] kstack page %d => page %d\n",
			   // base_idx - j, PAGETAB_COUNT - 1 - j);
			   
			   memmove(tpage, PAGE_ADDR(PAGEDIR_IDX(KSTACK_TOP - 1), base_idx - j, 0),
					   PAGE_SIZE);
			   tvpt[PAGETAB_COUNT - 1 - j] |= svpt[base_idx - j] & PTE_USER;
		  }
     }

	 /* 2. copy the other area in kstack area uxstack */
	 // cprintf("[fork_kern] uxstack stack copying\n");
     for (j = PAGETAB_IDX(UXSTACK_BOT); j != PAGETAB_IDX(UXSTACK_TOP); ++j)
     {
	 	  if (svpt[j] & PTE_P)
	 	  {
	 		   if ((err = frame_alloc_push(&addr)) != 0)
	 		   {
	 				pcs_clear_level(l);
	 				return err;
	 		   }
	       
	 		   vpt[PAGE_NUM(tpage)] = addr | PTE_W | PTE_P;
	 		   tlbflush();
	 		   tvpt[j] = addr;

			   // cprintf("[fork_kern] kstack page %d => page %d\n", j, j);
			   
	 		   memmove(tpage, PAGE_ADDR(PAGEDIR_IDX(KSTACK_TOP - 1), j, 0), PAGE_SIZE);
	 		   tvpt[j] |= svpt[j] & PTE_USER;
	 	  }
     }

	 return err;
}

/* Internal helper: byte offset between the kernel stack slot of
 * s_lid and that of d_lid.  Each lid slot occupies KSTACK_SIZE plus
 * one extra page (matching the (KSTACK_SIZE >> PAGE_SHIFT) + 1 page
 * stride used elsewhere in this file). */
static int
get_kstk_delta(int d_lid, int s_lid)
{
	 int slot_span = KSTACK_SIZE + PAGE_SIZE;
	 return slot_span * (s_lid - d_lid);
}

/* assume TVPD is the new table */
/* Allocate a fresh env slot and initialize it as a clone of cur_env
 * for the new address space.
 *
 * tf         -- trapframe of the forking thread (lives on the kernel
 *               stack); its address shifted by kstk_delta becomes the
 *               child's saved stack pointer.
 * kstk_delta -- byte offset from the current kernel stack slot to the
 *               child's slot (the child always gets lid 0, see the
 *               "new LID is 0" assumption in fork_kern_P).
 *
 * Returns 0 on success or the negative error from env_idx_alloc(). */
static int
dup_env(struct trapframe_t *tf, int kstk_delta)
{
	 int result = env_idx_alloc();
	 if (result < 0) return result;
	 
     memmove(envs + result, envs + cur_env, sizeof(struct env_t));

	 /* the child starts in kernel-stack slot 0 */
	 envs[result].lid = 0;
	 /* the child's page directory frame was installed at TVPD by fork() */
     envs[result].cr3 = PTE_ADDR(vpt[PAGE_NUM(get_local_tvpd())]);
	 /* NOTE(review): the child is made its own parent here, whereas
	  * sfork() inherits envs[cur_env].parent -- presumably a new
	  * process roots its own group; confirm this is intended. */
     envs[result].parent = result;

	 envs[result].env_status = ENV_INUSE;
     envs[result].sch_status = ENV_SCH_DETACHED;

	 /* saved stack pointers, relocated into the child's stack slot */
     envs[result].ssp = (uint32_t)tf + kstk_delta;
	 envs[result].ksp = envs[cur_env].ksp + kstk_delta;

	 iec_init(result);
	 sch_attach(cur_env, result);
	 
	 return 0;
}

/* fork -- create a new process that is a copy of the current one. */
/* The user address space is shared copy-on-write: every present
 * page-directory entry below UTOP is marked PTE_COW and stripped of
 * PTE_W in the child, unless it is PTE_SHARED.  The kernel-private
 * part of the address space is rebuilt by fork_kern_P(). */
/* TVPT, TVPD, TPAGE temporary mapping slots are used. */
/* The result is reported through tf->regs.eax: 0 on success or a
 * negative error code on failure. */
KERNEL_EXPORT void
fork(struct trapframe_t *tf)
{
	 pdir_entry_t *tvpd = get_local_tvpd();
	 /* NOTE(review): tvpt and svpt are declared/assigned but never used
	  * in this function (fork_kern_P has its own copies) */
	 ptab_entry_t *tvpt = get_local_tvpt();

     uint32_t l = pcs_push_level();
     physaddr_t addr;

     ptab_entry_t *svpt;
	 
     int err;

     tf->regs.eax = 0;

	 // 0. allocate the new page directory (L1 table) and map it at TVPD
     if ((err = frame_alloc_push(&addr)) != 0)
     {
		  pcs_clear_level(l);
		  tf->regs.eax = err;
		  return;
     }

     vpt[PAGE_NUM(tvpd)] = addr | PTE_W | PTE_P;
     tlbflush();

	 /* start from a copy of the current page directory */
     memmove(tvpd, (pdir_entry_t *)vpd, PAGE_SIZE);

	 /* self-map the new directory so the child gets its own VPT views */
     tvpd[PAGEDIR_IDX(KVPT)] = addr | PTE_W | PTE_P;
     tvpd[PAGEDIR_IDX(UVPT)] = addr | PTE_U | PTE_P;
     
     /* mark COW for entries in page dir (user) */
     uint32_t i, j, pde;
     for (i = 0; i <= PAGEDIR_IDX(UTOP - 1); ++i)
     {
		  pde = lock_pde(i);
		  if (pde & PTE_P)
		  {
			   if ((pde & (PTE_W | PTE_COW)) && !(pde & PTE_SHARED))
			   {
					/* writable or already-COW private mapping: share it
					 * read-only with the COW flag set */
					tvpd[i] = (pde | PTE_COW) & ~(uint32_t)PTE_W;
					pde = tvpd[i];
			   }
			   else tvpd[i] = pde;
	       
			   /* the page table frame gains one more referrer */
			   frame_inc_ref(PTE_ADDR(pde));
		  } else tvpd[i] = 0;
		  /* NOTE(review): release_pde presumably writes the (possibly
		   * COW-marked) entry back into the parent's directory -- confirm */
		  release_pde(i, pde);
     }
	 
	 int kstk_delta = get_kstk_delta(0, envs[cur_env].lid);

	 /* change stack pointer to LID(0) */
	 /* IMPORTANT -- we must not store any non-PIC pointer to kernel
	  * stack, they will become dangerous after fork */
	 tf->regs.esp += kstk_delta;
	 
     /* copy the kernel part of the address space */
	 /* NOTE(review): fork_kern_P pushes its own pcs level that is never
	  * popped on success -- confirm this is the intended ownership */
     if ((err = fork_kern_P()) != 0)
     {
		  pcs_clear_level(l);
		  tf->regs.eax = err;
		  return;
     }
	 
	 /* restore the stack pointer */
	 tf->regs.esp -= kstk_delta;

	 /* fork the env */
	 if ((err = dup_env(tf, kstk_delta)) != 0) {
		  pcs_clear_level(l);
		  tf->regs.eax = err;
		  return;
	 }

	 tf->regs.eax = 0;
     pcs_pop_level(l);
     return;
}

/* shared fork -- create a new environment (thread) belonging to the
 * same process: the address space is shared, but the new thread gets
 * its own kernel stack slot (lid) and env entry.
 *
 * The result is reported through tf->regs.eax: 0 on success or a
 * non-zero error code on failure. */
KERNEL_EXPORT void
sfork(struct trapframe_t *tf)
{
	 int result = 0;

	 /* 0. allocate a local id (kernel stack slot) */
	 mutex_lock(&dyninfo.kstack_alloc_lock);

	 uint32_t new_lid = dyninfo.kstack_alloc_free;
	 if (new_lid != KSTACK_COUNT)
	 {
		  dyninfo.kstack_alloc_free = dyninfo.kstack_alloc_tab[new_lid];
	 } else result = -E_NO_THREAD;			/* TOO MANY THREADS */

	 mutex_unlock(&dyninfo.kstack_alloc_lock);

	 /* 1. allocate an env slot */
	 /* NOTE(review): if a later step fails, the env slot allocated here
	  * is not returned to the free list -- same leak as the original */
	 int eidx;
	 if (!result)
	 {
		  eidx = env_idx_alloc();
		  if (eidx < 0)
			   result = eidx;	/* BUGFIX: propagate the real error code
					 * (was the meaningless constant 1) */
	 }

	 /* page-number bases of the source / destination kernel stacks and
	  * the byte delta between the two slots */
	 uint32_t sbase_idx, dbase_idx;
	 sbase_idx = PAGE_NUM(KSTACK_TOP) - 1 -
		  envs[cur_env].lid * ((KSTACK_SIZE >> PAGE_SHIFT) + 1);
	 dbase_idx = PAGE_NUM(KSTACK_TOP) - 1 -
		  new_lid * ((KSTACK_SIZE >> PAGE_SHIFT) + 1);
	 uint32_t kstk_delta = get_kstk_delta(new_lid, envs[cur_env].lid);

	 tf->regs.eax = 0;
	 /* shift the saved esp into the child's stack slot while the stack
	  * pages are copied; restored below */
	 tf->regs.esp += kstk_delta;

	 /* NOTE(review): this level is popped only on full success and
	  * cleared on allocation failure; on an early failure (no lid / no
	  * env) it is neither popped nor cleared -- confirm pcs semantics */
	 uint32_t l = pcs_push_level();
	 physaddr_t addr;

	 if (!result)
	 {
		  /* map (if needed) and initialize the child's PCS page */
		  volatile struct pcs_t *new_pcs = get_pcs(new_lid);
		  if (!(vpt[PAGE_NUM(new_pcs)] & PTE_P)) {
			   if ((result = frame_alloc_push(&addr)) != 0)
			   {
					pcs_clear_level(l);
			   } else {
					vpt[PAGE_NUM(new_pcs)] = addr | PTE_W | PTE_P;
			   }
		  }

		  if (!result) {
			   tlbflush();
			   pcs_init_struct(new_pcs);
		  }
	 }
	 
	 if (!result)
	 {
		  uint32_t j;
		  
		  /* the kstack -- allocate destination frames mirroring the
		   * present source pages */
		  for (j = 0; j != (KSTACK_SIZE >> PAGE_SHIFT); ++j)
		  {
			   if ((vpt[sbase_idx - j] & PTE_P) && !(vpt[dbase_idx - j] & PTE_P))
			   {
					if ((result = frame_alloc_push(&addr)) != 0)
					{
						 pcs_clear_level(l);
						 break;
					}
			   
					vpt[dbase_idx - j] = addr | (vpt[sbase_idx - j] & PTE_USER);
			   }
		  }

		  if (!result) {
			   tlbflush();
			   /* then copy the stack contents page by page */
			   for (j = 0; j != (KSTACK_SIZE >> PAGE_SHIFT); ++j)
			   {
					if ((vpt[sbase_idx - j] & PTE_P))
					{
						 memmove(PAGE_ADDR(0, dbase_idx - j, 0),
								 PAGE_ADDR(0, sbase_idx - j, 0),
								 PAGE_SIZE);
					}
			   }
		  }

		  if (!result)
			   pcs_pop_level(l);
	 }

	 /* restore the caller's saved esp */
	 tf->regs.esp -= kstk_delta;

	 if (!result)
	 {
		  /* populate the new env as a sibling thread of cur_env */
		  memmove(envs + eidx, envs + cur_env, sizeof(struct env_t));

		  envs[eidx].lid = new_lid;
		  envs[eidx].parent = envs[cur_env].parent;
		  
		  envs[eidx].env_status = ENV_INUSE;
		  envs[eidx].sch_status = ENV_SCH_DETACHED;

		  envs[eidx].ssp = (uint32_t)tf + kstk_delta;
		  envs[eidx].ksp = envs[cur_env].ksp + kstk_delta;

		  iec_init(eidx);
		  tlbflush();
		  sch_attach(cur_env, eidx);
	 }

	 tf->regs.eax = result;

	 /* BUGFIX: the lid bookkeeping below used to run unconditionally,
	  * which on the -E_NO_THREAD path wrote
	  * dyninfo.kstack_alloc_tab[KSTACK_COUNT] -- out of bounds -- and on
	  * any later failure leaked the lid onto the in-use list. */
	 if (new_lid != KSTACK_COUNT)
	 {
		  mutex_lock(&dyninfo.kstack_alloc_lock);
		  if (!result)
		  {
			   /* success: push the lid onto the in-use list */
			   dyninfo.kstack_alloc_tab[new_lid] = dyninfo.kstack_alloc_inuse;
			   dyninfo.kstack_alloc_inuse = new_lid;
		  }
		  else
		  {
			   /* failure after the lid was taken: give it back */
			   dyninfo.kstack_alloc_tab[new_lid] = dyninfo.kstack_alloc_free;
			   dyninfo.kstack_alloc_free = new_lid;
		  }
		  mutex_unlock(&dyninfo.kstack_alloc_lock);
	 }
}

/* Destroy the current env and release the resources associated with it. */
KERNEL_EXPORT void
env_destroy(void)
{
	 /* process iec -- cancel all iec remains */
	 while (true) {
		  if (iec_send_cancel() == 0) break;
		  else sch_yield(ENV_SCH_WAITING);
	 }
	 
	 /* refuse any further incoming iec messages */
	 sl_lock(&envs[cur_env].iec.recv.lock);
	 envs[cur_env].iec.recv.policy = IEC_POLICY_DENY;
	 sl_release(&envs[cur_env].iec.recv.lock);

	 /* drain the pending receive queue until it points back to us.
	  * NOTE(review): when the queue is empty the loop breaks while still
	  * holding recv.lock -- confirm the lock is meant to stay held (or
	  * no longer matters) past this point. */
	 while (true)
	 {
		  int n;
		  
		  sl_lock(&envs[cur_env].iec.recv.lock);
		  if ((n = envs[cur_env].iec.recv.begin) == cur_env) break;
		  sl_release(&envs[cur_env].iec.recv.lock);

		  iec_recv(n);
	 }
	 /* finished. */
	 
	 int lid = envs[cur_env].lid;
	 bool last = false;
	 
	 /* unlink our lid from the in-use list; we are the last thread when
	  * the list becomes the KSTACK_COUNT sentinel.
	  * NOTE(review): the assignment assumes lid is the HEAD of the
	  * in-use list (kstack_alloc_inuse == lid); if another thread's lid
	  * is in front, the list is corrupted -- verify this invariant. */
	 mutex_lock(&dyninfo.kstack_alloc_lock);
	 last = ((dyninfo.kstack_alloc_inuse = dyninfo.kstack_alloc_tab[lid]) == KSTACK_COUNT);
	 mutex_unlock(&dyninfo.kstack_alloc_lock);

	 if (last)
	 {
		  /* When there is no more thread in the process */
		  /* We start to recycle all memory mapped in the address space */

		  /* debug breakpoint left in the teardown path */
		  breakpoint();
		  
		  /* 1. clear all memory below UTOP */
		  /* Need to be considerate about the COW flag */
		  int pde_idx, pte_idx;
		  for (pde_idx = 0; pde_idx < PAGEDIR_IDX(UTOP); ++pde_idx)
		  {
			   if (vpd[pde_idx] & PTE_P)
			   {
					/* COW page tables are shared with another address
					 * space: only the table's own ref is dropped, the
					 * pages inside still belong to the sharer */
					if (!(vpd[pde_idx] & PTE_COW))
					{
						 for (pte_idx = 0; pte_idx != PAGETAB_COUNT; ++pte_idx)
						 {
							  int pagenum = (pde_idx << (PAGETAB_SHIFT - PAGE_SHIFT)) | pte_idx;
							  							  
							  if (vpt[pagenum] & PTE_P)
							  {
								   frame_dec_ref(PTE_ADDR(vpt[pagenum]));
								   vpt[pagenum] = 0;
							  }
						 }
					}
					frame_dec_ref(PTE_ADDR(vpd[pde_idx]));
			   }
		  }

		  /* debug breakpoint left in the teardown path */
		  breakpoint();


		  /* 2. KTMP: drop every mapped per-lid PCS page */
		  int i, j;
		  for (i = 0; i != KSTACK_COUNT; ++i)
		  {
			   volatile struct pcs_t *pcs = get_pcs(i);
			   if (vpt[PAGE_NUM(pcs)] & PTE_P)
			   {
					frame_dec_ref(PTE_ADDR(vpt[PAGE_NUM(pcs)]));
					vpt[PAGE_NUM(pcs)] = 0;
			   }
		  }
		  
		  /* 3. free uxstack and all kstack except the current one */
		  /* NOTE(review): sfork indexes stack tops as PAGE_NUM(top) - 1;
		   * using PAGE_NUM(UXSTACK_TOP) (and PAGE_NUM(KSTACK_TOP) below)
		   * without the -1 looks like an off-by-one -- confirm. */
		  uintptr_t cur;
		  cur = PAGE_NUM(UXSTACK_TOP);
		  for (j = 0; j != (UXSTACK_SIZE >> PAGE_SHIFT); ++j)
		  {
			   if (vpt[cur - j] & PTE_P)
					frame_dec_ref(PTE_ADDR(vpt[cur - j]));
			   vpt[cur - j] = 0;
		  }

		  /* each lid slot spans (KSTACK_SIZE >> PAGE_SHIFT) + 1 pages;
		   * remember where our own slot starts so it can be handled last */
		  uintptr_t my_kstack_idx;
		  cur = PAGE_NUM(KSTACK_TOP);
		  for (i = 0; i != KSTACK_COUNT; ++i)
		  {
			   if (i != envs[cur_env].lid)
			   {
					for (j = 0; j != (KSTACK_SIZE >> PAGE_SHIFT) + 1; ++j)
					{
						 if (vpt[cur - j] & PTE_P)
							  frame_dec_ref(PTE_ADDR(vpt[cur - j]));
						 vpt[cur - j] = 0;
					}
			   }
			   else
			   {
					my_kstack_idx = cur;
			   }
			   cur -= (KSTACK_SIZE >> PAGE_SHIFT) + 1;
		  }

		  /* the last kstack is relatively hard to clear */
		  /* but we do it as following */
		  /* 1. detach the env and thus lock the switching lock */
		  /* 2. free the frames; although these frames are still in
		   * use, switching is disabled so no more allocation can
		   * happen under us. */

		  /* there is a trick: we do not return the frames to the normal
		   * free pool, but to a special "safe" frame pool (frame_sfree)
		   * that guarantees they are not reused yet */
		  
		  if (sch_detach() == 0)
		  {
			   for (j = 0; j != (KSTACK_SIZE >> PAGE_SHIFT) + 1; ++j)
			   {
					if (vpt[my_kstack_idx - j] & PTE_P)
						 frame_sfree(PTE_ADDR(vpt[my_kstack_idx - j]));
					/* could not set it to 0 --- still in use !!! */
					// vpt[my_kstack_idx - j] = 0;
			   }
			   
			   frame_sfree(envs[cur_env].cr3);
			   env_idx_sfree(cur_env);
			   /* all finished, continue working */
			   sl_release(&envs_switch_lock);
			   call_yield();
		  }
		  else
		  {
			   /* nothing left to switch to -- drop into the monitor */
			   kprintf("Ops ... I'm the only environment alive. =.= How can I die ...\n");
			   monitor(NULL);

			   while (1) ;
		  }
	 }
	 else
	 {
		  /* other threads remain: just return our lid to the free list */
		  mutex_lock(&dyninfo.kstack_alloc_lock);
		  dyninfo.kstack_alloc_tab[lid] = dyninfo.kstack_alloc_free;
		  dyninfo.kstack_alloc_free = lid;
		  mutex_unlock(&dyninfo.kstack_alloc_lock);

		  /* detach locks the switching lock (see comment above), under
		   * which env_idx_sfree may touch the sfree list safely */
		  sch_detach();
		  env_idx_sfree(cur_env);
		  /* all finished, continue working */
		  sl_release(&envs_switch_lock);		  
		  call_yield();
	 }
}
