#include "../kernel.h"

/* Global descriptor table.
 *
 * The slot index is the selector value shifted right by 3 (each
 * descriptor is 8 bytes).  Every segment here is a flat 4GB segment;
 * the last SEG() argument is the descriptor privilege level (0 =
 * kernel, 3 = user).  Per-CPU TSS slots are filled in later by
 * vmem_init(). */
struct segdesc_t gdt[GDT_COUNT] =
{
     /* 0x0 - unused (always faults -- for trapping NULL far pointers) */
     SEG_NULL,

     /* 0x8 - kernel code segment */
     [GD_KERN_TEXT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),

     /* 0x10 - kernel data segment */
     [GD_KERN_DATA >> 3] = SEG(STA_W, 0x0, 0xffffffff, 0),

     /* 0x18 - user code segment */
     [GD_USER_TEXT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),

     /* 0x20 - user data segment */
     [GD_USER_DATA >> 3] = SEG(STA_W, 0x0, 0xffffffff, 3),
};

/* Pseudo-descriptor for the lgdt instruction: a 16-bit limit (table
 * size minus one, as the architecture requires) followed by the linear
 * base address of gdt[]. */
static struct pseudodesc_t gdt_pd =
{
     sizeof(gdt) - 1, (unsigned long) gdt
};

/* The initial (boot-time) page directory and one spin lock per
 * directory entry.  Both are page-aligned so each array occupies whole
 * pages and can be installed directly into the directory (see
 * vmem_init). */
pdir_entry_t init_vpd[PAGEDIR_COUNT] __attribute__((aligned(PAGE_SIZE)));
static spin_lock_t  init_vpd_lock[PAGEDIR_COUNT] __attribute__((aligned(PAGE_SIZE)));

/* Kernel pipe opened by vmem_init; presumably used to deliver
 * page-fault records -- the consumers are the disabled (#if 0) helpers
 * at the bottom of this file.  TODO confirm current use. */
static kpipe_t pf_pipe __attribute__((aligned(PAGE_SIZE)));

/* These symbols are defined in kernel.S */
/* {{{ */
/* Lock for vpd entry */
extern volatile spin_lock_t  vpd_lock[];
/* The mapping of current VPT */
extern volatile ptab_entry_t vpt[];
/* Mapping of VPD inside vpt */
extern volatile pdir_entry_t vpd[];
/* }}} */

/* Boot-time walk of the initial page directory.
 *
 * vpd    -- the page directory to walk (kernel virtual pointer).
 * va     -- virtual address being looked up (need not be page aligned).
 * create -- when nonzero, allocate and zero a new page table if the
 *           directory entry is not present.
 * perm   -- permission bits installed on a newly created directory entry.
 *
 * Returns a kernel-virtual pointer to the page-table entry covering VA,
 * or NULL if the table is absent and CREATE is 0, or on out-of-memory. */
static ptab_entry_t *
init_vpd_walk(pdir_entry_t *vpd, const void *va, int create, int perm)
{
     uintptr_t pdx, ptx;
     pdx = PAGEDIR_IDX(va);
     ptx = PAGETAB_IDX(va);

     if (vpd[pdx] & PTE_P)
     {
		  /* Entry already present: deliberately do NOT merge PERM into
		   * an existing entry. */
		  /* vpd[pdx] |= perm; */
     }
     else if (create)
     {
		  /* Renamed from `vpt' -- the old name shadowed the global
		   * vpt[] mapping declared above. */
		  physaddr_t pt_pa;
		  /* suppose in the initializing stage, all memory can be
		   * allocated in the kernel addressing area (0 ~ 128MB), so
		   * KADDR() below is valid for the new page */
		  if (pmem_page_alloc(1, &pt_pa, 0, PPAGE_CORE) < 0) return NULL;

		  memset(KADDR(pt_pa), 0, PAGE_SIZE);
		  vpd[pdx] = pt_pa | perm | PTE_P;
		  pmem_page_inc_ref(pt_pa);
     }
     else
     {
		  return NULL;
     }

     return (ptab_entry_t *)KADDR(PTE_ADDR(vpd[pdx])) + ptx;
}

/* Map a physical segment [pa, pa+size) at linear address LA with the
 * given permission bits, creating page tables through init_vpd_walk as
 * needed.  SIZE is rounded up to whole pages.  Returns 0 on success or
 * -E_NO_MEM when a page table cannot be allocated. */
static int
init_map_segment(pdir_entry_t *vpd, uintptr_t la, size_t size, physaddr_t pa, int perm)
{
     if (size == 0) return 0;

     /* Number of pages, rounding the byte count up. */
     uintptr_t remaining = ((size - 1) >> PAGE_SHIFT) + 1;

#if 0
     kprintf("[init_map_segment] mapping [%x, %x] to [%x, %x]\n",
			 la, la + size - 1, pa, pa + size - 1);
#endif

     while (remaining-- != 0)
     {
		  ptab_entry_t *entry = init_vpd_walk(vpd, (void *)la, 1, perm);
		  if (entry == NULL)
			   return -E_NO_MEM;

		  *entry = pa | perm | PTE_P;

		  la += PAGE_SIZE;
		  pa += PAGE_SIZE;
     }

     return 0;
}

/* Build the initial kernel address space, enable paging, and reload all
 * segment registers from the new GDT.  Runs once on the bootstrap CPU.
 * Returns 0 on success or a negative error from init_map_segment. */
int
vmem_init(void)
{
	 int err, i;
	 physaddr_t init_cr3;

	 /* Start from an empty directory with one spin lock per entry. */
     memset(init_vpd, 0, PAGEDIR_COUNT * sizeof(pdir_entry_t));
	 for (i = 0; i != PAGEDIR_COUNT; ++ i)
		  spl_init(&init_vpd_lock[i]);

     init_cr3 = PADDR(init_vpd);

	 /* Self-referencing entries: the lock page in the last slot, and the
	  * directory itself visible at KVPT (kernel, writable) and UVPT
	  * (user, read-only). */
	 init_vpd[PAGEDIR_COUNT - 1] = PADDR(init_vpd_lock) | PTE_W | PTE_P;
     init_vpd[PAGEDIR_IDX(KVPT)] = PADDR(init_vpd)      | PTE_W | PTE_P;
     init_vpd[PAGEDIR_IDX(UVPT)] = PADDR(init_vpd)      | PTE_U | PTE_P;

     /* map kernel area */
     if ((err = init_map_segment(init_vpd, KBASE, KISIZE, 0, PTE_W)) < 0)
	 {
		  return err;
	 }

	 /* map utask area */
	 /* BUGFIX: the `< 0' used to sit inside the assignment, so err was
	  * set to the boolean comparison result instead of the error code. */
	 if ((err = init_map_segment(init_vpd, UTASK,
	 							 LAPIC_COUNT * TS_USIZE,
	 							 PADDR(tasks_start), PTE_U)) < 0)
	 {
	 	  return err;
	 }

	 #if 0
	 /* map uirq_pipe ctrl block */
	 ptab_entry_t *init_vpt = KADDR(PTE_ADDR(init_vpd[PAGEDIR_IDX(UIRQ_PIPE)]));
	 for (i = 0; i != IRQ_COUNT; ++ i)
	 {
	 	  init_vpt[PAGETAB_IDX(UIRQ_PIPE + i * PAGE_SIZE)] =
	 		   PADDR(irq_pipes + i) | PTE_U | PTE_P;
	 }
	 #endif

	 /* map procs */
	 /* BUGFIX: same misplaced parenthesis as the utask mapping above. */
	 if ((err = init_map_segment(init_vpd, UPROCS,
	 							 PROCESS_MAX_COUNT * sizeof(struct proc_t),
	 							 PADDR(procs), PTE_U)) < 0)
	 {
	 	  return err;
	 }

	 /* Trick mapping: identity-map the kernel text so execution survives
	  * the instant paging turns on; removed again below. */
     init_vpd[PAGEDIR_IDX(_text - KBASE)] = init_vpd[PAGEDIR_IDX(_text)];

	 /* Initialize a TSS slot for each cpu (filled in properly later). */
	 for (i = 0; i != initial_conf.lcpu_count; ++ i)
	 {
		  gdt[GD_TSS(i) >> 3] = SEG_NULL;
	 }

	 /* Install page table. */
	 lcr3(init_cr3);

	 /* NOTE(review): this sequence relies on %eax surviving across
	  * separate asm statements; fragile under optimization -- consider
	  * merging into a single asm block or using rcr0()/lcr0(). */
	 asm volatile("movl %%cr0, %%eax" ::);
	 /* set PG | CD | NW (bits 31, 30, 29) */
	 asm volatile("orl  $0xe0000000, %%eax" ::);
	 /* clear TS and EM (or-then-xor leaves bits 2 and 3 zero) */
	 asm volatile("orl  $0x0000000c, %%eax" ::);
	 asm volatile("xorl $0x0000000c, %%eax" ::);
	 asm volatile("movl %%eax, %%cr0" ::);

     // Reload all segment registers.
     asm volatile("lgdt (%0)" : : "r"(&gdt_pd));
     asm volatile("movw %%ax,%%gs" :: "a" (GD_USER_DATA|3));
     asm volatile("movw %%ax,%%fs" :: "a" (GD_USER_DATA|3));
     asm volatile("movw %%ax,%%es" :: "a" (GD_KERN_DATA));
     asm volatile("movw %%ax,%%ds" :: "a" (GD_KERN_DATA));
     asm volatile("movw %%ax,%%ss" :: "a" (GD_KERN_DATA));
     asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KERN_TEXT));  // reload cs
     asm volatile("lldt %%ax" :: "a" (0));

	 /* Drop the temporary identity mapping of the kernel text. */
	 init_vpd[PAGEDIR_IDX(_text - KBASE)] = 0;
     /* refresh the vpd */
     lcr3(init_cr3);

	 kpipe_open(&pf_pipe);
     return 0;
}

/* Per-AP (application processor) half of vmem_init: enable paging with
 * the supplied page directory CR3 and reload the segment registers from
 * the shared GDT.  Returns 0 on success. */
int
vmem_init_ap(physaddr_t cr3)
{
	 uint32_t cr0;

	 /* Enable paging (PG) and cache-disable bits (CD|NW); clear TS/EM
	  * so FPU instructions do not trap -- mirrors vmem_init. */
	 cr0 = rcr0();
	 cr0 |= CR0_PG | CR0_NW | CR0_CD;
     cr0 &= ~(CR0_TS|CR0_EM);

     lcr0(cr0);

	 asm volatile("lgdt (%0)" : : "r"(&gdt_pd));
	 /* NOTE(review): vmem_init loads gs/fs with GD_USER_DATA|3 (RPL 3);
	  * here the RPL bits are omitted -- confirm which is intended. */
     asm volatile("movw %%ax,%%gs" :: "a" (GD_USER_DATA));
     asm volatile("movw %%ax,%%fs" :: "a" (GD_USER_DATA));
     asm volatile("movw %%ax,%%es" :: "a" (GD_KERN_DATA));
     asm volatile("movw %%ax,%%ds" :: "a" (GD_KERN_DATA));
     asm volatile("movw %%ax,%%ss" :: "a" (GD_KERN_DATA));
     asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KERN_TEXT));  // reload cs
     asm volatile("lldt %%ax" :: "a" (0));
	 lcr3(cr3);

	 /* BUGFIX: the function is declared int but previously fell off the
	  * end without returning a value (undefined behavior if used). */
	 return 0;
}

/* Late VM fixups: map the HPET and LAPIC MMIO windows (uncached,
 * write-through) or back them with zeroed fake pages when the hardware
 * is absent, then expose the LAPIC page and sysconf to user space.
 * Returns 0 on success or a negative error code. */
int
vmem_post_fix(void)
{
	 int err;

	 if (sysconf.has_hpet)
	 {
		  /* Real HPET: map its physical registers uncached. */
		  uint32_t pte = initial_conf.hpet_phys | PTE_W | PTE_PWT | PTE_PCD;
		  if ((err = fix_addr((void *)hpet, &pte, 1)) < 0)
		  {
			   return err;
		  }
	 }
	 else
	 {
		  /* No HPET: back the window with a zeroed fake page so reads
		   * are harmless. */
		  physaddr_t fake;
		  if ((err = pmem_page_alloc(1, &fake, 0, PPAGE_CORE)) < 0)
			   return err;

		  uint32_t pte = fake | PTE_W | PTE_PWT | PTE_PCD;
		  if ((err = fix_addr((void *)hpet, &pte, 1)) < 0)
		  {
			   return err;
		  }
		  /* BUGFIX: this used to memset lapic (copy-paste from the
		   * LAPIC branch below); the freshly mapped page is at hpet. */
		  else memset((void *)hpet, 0, PAGE_SIZE);
	 }

	 if (sysconf.use_lapic)
	 {
		  uint32_t pte = initial_conf.lapic_phys | PTE_W | PTE_PWT | PTE_PCD;
		  if ((err = fix_addr((void *)lapic, &pte, 1)) < 0)
		  {
			   return err;
		  }
		  /* Map it to ULAPIC (read-only for user, uncached). */
		  vpt[PAGE_NUM(ULAPIC)] = initial_conf.lapic_phys | PTE_U | PTE_PCD | PTE_P;
	 }
	 else
	 {
		  physaddr_t fake;
		  if ((err = pmem_page_alloc(1, &fake, 0, PPAGE_CORE)) < 0)
			   return err;

		  uint32_t pte = fake | PTE_W | PTE_PWT | PTE_PCD;
		  if ((err = fix_addr((void *)lapic, &pte, 1)) < 0)
		  {
			   return err;
		  }
		  else memset((void *)lapic, 0, PAGE_SIZE);
		  /* Map it to ULAPIC */
		  vpt[PAGE_NUM(ULAPIC)] = fake | PTE_U | PTE_PCD | PTE_P;
	 }
	 /* Map sysconf read-only for user space. */
	 vpt[PAGE_NUM(USYSCONF)] = PADDR(&sysconf) | PTE_U | PTE_P;
	 return 0;
}

/* Point the per-CPU temporary page at the page containing the physical
 * address ADDR; returns a virtual pointer whose in-page offset matches
 * ADDR's offset. */
void *
set_tpage(physaddr_t addr)
{
	 physaddr_t page_base = addr & ~(PAGE_SIZE - 1);
	 void *mapped = (void *)((physaddr_t)get_tpage() | PAGE_OFF(addr));

	 vpt[PAGE_NUM(mapped)] = page_base | PTE_W | PTE_P;
	 invlpg(mapped);

	 return mapped;
}

/* Write a raw page-table entry for VADDR and flush its TLB entry. */
void
set_vpt(void *vaddr, uint32_t pte)
{
	 uint32_t slot = PAGE_NUM(vaddr);

	 vpt[slot] = pte;
	 invlpg(vaddr);
}

/* Read the page-table entry for VADDR, or 0 when the covering page
 * directory entry is not present (touching vpt[] there would fault). */
ptab_entry_t
get_vpt(void *vaddr)
{
	 if (!(vpd[PAGEDIR_IDX(vaddr)] & PTE_P))
		  return 0;

	 return vpt[PAGE_NUM(vaddr)];
}

/* Trick lock for page table */

/* Acquire the spin lock guarding page directory entry IDX.
 * BUGFIX: previously declared uint32_t but returned nothing (undefined
 * behavior if the value were used); now void, matching release_pde.
 * All callers in this file ignore the result. */
static inline void
lock_pde(uint32_t idx)
{
	 spl_acquire(vpd_lock + idx);
}

/* Release the spin lock guarding page directory entry IDX. */
static inline void
release_pde(uint32_t idx)
{
	 spl_release(&vpd_lock[idx]);
}

/* Lock page-table entry IDX by atomically swapping in the PTE_LOCK
 * marker; spins while another holder owns it.  Returns the previous
 * (real) entry value, which must be handed back to release_pte. */
static inline uint32_t
lock_pte(uint32_t idx)
{
	 uint32_t old;

	 do
	 {
		  old = xchg32(vpt + idx, PTE_LOCK);
	 } while (old == PTE_LOCK);

	 invlpg((void *)(idx << PTX_SHIFT));
	 return old;
}

/* Unlock page-table entry IDX by publishing VAL (typically the value
 * returned by lock_pte, possibly modified) and flushing the TLB slot. */
static inline void
release_pte(uint32_t idx, uint32_t val)
{
	 xchg32(vpt + idx, val);
	 invlpg((void *)(idx << PTX_SHIFT));
}

/* The fundamental routine for virtual memory mapping management. The
 * routine maps virtual addresses to physical pages. The VPT mapping and
 * VPD_LOCK are assumed to be correctly initialized. */

/* addr   -- the virtual address that needs processing; need not be page aligned */
/* create -- if the pointer is not null, it points to the page table entry value; */
/*           after the call, the value of the slot will be the old
 *           value in the table. If the page physical address in it is
 *           0, the routine will allocate a new page by itself; otherwise
 *           it will establish the mapping using that page address */
/* int ... -- force replace the old item with the value in ``create'' */

/* The routine returns a negative result if an error happens, 0 for
 * success, FA_COW for the processing of a COW sign, or FA_AWA for the
 * processing of an AWA sign */
int
__fix_addr(int verbose, void *addr, ptab_entry_t *create, ...)
{
	 uint32_t idx;
	 idx = PAGEDIR_IDX(addr);

	 /* Old entry value handed back through *create on a create path. */
	 uint32_t create_rep;

	 int err;
	 physaddr_t naddr;

	 uint32_t i;
	 int force;

	 if (verbose)
	 {
		  kprintf("[fix_addr] addr = %p, create = %p", addr, create);
	 }

	 /* The trailing vararg (force flag) is only present when create is
	  * non-NULL. */
	 if (create != NULL)
	 {
		  va_list va;

		  va_start(va, create);
		  force = va_arg(va, int);
		  va_end(va);

		  if (verbose)
		  {
			   kprintf("(%08x), force = %d. ", *create, force);
		  }
	 }
	 else
	 {
		  force = 0;

		  if (verbose)
		  {
			   kprintf(". ");
		  }
	 }

	 /* L1 path: ADDR falls inside the recursive KVPT window, so the
	  * missing object is a page TABLE and we manipulate vpd[] under the
	  * per-entry spin lock.  Otherwise the L2 path below fixes a single
	  * page, first recursing to ensure its table exists. */
	 if (idx == PAGEDIR_IDX(KVPT))
	 {
		  /* dirbase is idx * PAGETAB_COUNT, so for i < PAGETAB_COUNT
		   * `dirbase | i' and `dirbase + i' are interchangeable below. */
		  uint32_t dirbase = idx * PAGETAB_COUNT;
		  idx = PAGETAB_IDX(addr);

		  lock_pde(idx);
		  uint32_t pde = vpd[idx];

		  if (verbose)
		  {
			   kprintf("[L1] pde = %08x", pde);
		  }

		  /* L1 tab should ignore the force flag */
		  /* if (force) */
		  /* { */
		  /* 	   create_rep = pde; */
		  /* 	   goto fa_create_1; */
		  /* } */
		  /* else */
		  if (pde & PTE_P)
		  {
			   /* Present: nothing to do unless the COW sign is set. */
			   if (!PTE_COW_P(pde))
			   {
					err = 0;
					goto fa_exit_1;
			   }

			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_1;
			   } else pmem_page_inc_ref(naddr);

			   if (verbose)
			   {
					kprintf("[fix_addr] process COW sign.\n");
			   }
			   /* Map the new table at the temporary page to fill it. */
			   pdir_entry_t *tdir = (pdir_entry_t *)get_tpage();
			   vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
			   invlpg(tdir);

			   /* duplicate the page tab, with processing of the
				* flags */
			   for (i = 0; i != PAGETAB_COUNT; ++i)
			   {
					if (vpt[dirbase + i] & PTE_P)
					{
						 if ((vpt[dirbase | i] & (PTE_W | PTE_COW)) &&
							 !(vpt[dirbase | i] & PTE_SHARED))
						 {
							  /* NOTE(review): siblings index vpt[] here;
							   * vpd[dirbase | i] runs past the directory
							   * (dirbase|i is a vpt index) -- verify this
							   * should not be vpt[dirbase | i]. */
							  tdir[i] = (vpd[dirbase | i] | PTE_COW) & ~(uint32_t)PTE_W;

							  /* should not be uncommented */
							  // svpt[i] = tvpt[i];
						 }
						 else
						 {
							  tdir[i] = vpt[dirbase + i];
						 }

						 pmem_page_inc_ref(PTE_ADDR(tdir[i]));
					} else tdir[i] = 0;
			   }

			   pmem_page_dec_ref(PTE_ADDR(pde));
			   vpt[PAGE_NUM(tdir)] = 0;
			   /* New private table: writable, COW sign cleared. */
			   pde = (naddr | (pde & PDE_USER) | PTE_W) & ~(uint32_t)PTE_COW;

			   err = FA_COW;
		  }
		  else if (PTE_AWA_P(pde))
		  {
			   /* Allocate-when-accessed table: materialize it and tag
				* every contained entry with the same AWA sign. */
			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_1;
			   } else pmem_page_inc_ref(naddr);

			   if (verbose)
			   {
					kprintf("[fix_addr] process AWA sign.\n");
			   }
			   pdir_entry_t *tdir = (pdir_entry_t *)get_tpage();
			   vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
			   invlpg(tdir);

			   /* Set all pages as AWA */
			   for (i = 0; i != PAGETAB_COUNT; ++i)
			   {
					tdir[i] = pde;
			   }

			   vpt[PAGE_NUM(tdir)] = 0;
			   pde = (naddr | (pde & PDE_USER) | PTE_W | PTE_P) & ~(uint32_t)PTE_AWA;

			   err = FA_AWA;

		  }
		  else if (create != NULL)
		  {
			   create_rep = 0;

		  fa_create_1:

			   /* Non-zero page address in *create: install it directly. */
			   if (PTE_ADDR(*create))
			   {
					pde = *create | PTE_P;
					*create = create_rep;

					err = 0;
			   }
			   else
			   {
					/* create a empty page */
					if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
					{
						 goto fa_exit_1;
					}
					else pmem_page_inc_ref(naddr);

					pdir_entry_t *tdir = (pdir_entry_t *)get_tpage();
					vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
					invlpg(tdir);

					memset(tdir, 0, PAGE_SIZE);

					pde = naddr | (*create & PDE_USER) | PTE_P;
					*create = create_rep;

					err = 0;
			   }

			   if (verbose)
			   {
					kprintf("CREATED");
			   }
		  }
		  else err = -E_FAULT;

	 fa_exit_1:

		  vpd[idx] = pde;
		  /* Need not refresh the table, since there is append only modification */
		  release_pde(idx);

		  if (verbose)
		  {
			   kprintf("\n vpd[] = %08x\n", pde);
		  }

	 }
	 else
	 {

		  if (verbose)
		  {
			   kprintf("[L2]");
		  }

		  /* First make sure the covering page table exists by fixing the
		   * vpt slot itself (recursion lands on the L1 path above). */
		  uint32_t pte;
		  if (create != NULL)
		  {
			   int cp;
			   cp = *create & PDE_USER;

			   /* NOTE(review): &cp is int* passed where ptab_entry_t* is
				* expected -- works only if ptab_entry_t is a 32-bit
				* integer type; confirm. */
			   if ((err = __fix_addr(verbose, (void *)(vpt + PAGE_NUM(addr)), &cp, false)) < 0)
					goto fa_exit;
		  }
		  else
		  {
			   if ((err = __fix_addr(verbose, (void *)(vpt + PAGE_NUM(addr)), NULL)) < 0)
					goto fa_exit;
		  }

		  idx = PAGE_NUM(addr);
		  pte = lock_pte(idx);

		  /* force: unconditionally replace, reporting the old entry. */
		  if (force)
		  {
			   create_rep = pte;
			   goto fa_create_2;
		  }
		  if (pte & PTE_P)
		  {

			   if (!PTE_COW_P(pte))
			   {
					err = 0;
					goto fa_exit_2;
			   }

			   if (verbose)
			   {
					kprintf("[fix_addr] process COW sign.\n");
			   }

			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_2;
			   }
			   else pmem_page_inc_ref(naddr);

			   char *tdpage = (char *)get_tpage();
			   vpt[PAGE_NUM(tdpage)] = naddr | PTE_W | PTE_P;
			   invlpg(tdpage);

			   /* copy the page */
			   memmove(tdpage, PAGE_ALIGN(addr), PAGE_SIZE);

			   pmem_page_dec_ref(PTE_ADDR(pte));
			   pte = (naddr | (pte & PTE_USER) | PTE_W) & ~(uint32_t)PTE_COW;

			   err = FA_COW;
		  }
		  else if (PTE_AWA_P(pte))
		  {
			   if (verbose)
			   {
					kprintf("[fix_addr] process AWA sign.\n");
			   }

			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_2;
			   }
			   else pmem_page_inc_ref(naddr);

			   char *tdpage = (char *)get_tpage();
			   vpt[PAGE_NUM(tdpage)] = naddr | PTE_W | PTE_P;
			   invlpg(tdpage);

			   /* AWA pages start zero-filled. */
			   memset(tdpage, 0, PAGE_SIZE);
			   pte = (naddr | (pte & PTE_USER) | PTE_W | PTE_P) & ~(uint32_t)PTE_AWA;

			   err = FA_AWA;
		  }
		  else if (create != NULL)
		  {
			   create_rep = 0;

		  fa_create_2:

			   if (PTE_ADDR(*create))
			   {
					pte = *create | PTE_P;
					*create = create_rep;

					err = 0;
			   }
			   else
			   {
					if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
					{
						 goto fa_exit_2;
					}
					else pmem_page_inc_ref(naddr);

					pte = naddr | (*create & PTE_USER) | PTE_P;
					*create = create_rep;

					err = 0;
			   }

			   if (verbose)
			   {
					kprintf("CREATED");
			   }
		  }
		  else err = -E_FAULT;


	 fa_exit_2:

		  /* Publish the (possibly new) entry and drop the PTE lock. */
		  release_pte(idx, pte);

		  if (verbose)
		  {
			   kprintf("\n vpt[] = %08x\n", pte);
		  }

	 }

fa_exit:

	 if (verbose)
	 {
		  kprintf("[fix_addr] finished err = %d.\n", err);
	 }

	 return err;
}


/* Mark page directory entry IDX as shared (exempt from COW downgrade in
 * fork_mem).  Returns 0, or -E_INVAL when the entry is not present. */
int
vpd_mark_shared(int idx)
{
	 int err = -E_INVAL;

	 lock_pde(idx);
	 if (vpd[idx] & PTE_P)
	 {
		  vpd[idx] |= PTE_SHARED;
		  err = 0;
	 }
	 release_pde(idx);

	 return err;
}

/* Mark page table entry IDX as shared.  Returns 0, or -E_INVAL when the
 * entry is not present. */
int
vpt_mark_shared(int idx)
{
	 uint32_t entry = lock_pte(idx);
	 int err = (entry & PTE_P) ? 0 : -E_INVAL;

	 if (err == 0)
		  entry |= PTE_SHARED;
	 release_pte(idx, entry);

	 return err;
}

/* Tag page directory entry IDX as allocate-when-accessed (AWA): the
 * backing table is materialized lazily by __fix_addr on first touch.
 * Returns 0 on success or -E_INVAL when the entry is already present. */
int
vpd_alloc_temp(int idx)
{
	 int result;
	 lock_pde(idx);
	 if (vpd[idx] & PTE_P)
	 {
		  result = -E_INVAL;
	 }
	 else
	 {
		  vpd[idx] |= PTE_AWA | PTE_U | PTE_W;
		  /* BUGFIX: result was previously left uninitialized on this
		   * path and returned (undefined behavior). */
		  result = 0;
	 }
	 release_pde(idx);
	 return result;
}

/* Tag page table entry IDX as allocate-when-accessed (AWA).  First
 * ensures the covering page table exists via fix_addr, then sets the
 * AWA sign if the entry is not already present.
 *
 * Returns a negative error from fix_addr, -E_INVAL when the entry is
 * present, or a non-negative value otherwise.  NOTE(review): on the
 * success path `result' keeps fix_addr's return (0, FA_COW, or FA_AWA)
 * -- presumably intentional propagation; confirm callers expect it. */
int
vpt_alloc_temp(int idx)
{
	 int result;
	 uint32_t pte = PTE_U | PTE_W;
	 if ((result = fix_addr((void *)(vpt + idx), &pte, 0)) < 0)
		  return result;

	 pte = lock_pte(idx);
	 if (pte & PTE_P)
	 {
		  result = -E_INVAL;
	 }
	 else
	 {
		  pte |= PTE_AWA | PTE_U | PTE_W;
	 }
	 release_pte(idx, pte);
	 return result;
}

/* Build a fresh address space: a lock page, a system page table (lock
 * page + LAPIC mapping), and a page directory sharing the kernel area
 * with the current one.  Returns the new directory's physical address,
 * or 0 on failure (the return type is physaddr_t, so 0 -- not a
 * negative errno -- is the only representable failure value). */
physaddr_t
vmem_vpt_new(void)
{
	 int i;
	 int err;

	 physaddr_t nvpd_lock;
	 spin_lock_t *tvpd_lock;
	 if ((err = pmem_page_alloc(1, &nvpd_lock, 0, PPAGE_CORE)) < 0)
	 {
		  return 0;
	 }

	 BEGIN_TPAGE(nvpd_lock, tvpd_lock);
	 for (i = 0; i != PAGEDIR_COUNT; ++ i)
		  /* BUGFIX: previously spl_init(vpd_lock + i), which initialized
		   * the CURRENT address space's locks and left the new lock page
		   * (mapped at tvpd_lock) uninitialized. */
		  spl_init(tvpd_lock + i);
	 END_TPAGE;

	 physaddr_t nvpt;
	 ptab_entry_t *tvpt;
	 if ((err = pmem_page_alloc(1, &nvpt, 0, PPAGE_CORE)) < 0)
	 {
		  pmem_page_free(nvpd_lock);
		  /* BUGFIX: returned the negative err through physaddr_t, which
		   * callers checking for 0 would treat as a valid address. */
		  return 0;
	 }

	 BEGIN_TPAGE(nvpt, tvpt);
	 memset(tvpt, 0, PAGE_SIZE);
	 tvpt[PAGETAB_IDX(VPDLCK)] = nvpd_lock  | PTE_W | PTE_P;
	 tvpt[PAGETAB_IDX(LAPIC)]  = vpt[PAGE_NUM(LAPIC)];
	 END_TPAGE;


	 physaddr_t nvpd;
	 pdir_entry_t *tvpd;
	 if ((err = pmem_page_alloc(1, &nvpd, 0, PPAGE_CORE)) < 0)
	 {
		  pmem_page_free(nvpd_lock);
		  pmem_page_free(nvpt);
		  return 0;
	 }

	 BEGIN_TPAGE(nvpd, tvpd);
	 memset(tvpd, 0, PAGE_SIZE);
	 /* Common part: kernel mappings are shared with the current space. */
	 for (i = PAGEDIR_IDX(UWLIM); i != PAGEDIR_COUNT; ++ i)
	 {
		  tvpd[i] = vpd[i];
	 }

	 /* Recursive entries pointing at the new space's own structures. */
	 tvpd[PAGEDIR_IDX(VPDLCK)] = nvpt | PTE_W | PTE_P;
	 tvpd[PAGEDIR_IDX(KVPT)]   = nvpd | PTE_W | PTE_P;
     tvpd[PAGEDIR_IDX(UVPT)]   = nvpd | PTE_U | PTE_P;
	 END_TPAGE;

	 pmem_page_inc_ref(nvpd);
	 pmem_page_inc_ref(nvpt);
	 pmem_page_inc_ref(nvpd_lock);

	 return nvpd;
}

/* Duplicate the current address space for fork: kernel mappings are
 * shared, and every present, non-shared, writable user directory entry
 * is downgraded to copy-on-write in BOTH parent and child.
 * Returns 0 on success or a negative error code.
 * NOTE(review): the new directory nvpd is never published or returned
 * to the caller (see the #if 0 region) -- looks unfinished; confirm. */
int
fork_mem(void)
{
	 int i;
	 int err;

	 physaddr_t nvpd_lock;
	 spin_lock_t *tvpd_lock;
	 if ((err = pmem_page_alloc(1, &nvpd_lock, 0, PPAGE_CORE)) < 0)
	 {
		  return err;
	 }

	 BEGIN_TPAGE(nvpd_lock, tvpd_lock);
	 for (i = 0; i != PAGEDIR_COUNT; ++ i)
		  /* BUGFIX: previously spl_init(vpd_lock + i), which initialized
		   * the CURRENT task's locks and left the new lock page (mapped
		   * at tvpd_lock) uninitialized. */
		  spl_init(tvpd_lock + i);
	 END_TPAGE;

	 physaddr_t nvpt;
	 ptab_entry_t *tvpt;
	 if ((err = pmem_page_alloc(1, &nvpt, 0, PPAGE_CORE)) < 0)
	 {
		  pmem_page_free(nvpd_lock);
		  return err;
	 }

	 BEGIN_TPAGE(nvpt, tvpt);
	 memset(tvpt, 0, PAGE_SIZE);
	 tvpt[PAGETAB_IDX(VPDLCK)] = nvpd_lock  | PTE_W | PTE_P;
	 tvpt[PAGETAB_IDX(LAPIC)]  = vpt[PAGE_NUM(LAPIC)];
	 END_TPAGE;


	 physaddr_t nvpd;
	 pdir_entry_t *tvpd;
	 if ((err = pmem_page_alloc(1, &nvpd, 0, PPAGE_CORE)) < 0)
	 {
		  pmem_page_free(nvpd_lock);
		  pmem_page_free(nvpt);
		  return err;
	 }

	 BEGIN_TPAGE(nvpd, tvpd);
	 /* Common part: kernel mappings are shared as-is. */
	 for (i = PAGEDIR_IDX(UWLIM); i != PAGEDIR_COUNT; ++ i)
	 {
		  tvpd[i] = vpd[i];
	 }
	 /* COW part: downgrade writable private entries in both spaces. */
	 for (i = 0; i != PAGEDIR_IDX(UWLIM); ++ i)
	 {
		  lock_pde(i);
		  if ((vpd[i] & PTE_P) &&
			  !(vpd[i] & PTE_SHARED) &&
			  (vpd[i] & PTE_W))
		  {
			   vpd[i] &= ~PTE_W;
			   vpd[i] |= PTE_COW;
		  }
		  tvpd[i] = vpd[i];
		  release_pde(i);
		  /* NOTE(review): this inc_ref runs even for empty entries
		   * (tvpd[i] == 0) and masks with PAGE_ALIGN rather than
		   * PTE_ADDR -- verify a PTE_P guard is not needed. */
		  pmem_page_inc_ref((physaddr_t)PAGE_ALIGN(tvpd[i]));
	 }

	 tvpd[PAGEDIR_IDX(VPDLCK)] = nvpt | PTE_W | PTE_P;
	 tvpd[PAGEDIR_IDX(KVPT)]   = nvpd | PTE_W | PTE_P;
     tvpd[PAGEDIR_IDX(UVPT)]   = nvpd | PTE_U | PTE_P;
	 END_TPAGE;

#if 0
	 proc_id_t cur = cur_proc_id();
	 physaddr_t old_vpt = procs[cur].vpt;
	 procs[cur].vpt = nvpt;
#endif

	 pmem_page_inc_ref(nvpd);
	 pmem_page_inc_ref(nvpt);
	 pmem_page_inc_ref(nvpd_lock);

#if 0
	 pmem_page_dec_ref(old_vpt);
#endif

	 return 0;
}

/* Now we use ap to deliver the exception */
/* NOTE(review): disabled page-fault queue helpers kept for reference;
 * pf_pipe above is still opened by vmem_init even though these readers
 * and writers are compiled out. */
#if 0
static void
mem_enqueue_pf_info(struct pf_info_t *pf)
{
	 kpipe_write(&pf_pipe, pf, sizeof(struct pf_info_t));
}

static void
mem_enqueue_pf_info_wait(struct pf_info_t *pf)
{
	 /* TODO - FIX ? */
	 spl_acquire(&pf_pipe.lock);
	 kpipe_write_unsafe(&pf_pipe, pf, sizeof(struct pf_info_t));
	 sch_notify(PTE_MGR_PID(pf->pte));
	 sch_wait(cur_proc_id(), 1, &pf_pipe.lock);
	 call_yield();
}

static void
mem_dequeue_pf_info(struct pf_info_t *pf)
{
	 kpipe_read(&pf_pipe, pf, sizeof(struct pf_info_t));
}
#endif
