/* Time-stamp: <2010-10-23 19:06:04 xinhaoyuan> */
/* This file implements a simple page-level physical and virtual memory
 * management subsystem. */
#include "kernel.h"

/* Global descriptor table. */

/* The kernel and user segments are identical (except for the DPL). */
/* To load the SS register, the CPL must equal the DPL.  Thus, */
/* we must duplicate the segments for the user and the kernel. */

struct segdesc_t gdt[GDT_COUNT] =
{
     /* 0x0 - unused (always faults -- for trapping NULL far pointers) */
     SEG_NULL,

     /* 0x8 - kernel code segment */
     [GD_KERN_TEXT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),

     /* 0x10 - kernel data segment */
     [GD_KERN_DATA >> 3] = SEG(STA_W, 0x0, 0xffffffff, 0),

     /* 0x18 - user code segment */
     [GD_USER_TEXT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),

     /* 0x20 - user data segment */
     [GD_USER_DATA >> 3] = SEG(STA_W, 0x0, 0xffffffff, 3),
};

/* Pseudo-descriptor handed to LGDT: 16-bit limit followed by the base. */
struct pseudodesc_t gdt_pd = {
     sizeof(gdt) - 1, (unsigned long) gdt
};

/* Resulting physical memory layout: a sorted list of boundary addresses
 * at which usability toggles (built by make_pmemory_layout). */
uint32_t   mlayout_count;
physaddr_t mlayout_addr[MEMMAP_MAXCOUNT << 1];

/* Static storage for calculating the memory map */
static uint32_t        mmap_count;
static struct memmap_t mmap[MEMMAP_MAXCOUNT];

/* Scratch boundary "nodes" (one for each entry's base and end) used
 * while sorting and sweeping the raw memory map. */
static uint32_t   mmap_node_count;
static physaddr_t mmap_node_addr[MEMMAP_MAXCOUNT << 1];
static int32_t    mmap_node_flag[MEMMAP_MAXCOUNT << 1];

/* Global page-fault message pipe; page-aligned so it can be mapped
 * into other address spaces directly. */
static kpipe_t pf_pipe __attribute__ ((aligned (PAGE_SIZE)));

/* Build the global layout table (mlayout_addr/mlayout_count) from the
 * raw map in mmap[].  The table is a sorted list of addresses where
 * the "usable" property toggles, so consecutive pairs of entries
 * delimit usable regions.  Prints the boundaries when VERBOSE is
 * non-zero.  Always returns 0. */
static int
make_pmemory_layout(int verbose)
{
     uint32_t i, j, k, t;

     mmap_node_count = 0;

     /* Expand each map entry into boundary nodes: the base carries the
      * entry's flag, the end carries its negation, so the sweep below
      * can maintain nesting levels per flag type. */
     for (i = 0; i != mmap_count; ++i)
     {
		  mmap_node_addr[mmap_node_count] = mmap[i].base;
		  mmap_node_flag[mmap_node_count] = mmap[i].flag;
		  ++mmap_node_count;

		  /* ignore if the end overflows */
		  if (!MEMMAP_ADDR_OVERFLOW(mmap[i].end))
		  {
			   mmap_node_addr[mmap_node_count] =  mmap[i].end;
			   mmap_node_flag[mmap_node_count] = -mmap[i].flag;
			   ++mmap_node_count;
		  }
     }

     /* Sort the mmap node table (selection sort; the table is tiny) */
     for (i = 0; i != mmap_node_count; ++i)
     {
		  j = i;
		  for (k = i + 1; k != mmap_node_count; ++k)
		  {
			   if (mmap_node_addr[k] < mmap_node_addr[j])
					j = k;
		  }
		  /* NOTE(review): t is uint32_t; if physaddr_t is ever wider
		   * than 32 bits this swap truncates addresses -- confirm. */
		  t = mmap_node_addr[i]; mmap_node_addr[i] = mmap_node_addr[j]; mmap_node_addr[j] = t;
		  t = mmap_node_flag[i]; mmap_node_flag[i] = mmap_node_flag[j]; mmap_node_flag[j] = t;
     }

     /* for debugging */
     /* for (i = 0; i != mmapNodeCount; ++i) */
     /* 	  kprintf("addr : %x, flag : %x\n", mmapNodeAddr[i], mmapNodeFlag[i]); */

     /* Make memory layout table: sweep the sorted boundaries, keeping
      * nesting levels for FREE and RECLAIMABLE ranges; any other flag
      * type counts *against* usability. */
     int32_t vaild_level, reclaim_level;
     bool last_vaild, v;
     
     vaild_level = 0; reclaim_level = 0; v = false;
     mlayout_count = 0;

     i = 0;
     while (1)
     {
		  if (i >= mmap_node_count) break;

		  last_vaild = v;
		  /* Consume every node sharing the same address before deciding
		   * whether usability changed at this boundary. */
		  while (1)
		  {
			   switch (mmap_node_flag[i])
			   {
			   case MEMMAP_FREE:
					++vaild_level;
					break;
			   case -MEMMAP_FREE:
					--vaild_level;
					break;
			   case MEMMAP_RECLAIMABLE:
					++reclaim_level;
					break;
			   case -MEMMAP_RECLAIMABLE:
					--reclaim_level;
					break;
			   default:
					/* Other region types (e.g. reserved) veto usability:
					 * a start decreases the level, the negated end
					 * restores it. */
					if (mmap_node_flag[i] > 0)
						 --vaild_level;
					else ++vaild_level;
			   }

			   ++i;
			   if (i >= mmap_node_count) break;
			   if (mmap_node_addr[i - 1] != mmap_node_addr[i]) break;
		  }
	  
		  /* Usable when inside a free range and not vetoed; a balanced
		   * free level falls back to the reclaimable level. */
		  if (vaild_level == 0)
			   v = (reclaim_level > 0);
		  else v = (vaild_level > 0);

		  /* for debugging */
		  // kprintf("%x %d %d %d\n", mmapNodeAddr[i - 1], vaildLevel, reclaimLevel, v);

		  /* Record a boundary whenever usability toggles. */
		  if (v != last_vaild)
		  {
			   mlayout_addr[mlayout_count] = mmap_node_addr[i - 1];
			   ++mlayout_count;
		  }
     }

	 if (verbose)
	 {
		  kprintf("Physical memory layout boundary:\n");
		  for (i = 0; i != mlayout_count; ++i)
		  {
			   kprintf(" [%d] = %x\n", i, mlayout_addr[i]);
		  }
	 }

	 return 0;
}

/* Allocate memory space when initializing the system */

/* Bump pointer marking the first free physical address for boot-time
 * allocations; lazily initialized to just past the kernel image. */
static physaddr_t init_free;

/* Boot-time bump allocator: carve SIZE bytes, aligned to ALIGN (must
 * be a power of two), out of the first usable layout segment at or
 * after init_free.  Returns a kernel virtual address, or NULL when no
 * segment fits or the result would leave the kernel area (KSIZE). */
static void *
init_alloc(size_t size, size_t align)
{
     if (init_free == 0)
		  init_free = PADDR(_end);

#ifdef K_INIT_DEBUG
     kprintf("[init_alloc] init_free = %p, size = %d, align = 0x%x\n", init_free, size, align);
#endif

	 /* Find the first segment possible; odd layout indices are segment
	  * end boundaries, so lid names the end of the candidate segment. */
     int lid = pmemory_layout_bin_search(init_free);
     lid |= 1;

     while (lid < mlayout_count)
     {
		  if (init_free < mlayout_addr[lid - 1])
			   init_free = mlayout_addr[lid - 1];

		  /* Find the continuous memory space that would contain the
		   * need.  `init_free |= align - 1' leaves init_free one byte
		   * below the next ALIGN boundary; the ++ below completes the
		   * round-up. */
		  if ((init_free |=  align - 1) + size < mlayout_addr[lid])
			   break;
		  else lid += 2;
     }

     /* NULL is failed */
     if (lid >= mlayout_count) return NULL;
     if (init_free + size >= KSIZE) return NULL;
     else ++init_free;

     void *result = KADDR(init_free);

#ifdef K_INIT_DEBUG
     /* BUG FIX: this debug print was unconditional, unlike its sibling
      * above; guard it so non-debug builds boot quietly. */
     kprintf("[init_alloc] result = %p\n", result);
#endif

     init_free += size;

	 return result;
}

/* Size the physical page bookkeeping and carve out the boot-time
 * arrays (ppages, procs, irq_pipes) with the bump allocator.
 * Returns 0 on success or -E_NO_MEM. */
static int
do_init_alloc(void)
{
     /* Number of managed pages: up to the end of the last usable layout
      * segment, or the whole 4GB space when no layout is available. */
     if (mlayout_count >= 2)
          ppage_count = mlayout_addr[(mlayout_count - 2) | 1] / PAGE_SIZE;
     else
          ppage_count = 1 << (32 - PAGE_SHIFT);

     kprintf("Available physical pages: %d\n", ppage_count);

     ppages = init_alloc(ppage_count * sizeof(struct ppage_t), PAGE_SIZE);
     if (ppages == NULL)
          return -E_NO_MEM;

     procs = init_alloc(PROCESS_MAX_COUNT * sizeof(struct proc_t), PAGE_SIZE);
     if (procs == NULL)
          return -E_NO_MEM;

     irq_pipes = (kpipe_t *)init_alloc(IRQ_COUNT * sizeof(kpipe_t), PAGE_SIZE);
     if (irq_pipes == NULL)
          return -E_NO_MEM;

     return 0;
}

/* Classify every physical page against the layout table and build the
 * global free-page list: a page is free only when it lies entirely
 * inside one usable segment; everything else is marked PPAGE_CORE.
 * Also resets the per-CPU free lists. */
static void
init_pmem_allocator(int verbose)
{

     uint32_t i, j, k, w, apages;
     bool vaild;
     physaddr_t cur;

     struct ppage_t *free_head = NULL;

     apages = 0;

     cur = 0; i = 0; j = 0; k = 0;
     for (i = 0; i != ppage_count; ++i)
     {

		  /* j: first layout boundary strictly above the page start. */
		  while (j < mlayout_count)
		  {
			   if (mlayout_addr[j] <= cur)
					++j;
			   else break;
		  }

		  /* k: first layout boundary at or above the page end. */
		  while (k < mlayout_count)
		  {
			   if (mlayout_addr[k] < cur + PAGE_SIZE)
					++k;
			   else break;
		  }

		  /* The page is usable when both ends fall in the same
		   * interval and that interval is a usable (odd-index) one. */
		  vaild = (j == k) && ((j & 1) == 1);

		  mcs_init(&ppages[i].lock);
		  ppages[i].ref_count = 0;
		  ppages[i].status = PPAGE_FREE;
	  
		  if (vaild)
		  {
			   ++ apages;
			   ppages[i].next = free_head;
			   free_head = ppages + i;
		  }
		  else
		  {
			   ppages[i].status = PPAGE_CORE;
		  }
		  

		  cur += PAGE_SIZE;
     }

     /* In order to allocate memories in kernel area in booting stage,
      * we reverse the queue of free memory pages. */

     ppage_free_head = NULL;
     mcs_init(&ppage_alloc_lock);
     while (free_head != NULL)
     {
		  struct ppage_t *tmp = free_head->next;
	  
     	  free_head->next = ppage_free_head;
     	  ppage_free_head = free_head;
     	  free_head = tmp;
     }

	 /* Per-CPU free lists start empty; they refill from the global
	  * list on demand (see ppage_alloc). */
	 for (i = 0; i != SMP_MAX_COUNT; ++ i)
	 {
		  ppage_fc_per_cpu[i] = 0;
		  ppage_fh_per_cpu[i] = NULL;
	 }
	 
	 if (verbose)
	 {
		  kprintf("free pages: %d\n", apages);
		  kprintf("\tfirst page number: %d\n", ppage_free_head - ppages);
	 }
}

/* Walk the bootstrap page directory VPD for virtual address VA and
 * return a pointer to the matching page-table entry.  When the
 * directory slot is absent and CREATE is non-zero, a fresh page-table
 * page is allocated, zeroed and installed with PERM | PTE_P.  Returns
 * NULL on allocation failure, or when the slot is missing and CREATE
 * is zero.  An already-present slot is left untouched (PERM is not
 * merged into it). */
static ptab_entry_t *
init_vpd_walk(pdir_entry_t *vpd, const void *va, int create, int perm)
{
     uintptr_t dir_idx = PAGEDIR_IDX(va);
     uintptr_t tab_idx = PAGETAB_IDX(va);

     if (!(vpd[dir_idx] & PTE_P))
     {
          if (!create)
               return NULL;

          /* During bootstrap all allocations are assumed to land in
           * the kernel addressing area, so KADDR() below is valid. */
          physaddr_t ptab_pa;
          if (ppage_alloc(&ptab_pa, PPAGE_CORE) < 0)
               return NULL;

          memset(KADDR(ptab_pa), 0, PAGE_SIZE);
          vpd[dir_idx] = ptab_pa | perm | PTE_P;
          ppage_inc_ref(ptab_pa);
     }

     return (ptab_entry_t *)KADDR(PTE_ADDR(vpd[dir_idx])) + tab_idx;
}

/* Map SIZE bytes (rounded up to whole pages) starting at linear
 * address LA onto physical address PA in the bootstrap directory VPD,
 * creating page tables as needed.  Returns 0 or -E_NO_MEM. */
static int
init_map_segment(pdir_entry_t *vpd, uintptr_t la, size_t size, physaddr_t pa, int perm)
{
     if (size == 0) return 0;

     /* Round the byte count up to a whole number of pages. */
     uintptr_t remaining = ((size - 1) >> PAGE_SHIFT) + 1;

     while (remaining-- != 0)
     {
          ptab_entry_t *pte = init_vpd_walk(vpd, (void *)la, 1, perm);
          if (pte == NULL)
               return -E_NO_MEM;

          *pte = pa | perm | PTE_P;

          la += PAGE_SIZE;
          pa += PAGE_SIZE;
     }

     return 0;
}

/* Bring up the memory subsystem: read the boot memory map, compute the
 * physical layout, allocate the boot-time arrays, build the page
 * allocator, construct and install the initial page directory, enable
 * paging, and reload the segment registers.  Returns 0 or a negative
 * error code. */
int
mem_init(void)
{
	 uint32_t i;
	 
     int err;
     
     /* Pull the boot-loader-provided memory map from its fixed spot. */
     mmap_count = *(uint32_t *)MEMMAP_BASE;
     memmove(mmap, MEMMAP_ENTRY, MEMMAP_SIZE * mmap_count);

     if (mmap_count == 0)
     {
		  kprintf("Cannot detect physical memory map.\n");
		  return -E_IO;
     }

     /* Convert entry lengths into absolute end addresses. */
     for (i = 0; i != mmap_count; ++i)
		  mmap[i].end += mmap[i].base;

     /* generate the initial layout */
     make_pmemory_layout(0);

     /* alloc space for the initial dynamic data */
     if ((err = do_init_alloc()) < 0) return err;

     /* mark the kernel and lower memory reserved.
      * NOTE(review): the two appends below assume mmap[] still has two
      * free slots under MEMMAP_MAXCOUNT -- confirm the loader's limit. */
     mmap[mmap_count].base = (uintptr_t)PADDR(_text);
     mmap[mmap_count].end  = init_free;
     mmap[mmap_count].flag = MEMMAP_RESERVED;

     ++mmap_count;

     mmap[mmap_count].base = (uintptr_t)0;
     mmap[mmap_count].end =  (uintptr_t)0x00100000;
     mmap[mmap_count].flag = MEMMAP_RESERVED;

     ++mmap_count;

     /* generate it again */
     make_pmemory_layout(1);

     /* initialize the ppage allocator */
     init_pmem_allocator(1);

     physaddr_t init_cr3;

     memset(init_vpd, 0, PAGEDIR_COUNT * sizeof(pdir_entry_t));
	 
     init_cr3 = PADDR(init_vpd);

	 init_vpd[PAGEDIR_COUNT - 1] = PADDR(init_vpd_lock) | PTE_W | PTE_P;
     init_vpd[PAGEDIR_IDX(KVPT)] = PADDR(init_vpd) | PTE_W | PTE_P;
     init_vpd[PAGEDIR_IDX(UVPT)] = PADDR(init_vpd) | PTE_U | PTE_P;

     /* map kernel area */
     /* BUG FIX (here and twice below): the `< 0' was inside the
      * assignment parentheses -- `err' got the boolean comparison
      * result, so a failure returned +1 instead of the error code. */
     if ((err = init_map_segment(init_vpd, KBASE, KISIZE, 0, PTE_W)) < 0)
	 {
		  return err;
	 }
	 /* map utask area */
	 if ((err = init_map_segment(init_vpd, UTASK,
								 SMP_MAX_COUNT * TS_USIZE,
								 PADDR(tasks_start), PTE_U)) < 0)
	 {
		  return err;
	 }
	 /* map uirq_pipe ctrl block */
	 ptab_entry_t *init_vpt = KADDR(PTE_ADDR(init_vpd[PAGEDIR_IDX(UIRQ_PIPE)]));
	 for (i = 0; i != IRQ_COUNT; ++ i)
	 {
		  init_vpt[PAGETAB_IDX(UIRQ_PIPE + i * PAGE_SIZE)] =
			   PADDR(irq_pipes + i) | PTE_U | PTE_P;
	 }
	 /* map procs */
	 if ((err = init_map_segment(init_vpd, UPROCS,
								 PROCESS_MAX_COUNT * sizeof(struct proc_t),
								 PADDR(procs), PTE_U)) < 0)
	 {
		  return err;
	 }
	 /* trick mapping so execution survives the paging switch while the
	  * kernel still runs at low addresses; cleared below. */
     init_vpd[0] = init_vpd[PAGEDIR_IDX(KBASE)];

     /* Install page table. */
     lcr3(init_cr3);

     uint32_t cr0;
     
     /* Turn on paging. */
     cr0 = rcr0();
     // cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_TS|CR0_EM|CR0_MP;
	 cr0 |= CR0_PG;
     cr0 &= ~(CR0_TS|CR0_EM);

     lcr0(cr0);

	 /* Initialize tss segment slots for each cpu (filled in later). */
	 for (i = 0; i != SMP_MAX_COUNT; ++ i)
	 {
		  gdt[GD_TSS(i) >> 3] = SEG_NULL;
	 }
	 
     // Reload all segment registers.
     asm volatile("lgdt (%0)" : : "r"(&gdt_pd));
     asm volatile("movw %%ax,%%gs" :: "a" (GD_USER_DATA|3));
     asm volatile("movw %%ax,%%fs" :: "a" (GD_USER_DATA|3));
     asm volatile("movw %%ax,%%es" :: "a" (GD_KERN_DATA));
     asm volatile("movw %%ax,%%ds" :: "a" (GD_KERN_DATA));
     asm volatile("movw %%ax,%%ss" :: "a" (GD_KERN_DATA));
     asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KERN_TEXT));  // reload cs
     asm volatile("lldt %%ax" :: "a" (0));

     init_vpd[0] = 0;
     /* refresh the vpd */
     lcr3(init_cr3);

	 kpipe_open(&pf_pipe);

     return 0;
}

/* Binary search over the sorted layout boundary table: returns the
 * smallest index whose boundary is greater than ADDR, clamped to
 * mlayout_count - 1. */
int
pmemory_layout_bin_search(physaddr_t addr)
{
     int lo = 0, hi = mlayout_count - 1;

     while (lo < hi)
     {
          int mid = (lo + hi) >> 1;
          if (addr < mlayout_addr[mid])
               hi = mid;
          else
               lo = mid + 1;
     }

     return lo;
}

uint32_t ppage_count;                             /* number of managed physical pages */
struct ppage_t *ppages;                           /* per-page metadata array, indexed by page number */
mcs_lock_t ppage_alloc_lock;                      /* protects the global free list */
struct ppage_t *ppage_free_head;                  /* global free list head */
struct ppage_t *ppage_fh_per_cpu[SMP_MAX_COUNT];  /* per-CPU free list heads (lock-free per CPU) */
int ppage_fc_per_cpu[SMP_MAX_COUNT];              /* per-CPU free list lengths */

/* Reset PPAGE to an all-zero state (ref_count = 0), reinitialize its
 * lock, and stamp it with STATUS. */
static void
ppage_init(struct ppage_t *page, uint16_t status)
{
	 memset(page, 0, sizeof *page);
	 mcs_init(&page->lock);
	 page->status = status;
}

/* Allocate one physical page, preferring the current CPU's private
 * free list; when that list is empty, refill it with up to
 * PPAGE_ALLOC_BATCH_COUNT pages from the global list under
 * ppage_alloc_lock.  On success *ADDR receives the page's physical
 * address and the page is reinitialized with STATUS (ref_count 0).
 * Returns 0, or -E_NO_MEM when both lists are exhausted. */
int
ppage_alloc(physaddr_t *addr, uint16_t status)
{
	 int cpu = cur_cpu();
	 
	 if (ppage_fc_per_cpu[cpu] == 0)
	 {
		  struct mcs_lock_node_t lock_node;
		  mcs_acquire(&ppage_alloc_lock, &lock_node);
		  if (ppage_free_head == NULL)
		  {
			   mcs_release(&ppage_alloc_lock, &lock_node);
			   return -E_NO_MEM;
		  }
		  else
		  {
			   /* Move a batch of pages onto this CPU's list. */
			   struct ppage_t *cur;
			   int i;
			   for (i = 0;
					i != PPAGE_ALLOC_BATCH_COUNT && ppage_free_head != NULL;
					++ i)
			   {
					cur = ppage_free_head;
					ppage_free_head = cur->next;
			   
					cur->next = ppage_fh_per_cpu[cpu];
					ppage_fh_per_cpu[cpu] = cur;

					++ ppage_fc_per_cpu[cpu];
			   }
		  }
		  mcs_release(&ppage_alloc_lock, &lock_node);
	 }

	 struct ppage_t *result;
	 if (ppage_fc_per_cpu[cpu] == 0)
		  return -E_NO_MEM;
	 else
	 {
		  result = ppage_fh_per_cpu[cpu];
	 }
	 
	 /* A page's physical address is its index in ppages[] shifted by
	  * the page size. */
	 *addr = (result - ppages) << PAGE_SHIFT;
	 ppage_fh_per_cpu[cpu] = result->next;
	 -- ppage_fc_per_cpu[cpu];
	 ppage_init(result, status);

	 return 0;
}

/* Increment the reference count of the physical page containing ADDR.
 * Returns 0 on success, or -1 if the page is currently free. */
int
ppage_inc_ref(physaddr_t addr)
{
	 struct ppage_t *page = ppages + (addr >> PAGE_SHIFT);
	 struct mcs_lock_node_t node;
	 int result = -1;

	 /* The per-page lock keeps the counter consistent. */
	 mcs_acquire(&page->lock, &node);
	 if (page->status != PPAGE_FREE)
	 {
		  ++ page->ref_count;
		  result = 0;
	 }
	 mcs_release(&page->lock, &node);

	 return result;
}

/* Add REF (which may be negative) to the reference count of the
 * physical page containing ADDR.  Returns 0 on success, or -1 if the
 * page is currently free. */
int
ppage_add_ref(physaddr_t addr, int ref)
{
	 struct ppage_t *page = ppages + (addr >> PAGE_SHIFT);
	 struct mcs_lock_node_t node;
	 int result = -1;

	 /* The per-page lock keeps the counter consistent. */
	 mcs_acquire(&page->lock, &node);
	 if (page->status != PPAGE_FREE)
	 {
		  page->ref_count += ref;
		  result = 0;
	 }
	 mcs_release(&page->lock, &node);

	 return result;
}

/* Return the page containing ADDR to the current CPU's free list; when
 * that list grows past PPAGE_ALLOC_BATCH_COUNT, drain one batch back
 * to the global list under ppage_alloc_lock.  The page's status is not
 * changed here; it is reset on the next ppage_alloc. */
void
ppage_free(physaddr_t addr)
{
	 int cpu = cur_cpu();
	 struct ppage_t *ppage = &ppages[addr >> PAGE_SHIFT];
	 
	 ppage->next = ppage_fh_per_cpu[cpu];
	 ppage_fh_per_cpu[cpu] = ppage;
	 if (++ ppage_fc_per_cpu[cpu] > PPAGE_ALLOC_BATCH_COUNT)
	 {
		  int i;
		  struct mcs_lock_node_t lock_node;
		  mcs_acquire(&ppage_alloc_lock, &lock_node);
		  for (i = 0; i < PPAGE_ALLOC_BATCH_COUNT; ++ i)
		  {
			   ppage = ppage_fh_per_cpu[cpu];
			   ppage_fh_per_cpu[cpu] = ppage->next;

			   ppage->next = ppage_free_head;
			   ppage_free_head = ppage;

			   --ppage_fc_per_cpu[cpu];
		  }
		  mcs_release(&ppage_alloc_lock, &lock_node);
	 }
}

/* Drop one reference from the page containing ADDR and free the page
 * when the count reaches zero.  Returns 0 on success, or -1 if the
 * page is already free (in which case ref_count is left untouched).
 * NOTE(review): ref_count is unsigned -- decrementing an already-zero
 * count wraps around instead of freeing; callers must keep references
 * balanced. */
int
ppage_dec_ref(physaddr_t addr)
{
	 int err;
     struct ppage_t *ppage = &ppages[addr >> PAGE_SHIFT];
	 struct mcs_lock_node_t lock_node;
	 
     mcs_acquire(&ppage->lock, &lock_node);
     
     uint32_t ref_count;
     if (ppage->status == PPAGE_FREE)
     {
		  err = -1;
		  /* (uint32_t)-1 deliberately fails the == 0 check below. */
		  ref_count = -1;
     }
     else
     {
		  err = 0;
		  ref_count = 
			   -- ppage->ref_count;
	 }
     mcs_release(&ppage->lock, &lock_node);

     /* Free outside the per-page lock to avoid holding two locks. */
     if (ref_count == 0)
     {
		  ppage_free(addr);
     }
     
     return err;
}

/* virtual memory */

/* Map the page containing physical address ADDR at the current CPU's
 * temporary page slot with write permission, and return a virtual
 * pointer carrying ADDR's in-page offset. */
void *
set_tpage(physaddr_t addr)
{
	 physaddr_t page_base = addr & ~(PAGE_SIZE - 1);
	 void *mapped = (void *)((physaddr_t)get_tpage() | PAGE_OFF(addr));

	 vpt[PAGE_NUM(mapped)] = page_base | PTE_W | PTE_P;
	 invlpg(mapped);

	 return mapped;
}

/* Overwrite the page-table entry for VADDR with PTE and flush the
 * stale TLB entry. */
void
set_page_map(void *vaddr, uint32_t pte)
{
	 uint32_t slot = PAGE_NUM(vaddr);

	 vpt[slot] = pte;
	 invlpg(vaddr);
}

/* Address translation through the recursive VPD/VPT mapping; the
 * "trick lock" helpers for the page table follow below. */
/* Translate virtual address ADDR to its physical address via the
 * recursive VPD/VPT mapping, preserving the in-page offset.
 * Returns 0 when either level of the mapping is absent. */
physaddr_t
phys_addr(void *addr)
{
	 if (!(vpd[PAGEDIR_IDX(addr)] & PTE_P))
		  return 0;
	 if (!(vpt[PAGE_NUM(addr)] & PTE_P))
		  return 0;
	 return PTE_ADDR(vpt[PAGE_NUM(addr)]) | PAGE_OFF(addr);
}

/* Lock the page-directory slot IDX via its spin lock.
 * BUG FIX: the function is declared to return uint32_t but fell off
 * the end without a return statement -- undefined behavior if the
 * value is ever used.  It now returns 0 (no caller in this file uses
 * the value; the signature is kept for compatibility). */
inline uint32_t
lock_pde(uint32_t idx)
{
	 spl_acquire(vpd_lock + idx);
	 return 0;
}

/* Release the spin lock of page-directory slot IDX. */
inline void
release_pde(uint32_t idx)
{
	 spl_release(vpd_lock + idx);
}
	 
/* Lock page-table slot IDX by atomically swapping in the PTE_LOCK
 * sentinel, spinning while another CPU holds it.  Returns the previous
 * entry value.  The mapped page is invalidated so stale TLB entries
 * cannot be used while the slot holds the sentinel. */
inline uint32_t
lock_pte(uint32_t idx)
{
	 uint32_t result;
	 while ((result = xchg32(vpt + idx, PTE_LOCK)) == PTE_LOCK) ;
	 invlpg((void *)(idx << PTX_SHIFT));
	 return result;
}

/* Unlock page-table slot IDX by storing VAL (the new entry value) over
 * the PTE_LOCK sentinel, then flush the TLB entry. */
inline void
release_pte(uint32_t idx, uint32_t val)
{
	 xchg32(vpt + idx, val);
	 invlpg((void *)(idx << PTX_SHIFT));
}

/* The fundamental routine for virtual memory mapping management.  The
 * routine maps virtual addresses to physical pages.  The VPT mapping and
 * VPD_LOCK must be correctly initialized beforehand. */

/* addr   -- the virtual address that needs to be processed; need not be page aligned */
/* create -- if the pointer is not NULL, it should point to the desired page-table
 *           entry value; after the call, the value of the slot will be the old
 *           value from the table.  If the physical page address in it is
 *           0, the routine will allocate a new page by itself; otherwise it
 *           will establish the mapping using that page address */
/* int ... -- force replacing the old item with the value in ``create'' */

/* The routine returns a negative result if an error happens, 0 for success,
 * FA_COW for the processing of a COW sign, or FA_AWA for the processing of an
 * AWA sign */
int
__fix_addr(int verbose, void *addr, ptab_entry_t *create, ...)
{
	 uint32_t idx;
	 idx = PAGEDIR_IDX(addr);

	 uint32_t create_rep;           /* old entry value handed back through *create */

	 int err;
	 physaddr_t naddr;              /* freshly allocated page, when one is needed */

	 uint32_t i;
	 int force;

	 if (verbose)
	 {
		  kprintf("[fix_addr] addr = %p, create = %p", addr, create);	 
	 }

	 /* The optional vararg carries the ``force'' flag, read only when a
	  * create request is present. */
	 if (create != NULL)
	 {
		  va_list va;

		  va_start(va, create);
		  force = va_arg(va, int);
		  va_end(va);
		  
		  if (verbose)
		  {
			   kprintf("(%08x), force = %d. ", *create, force);
		  }
	 }
	 else
	 {
		  force = 0;
		  
		  if (verbose)
		  {
			   kprintf(". ");
		  }		  
	 }
	 
	 /* Addresses in the KVPT window are views of page-table pages, so
	  * the fault must be fixed at the directory (L1) level. */
	 if (idx == PAGEDIR_IDX(KVPT))
	 {
		  uint32_t dirbase = idx * PAGETAB_COUNT;
		  idx = PAGETAB_IDX(addr);

		  lock_pde(idx);
		  uint32_t pde = vpd[idx];
		  
		  if (verbose)
		  {
			   kprintf("[L1] pde = %08x", pde);
		  }

		  /* L1 tab should ignore the force flag */
		  /* if (force) */
		  /* { */
		  /* 	   create_rep = pde; */
		  /* 	   goto fa_create_1; */
		  /* } */
		  /* else */
		  if (pde & PTE_P)
		  {
			   if (!PTE_COW_P(pde))
			   {
					err = 0;
					goto fa_exit_1;
			   }

			   if ((err = ppage_alloc(&naddr, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_1;
			   } else ppage_inc_ref(naddr);
			   
			   if (verbose)
			   {
					kprintf("[fix_addr] process COW sign.\n");
			   }
			   /* Map the replacement page-table page at the temporary
			    * slot so it can be filled before installation. */
			   pdir_entry_t *tdir = (pdir_entry_t *)get_tpage();
			   vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
			   invlpg(tdir);

			   /* duplicate the page tab, with processing of the
				* flags */
			   for (i = 0; i != PAGETAB_COUNT; ++i)
			   {
					if (vpt[dirbase + i] & PTE_P)
					{
						 if ((vpt[dirbase | i] & (PTE_W | PTE_COW)) &&
							 !(vpt[dirbase | i] & PTE_SHARED))
						 {
							  /* NOTE(review): this reads vpd[dirbase | i]
							   * while every neighboring access uses
							   * vpt[...] -- looks like a typo for vpt;
							   * confirm before relying on this path. */
							  tdir[i] = (vpd[dirbase | i] | PTE_COW) & ~(uint32_t)PTE_W;
						 
							  /* should not be uncommented */
							  // svpt[i] = tvpt[i];
						 }
						 else
						 {
							  tdir[i] = vpt[dirbase + i];
						 }

						 ppage_inc_ref(PTE_ADDR(tdir[i]));
					} else tdir[i] = 0;
			   }

			   ppage_dec_ref(PTE_ADDR(pde));
			   vpt[PAGE_NUM(tdir)] = 0;
			   pde = (naddr | (pde & PTE_USER) | PTE_W) & ~(uint32_t)PTE_COW;

			   err = FA_COW;
		  }
		  else if (PTE_AWA_P(pde))
		  {
			   if ((err = ppage_alloc(&naddr, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_1;
			   } else ppage_inc_ref(naddr);
			   
			   if (verbose)
			   {
					kprintf("[fix_addr] process AWA sign.\n");
			   }
			   pdir_entry_t *tdir = (pdir_entry_t *)get_tpage();
			   vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
			   invlpg(tdir);

			   /* Set all pages as AWA */
			   for (i = 0; i != PAGETAB_COUNT; ++i)
			   {
					tdir[i] = pde;
			   }

			   vpt[PAGE_NUM(tdir)] = 0;
			   pde = (naddr | (pde & PTE_USER) | PTE_W | PTE_P) & ~(uint32_t)PTE_AWA;

			   err = FA_AWA;
			   
		  }
		  else if (create != NULL)
		  {
			   create_rep = 0;

		  fa_create_1:
			   
			   if (PTE_ADDR(*create))
			   {
					/* Install the caller-supplied entry; hand the old
					 * value back through *create. */
					pde = *create | PTE_P;
					*create = create_rep;

					err = 0;
			   }
			   else
			   {
					/* create a empty page */
					if ((err = ppage_alloc(&naddr, PPAGE_CORE)) < 0)
					{
						 goto fa_exit_1;
					} else ppage_inc_ref(naddr);

					pdir_entry_t *tdir = (pdir_entry_t *)get_tpage();
					vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
					invlpg(tdir);

					memset(tdir, 0, PAGE_SIZE);

					pde = naddr | (*create & PTE_USER) | PTE_P;
					*create = create_rep;
					
					err = 0;
			   }
			   
			   if (verbose)
			   {
					kprintf("CREATED");
			   }
		  }
		  else err = -E_FAULT;

	 fa_exit_1:

		  if (verbose)
		  {
			   kprintf("\n");
		  }

		  vpd[idx] = pde;
		  /* Need not refresh the table, since there is append only modification */
		  release_pde(idx);
	 }
	 else
	 {
		  
		  if (verbose)
		  {
			   kprintf("[L2]");
		  }
		  uint32_t pte;
		  /* Recursively make sure the page TABLE covering addr exists
		   * before touching its entry. */
		  if (create != NULL)
		  {
			   int cp;
			   cp = *create & PTE_USER;

			   /* NOTE(review): &cp is an int* passed where the
			    * parameter is ptab_entry_t* -- only safe if
			    * ptab_entry_t is a 32-bit integer type; confirm. */
			   if ((err = __fix_addr(verbose, (void *)(vpt + PAGE_NUM(addr)), &cp, false)) < 0)
					goto fa_exit;
		  }
		  else
		  {
			   if ((err = __fix_addr(verbose, (void *)(vpt + PAGE_NUM(addr)), NULL)) < 0)
					goto fa_exit;
		  }
		  
		  idx = PAGE_NUM(addr);
		  pte = lock_pte(idx);

		  if (force)
		  {
			   create_rep = pte;
			   goto fa_create_2;
		  }
		  if (pte & PTE_P)
		  {

			   if (!PTE_COW_P(pte))
			   {
					err = 0;
					goto fa_exit_2;
			   }

			   if (verbose)
			   {
					kprintf("[fix_addr] process COW sign.\n");
			   }
			   
			   if ((err = ppage_alloc(&naddr, PPAGE_CACHED)) < 0)
			   {
					goto fa_exit_2;
			   } else ppage_inc_ref(naddr);

			   char *tdpage = (char *)get_tpage();
			   vpt[PAGE_NUM(tdpage)] = naddr | PTE_W | PTE_P;
			   invlpg(tdpage);

			   /* copy the page */
			   memmove(tdpage, PAGE_ALIGN(addr), PAGE_SIZE);

			   ppage_dec_ref(PTE_ADDR(pte));
			   pte = (naddr | (pte & PTE_USER) | PTE_W) & ~(uint32_t)PTE_COW;

			   err = FA_COW;
		  }
		  else if (PTE_AWA_P(pte))
		  {
			   if (verbose)
			   {
					kprintf("[fix_addr] process AWA sign.\n");
			   }
			   
			   if ((err = ppage_alloc(&naddr, PPAGE_CACHED)) < 0)
			   {
					goto fa_exit_2;
			   } else ppage_inc_ref(naddr);

			   char *tdpage = (char *)get_tpage();
			   vpt[PAGE_NUM(tdpage)] = naddr | PTE_W | PTE_P;
			   invlpg(tdpage);

			   /* AWA pages materialize as zero-filled memory. */
			   memset(tdpage, 0, PAGE_SIZE);
			   pte = (naddr | (pte & PTE_USER) | PTE_W | PTE_P) & ~(uint32_t)PTE_AWA;

			   err = FA_AWA;
		  }
		  else if (create != NULL)
		  {
			   create_rep = 0;

		  fa_create_2:
			   
			   if (PTE_ADDR(*create))
			   {
					pte = *create | PTE_P;
					*create = create_rep;

					err = 0;
			   }
			   else
			   {
					if ((err = ppage_alloc(&naddr, PPAGE_CORE)) < 0)
					{
						 goto fa_exit_2;
					} else ppage_inc_ref(naddr);

					pte = naddr | (*create & PTE_USER) | PTE_P;
					*create = create_rep;
					
					err = 0;
			   }

			   if (verbose)
			   {
					kprintf("CREATED");
			   }
		  }
		  else err = -E_FAULT;

		  
	 fa_exit_2:

		  if (verbose)
		  {
			   kprintf("\n");
		  }

		  release_pte(idx, pte);
	 }

fa_exit:
	 
	 if (verbose)
	 {
		  kprintf("[fix_addr] finished err = %d.\n", err);
	 }

	 return err;
}

/* Mark the present page-directory entry IDX as shared (exempt from
 * copy-on-write at fork).  Returns 0, or -E_INVAL when the entry is
 * not present. */
int
vpd_mark_shared(int idx)
{
	 int err = -E_INVAL;

	 lock_pde(idx);
	 if (vpd[idx] & PTE_P)
	 {
		  vpd[idx] |= PTE_SHARED;
		  err = 0;
	 }
	 release_pde(idx);

	 return err;
}

/* Mark the present page-table entry IDX as shared (exempt from
 * copy-on-write at fork).  Returns 0, or -E_INVAL when the entry is
 * not present. */
int
vpt_mark_shared(int idx)
{
	 int err = -E_INVAL;
	 uint32_t entry = lock_pte(idx);

	 if (entry & PTE_P)
	 {
		  entry |= PTE_SHARED;
		  err = 0;
	 }
	 release_pte(idx, entry);

	 return err;
}

/* Mark page-directory entry IDX allocate-when-accessed (AWA) so a user
 * page table materializes lazily on first touch.  Returns 0 on
 * success, or -E_INVAL when the entry is already present. */
int
vpd_alloc_temp(int idx)
{
	 int result;
	 lock_pde(idx);
	 if (vpd[idx] & PTE_P)
	 {
		  result = -E_INVAL;
	 }
	 else
	 {
		  vpd[idx] |= PTE_AWA | PTE_U | PTE_W;
		  /* BUG FIX: result was left uninitialized on this path, so a
		   * successful call returned an indeterminate value. */
		  result = 0;
	 }
	 release_pde(idx);
	 return result;
}

/* Mark page-table entry IDX allocate-when-accessed (AWA): first makes
 * sure the page table covering the slot exists via fix_addr, then sets
 * the AWA bits when the entry is not already present.  Returns
 * -E_INVAL if the entry is present.
 * NOTE(review): on the success path `result' keeps fix_addr's return
 * value, which may be a positive FA_* code rather than 0 -- confirm
 * callers expect that. */
int
vpt_alloc_temp(int idx)
{
	 int result;
	 uint32_t pte = PTE_U | PTE_W;
	 if ((result = fix_addr((void *)(vpt + idx), &pte, 0)) < 0)
		  return result;

	 pte = lock_pte(idx);
	 if (pte & PTE_P)
	 {
		  result = -E_INVAL;
	 }
	 else
	 {
		  pte |= PTE_AWA | PTE_U | PTE_W;
	 }
	 release_pte(idx, pte);
	 return result;
}

/* Duplicate the current address space for fork: allocate a fresh
 * vpd_lock page, a page table for the recursive-mapping region, and a
 * new page directory; mark writable, non-shared user directory entries
 * copy-on-write in BOTH spaces; then switch the current process over
 * to the new vpt page.  Returns 0 or a negative error code. */
int
fork_mem(void)
{
	 int i, j;  /* NOTE(review): j is unused */
	 
	 proc_id_t pid = cur_proc_id();  /* NOTE(review): unused; see `cur' below */
	 cpu_id_t cpu  = cur_cpu();      /* NOTE(review): unused */
	 
	 int err;
	 
	 physaddr_t nvpd_lock;
	 spin_lock_t *tvpd_lock;
	 if ((err = ppage_alloc(&nvpd_lock, PPAGE_CORE)) < 0)
	 {
		  return err;
	 }

	 /* Initialize the lock page for the child's directory.
	  * NOTE(review): the loop below touches the CURRENT space's
	  * vpd_lock, not the freshly mapped tvpd_lock -- looks like it
	  * should be spl_init(tvpd_lock + i); confirm. */
	 BEGIN_TPAGE(nvpd_lock, tvpd_lock);
	 for (i = 0; i != PAGEDIR_COUNT; ++ i)
		  spl_init(vpd_lock + i);
	 END_TPAGE;

	 physaddr_t nvpt;
	 ptab_entry_t *tvpt;
	 if ((err = ppage_alloc(&nvpt, PPAGE_CORE)) < 0)
	 {
		  ppage_free(nvpd_lock);
		  return err;
	 }
	 
	 /* New page table for the fixed-mapping region: the lock page and
	  * the (shared) LAPIC mapping. */
	 BEGIN_TPAGE(nvpt, tvpt);
	 memset(tvpt, 0, PAGE_SIZE);
	 tvpt[PAGETAB_IDX(VPDLCK)] = nvpd_lock  | PTE_W | PTE_P;
	 tvpt[PAGETAB_IDX(LAPIC)]  = vpt[PAGE_NUM(LAPIC)];
	 END_TPAGE;
	 
	 
	 physaddr_t nvpd;
	 pdir_entry_t *tvpd;
	 if ((err = ppage_alloc(&nvpd, PPAGE_CORE)) < 0)
	 {
		  ppage_free(nvpd_lock);
		  ppage_free(nvpt);
		  return err;
	 }

	 BEGIN_TPAGE(nvpd, tvpd);
	 /* Common part: kernel-space entries are shared verbatim. */
	 for (i = PAGEDIR_IDX(UWLIM); i != PAGEDIR_COUNT; ++ i)
	 {
		  tvpd[i] = vpd[i];
	 }
	 /* COW part: writable, non-shared user entries become read-only
	  * copy-on-write in BOTH parent and child. */
	 for (i = 0; i != PAGEDIR_IDX(UWLIM); ++ i)
	 {
		  lock_pde(i);
		  if ((vpd[i] & PTE_P) &&
			  !(vpd[i] & PTE_SHARED) &&
			  (vpd[i] & PTE_W))
		  {
			   vpd[i] &= ~PTE_W;
			   vpd[i] |= PTE_COW;
		  }
		  tvpd[i] = vpd[i];
		  release_pde(i);
		  ppage_inc_ref((physaddr_t)PAGE_ALIGN(tvpd[i]));
	 }

	 /* Wire the new directory's self-referencing slots. */
	 tvpd[PAGEDIR_IDX(VPDLCK)] = nvpt | PTE_W | PTE_P;
	 tvpd[PAGEDIR_IDX(KVPT)]   = nvpd | PTE_W | PTE_P;
     tvpd[PAGEDIR_IDX(UVPT)]   = nvpd | PTE_U | PTE_P;
	 END_TPAGE;

	 proc_id_t cur = cur_proc_id();
	 physaddr_t old_vpt = procs[cur].vpt;
	 procs[cur].vpt = nvpt;

	 ppage_inc_ref(nvpd);
	 ppage_inc_ref(nvpt);
	 ppage_inc_ref(nvpd_lock);

	 ppage_dec_ref(old_vpt);

	 return 0;
}

/* Post a page-fault record into the global page-fault pipe. */
void
mem_enqueue_pf_info(struct pf_info_t *pf)
{
	 kpipe_write(&pf_pipe, pf, sizeof(struct pf_info_t));
}

/* Post a page-fault record and block until it is serviced: the pipe
 * lock is taken manually so the write, the wakeup of the fault manager
 * (PTE_MGR_PID) and this process's wait are combined atomically; the
 * lock is handed to sch_wait for release. */
void
mem_enqueue_pf_info_wait(struct pf_info_t *pf)
{
	 spl_acquire(&pf_pipe.lock);
	 kpipe_write_unsafe(&pf_pipe, pf, sizeof(struct pf_info_t));
	 sch_notify(PTE_MGR_PID(pf->pte), 1);
	 sch_wait(cur_proc_id(), 1, &pf_pipe.lock);
	 call_yield();
}

/* Pull the next page-fault record out of the global page-fault pipe. */
void
mem_dequeue_pf_info(struct pf_info_t *pf)
{
	 kpipe_read(&pf_pipe, pf, sizeof(struct pf_info_t));
}
