#include <framework/framework_i.h>

/* For the naive mapping strategy */
#define PADDR(kva) ((physaddr_t)(kva) - KBASE)
#define KADDR(pa)  (void *)((pa) + KBASE)

/* Global descriptor table: flat 4GB code/data segments for the kernel
 * (DPL 0) and user space (DPL 3).  Entry 0 stays null so stray far
 * pointers fault.  Per-cpu TSS slots are reset in vmem_init(). */
struct segdesc_s gdt[GDT_COUNT] =
{
     /* 0x0 - unused (always faults -- for trapping NULL far pointers) */
     SEG_NULL,

     /* 0x8 - kernel code segment */
     [GD_KERN_TEXT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),

     /* 0x10 - kernel data segment */
     [GD_KERN_DATA >> 3] = SEG(STA_W, 0x0, 0xffffffff, 0),

     /* 0x18 - user code segment */
     [GD_USER_TEXT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),

     /* 0x20 - user data segment */
     [GD_USER_DATA >> 3] = SEG(STA_W, 0x0, 0xffffffff, 3),
};

/* Pseudo-descriptor (limit, base) handed to the lgdt instruction. */
static struct pseudodesc_s gdt_pd =
{
     sizeof(gdt) - 1, (unsigned long) gdt
};

/* Boot-time page directory; the hardware requires page alignment. */
volatile pdir_entry_t init_vpd[PAGEDIR_COUNT] __attribute__((aligned(PAGE_SIZE)));

/* These symbols are defined in kernel.S */
/* {{{ */
/* The mapping of current VPT */
extern volatile ptab_entry_t vpt[];
/* Mapping of VPD inside vpt */
extern volatile pdir_entry_t vpd[];
/* }}} */

/*
 * Walk the boot-time page directory `vpd' for virtual address `va'.
 *
 * vpd    -- page directory to walk (entries hold physical addresses)
 * va     -- virtual address to look up
 * create -- when non-zero, allocate a new page table if the PDE is absent
 * perm   -- permission bits applied to a newly created PDE
 *
 * Returns a kernel-virtual pointer to the PTE slot covering `va', or
 * NULL when the PDE is absent and `create' is zero, or when the page
 * allocation fails.
 */
static ptab_entry_t *
init_vpd_walk(volatile pdir_entry_t *vpd, const void *va, int create, int perm)
{
     uintptr_t pdx, ptx;
     pdx = PAGEDIR_IDX(va);
     ptx = PAGETAB_IDX(va);

     if (vpd[pdx] & PTE_P)
     {
		  /* PDE already present -- do nothing.  (Deliberately do NOT
		   * widen its permission bits.) */
     }
     else if (create)
     {
		  /* Renamed from `vpt' to stop shadowing the global vpt[]. */
		  physaddr_t pt_pa;
		  /* suppose in the initializing stage, all memory can be
		   * allocated in the kernel addressing area (0 ~ 128MB) */
		  if (pmem_bp_page_alloc(1, &pt_pa, 0, PPAGE_CORE) < 0) return NULL;

		  memset(KADDR(pt_pa), 0, PAGE_SIZE);
		  vpd[pdx] = pt_pa | perm | PTE_P;
		  pmem_page_inc_ref(pt_pa);
     }
     else
     {
		  return NULL;
     }

     return (ptab_entry_t *)KADDR(PTE_ADDR(vpd[pdx])) + ptx;
}

/*
 * Map the physical range [pa, pa + size) at linear address `la' in the
 * given boot-time page directory, rounding the length up to whole
 * pages.  Returns 0 on success or -E_NO_MEM when a page table cannot
 * be allocated.
 */
static int
init_map_segment(volatile pdir_entry_t *vpd, uintptr_t la, size_t size, physaddr_t pa, int perm)
{
     uintptr_t done, total;

     if (size == 0) return 0;

     /* Round the byte count up to a whole number of pages. */
     total = ((size - 1) >> PAGE_SHIFT) + 1;

     for (done = 0; done < total; ++done, la += PAGE_SIZE, pa += PAGE_SIZE)
     {
		  ptab_entry_t *slot = init_vpd_walk(vpd, (void *)la, 1, perm);

		  if (slot == NULL) return -E_NO_MEM;

		  *slot = pa | perm | PTE_P;
     }

     return 0;
}

/* defined in framework.S */
extern char init_fake_lapic[];

/*
 * Build the initial kernel page directory, switch paging on, and
 * reload the segment registers on the bootstrap processor.
 * Returns 0 on success or a negative error code.
 */
int
vmem_init(void)
{
	 int err, i;
	 physaddr_t init_cr3;

	 memset((void *)init_vpd, 0, PAGEDIR_COUNT * sizeof(pdir_entry_t));
	 init_cr3 = PADDR(init_vpd);

	 /* Recursive mapping: the directory doubles as its own page table */
     init_vpd[PAGEDIR_IDX(KVPT)] = PADDR(init_vpd)      | PTE_W | PTE_P;
     init_vpd[PAGEDIR_IDX(UVPT)] = PADDR(init_vpd)      | PTE_U | PTE_P;

     /* map kernel area */
     if ((err = init_map_segment(init_vpd, KBASE, KISIZE, 0, PTE_W)) < 0)
	 {
		  return err;
	 }

	 /* init fake lapic */
	 if ((err = init_map_segment(init_vpd, LAPIC, PAGE_SIZE, PADDR(init_fake_lapic), PTE_W)) < 0)
	 {
		  return err;
	 }

	 /* map utask area.
	  * BUG FIX: the closing parenthesis used to sit inside the
	  * comparison -- ``err = (call < 0)'' -- so a failing call
	  * returned 1 instead of the negative error code. */
	 if ((err = init_map_segment(init_vpd, UTASK,
	 							 LAPIC_COUNT * TS_USIZE,
	 							 PADDR(tasks_start), PTE_U)) < 0)
	 {
	 	  return err;
	 }

	 /* trick mapping so the cpu survives the instant paging is
	  * switched on; removed again below once %cs is reloaded */
     init_vpd[PAGEDIR_IDX(_text - KBASE)] = init_vpd[PAGEDIR_IDX(_text)];

	 /* Initialize tss segment for each cpu */
	 for (i = 0; i != ekf_sysconf.lcpu_count; ++ i)
	 {
		  gdt[GD_TSS(i) >> 3] = SEG_NULL;
	 }

	 /* Install page table. */
	 lcr3(init_cr3);

	 uint32_t cr0;
	 cr0 = rcr0();
	 cr0 |= CR0_PG; // | CR0_NW | CR0_CD;
     cr0 &= ~(CR0_TS|CR0_EM);

	 /* Use lcr0() as vmem_init_ap() does; the previous pair of
	  * separate asm statements silently relied on %eax surviving
	  * between two independent asm blocks. */
	 lcr0(cr0);

     // Reload all segment registers.
     __asm__ __volatile__("lgdt (%0)" : : "r"(&gdt_pd));
     __asm__ __volatile__("movw %%ax,%%gs" :: "a" (GD_USER_DATA|3));
     __asm__ __volatile__("movw %%ax,%%fs" :: "a" (GD_USER_DATA|3));
     __asm__ __volatile__("movw %%ax,%%es" :: "a" (GD_KERN_DATA));
     __asm__ __volatile__("movw %%ax,%%ds" :: "a" (GD_KERN_DATA));
     __asm__ __volatile__("movw %%ax,%%ss" :: "a" (GD_KERN_DATA));
     __asm__ __volatile__("ljmp %0,$1f\n 1:\n" :: "i" (GD_KERN_TEXT));  // reload cs
     __asm__ __volatile__("lldt %%ax" :: "a" (0));

	 /* Drop the identity trick mapping and reload cr3 to flush it. */
	 init_vpd[PAGEDIR_IDX(_text - KBASE)] = 0;
     lcr3(init_cr3);

	 pmem_bp_fix();
     return 0;
}

/*
 * Enable paging and reload the segment state on an application
 * processor, then switch to the page directory given by `cr3'.
 * Always returns 0.
 * BUG FIX: the function is declared int but previously fell off the
 * end without a return statement -- undefined behavior if the caller
 * inspected the result (C99 6.9.1).
 */
int
vmem_init_ap(physaddr_t cr3)
{
	 uint32_t cr0;
	 cr0 = rcr0();
	 cr0 |= CR0_PG; // | CR0_NW | CR0_CD;
     cr0 &= ~(CR0_TS|CR0_EM);

     lcr0(cr0);

	 __asm__ __volatile__("lgdt (%0)" : : "r"(&gdt_pd));
	 /* NOTE(review): vmem_init() loads %gs/%fs with GD_USER_DATA|3
	  * (RPL 3) while this path uses plain GD_USER_DATA -- confirm the
	  * difference is intentional. */
     __asm__ __volatile__("movw %%ax,%%gs" :: "a" (GD_USER_DATA));
     __asm__ __volatile__("movw %%ax,%%fs" :: "a" (GD_USER_DATA));
     __asm__ __volatile__("movw %%ax,%%es" :: "a" (GD_KERN_DATA));
     __asm__ __volatile__("movw %%ax,%%ds" :: "a" (GD_KERN_DATA));
     __asm__ __volatile__("movw %%ax,%%ss" :: "a" (GD_KERN_DATA));
     __asm__ __volatile__("ljmp %0,$1f\n 1:\n" :: "i" (GD_KERN_TEXT));  // reload cs
     __asm__ __volatile__("lldt %%ax" :: "a" (0));
	 lcr3(cr3);

	 return 0;
}

/*
 * Late virtual-memory fix-ups that need the fix_addr() machinery:
 * map (or fake) the HPET, map the local APIC (or alias the fake one),
 * and expose the sysconf page read-only to user space.
 * Returns 0 on success or a negative error code.
 *
 * FIX: the hpet and lapic branches each declared a second ``int err''
 * that shadowed the function-scope one; the shadowing declarations
 * are removed (behavior unchanged -- both paths returned before the
 * outer err was ever read).
 */
int
vmem_post_fix(void)
{
	 int err;

	 if (ekf_sysconf.has_hpet)
	 {
		  /* CACHE DISABLE ? */
		  uint32_t pte = ekf_sysconf.hpet_phys | PTE_W | /* PTE_PWT | PTE_PCD | */ PTE_P;
		  if ((err = fix_addr((void *)hpet, &pte, 1)) < 0)
		  {
			   return err;
		  }

		  kprintf("hpet %08x mapped to %08x\n", hpet, ekf_sysconf.hpet_phys);
	 }
	 else
	 {
		  physaddr_t fake;
		  if ((err = pmem_page_alloc(1, &fake, 0, PPAGE_CORE)) < 0)
			   return err;
		  /* STILL ABOUT THE CACHE */
		  uint32_t pte = fake | PTE_W | /* PTE_PWT | PTE_PCD | */ PTE_P;
		  if ((err = fix_addr((void *)hpet, &pte, 1)) < 0)
		  {
			   return err;
		  }
		  else memset((void *)hpet, 0, PAGE_SIZE);

		  kprintf("hpet %08x (fake)mapped to %08x\n", hpet, fake);
	 }

	 if (ekf_sysconf.use_lapic)
	 {
		  /* STILL ABOUT THE CACHE */
		  /* (PTE_P is supplied by fix_addr() itself when it installs
		   * the entry, hence its absence here) */
		  uint32_t pte = ekf_sysconf.lapic_phys | PTE_W /* | PTE_PWT | PTE_PCD */;
		  if ((err = fix_addr((void *)lapic, &pte, 1)) < 0)
		  {
			   return err;
		  }
		  /* Map it to ULAPIC */
		  vpt[PAGE_NUM(ULAPIC)] = ekf_sysconf.lapic_phys | PTE_U | /* PTE_PCD | */ PTE_P;
	 }
	 else
	 {
		  /* Map the fake one to ULAPIC */
		  vpt[PAGE_NUM(ULAPIC)] = PTE_ADDR(vpt[PAGE_NUM(LAPIC)]) | PTE_U | /* PTE_PCD | */ PTE_P;
	 }

	 /* Map sysconf */
	 vpt[PAGE_NUM(USYSCONF)] = PADDR(&sysconf) | PTE_U | PTE_P;

	 return 0;
}

/*
 * Allocate and map the per-cpu private page at CPUPRIV (writable,
 * kernel) and alias it read-only at UCPUPRIV for user space.
 * Returns 0 on success or a negative error code.
 * BUG FIX: declared int but the success path previously fell off the
 * end without returning a value (undefined behavior for the caller).
 */
int
vmem_init_cpupriv(void)
{
	 int err;

	 physaddr_t paddr;
	 if ((err = pmem_page_alloc(1, &paddr, 0, PPAGE_CORE)) < 0)
		  return err;
	 /* PTE_P is supplied by fix_addr() when it installs the entry */
	 uint32_t pte = paddr | PTE_W;
	 if ((err = fix_addr((void *)CPUPRIV, &pte, 1)) < 0)
	 {
		  pmem_page_free(paddr);
		  return err;
	 }
	 else memset((void *)CPUPRIV, 0, PAGE_SIZE);
	 vpt[PAGE_NUM(UCPUPRIV)] = paddr | PTE_U | PTE_P;

	 return 0;
}


/*
 * Temporarily map the physical page containing `addr' at the KTMP
 * window and return a pointer that preserves the original in-page
 * offset.
 */
void *
set_tpage(physaddr_t addr)
{
	 physaddr_t page = addr & ~(PAGE_SIZE - 1);
	 void *window = (void *)((physaddr_t)KTMP | PAGE_OFF(addr));

	 vpt[PAGE_NUM(window)] = page | PTE_W | PTE_P;
	 invlpg(window);

	 return window;
}

/* Install `pte' as the page-table entry covering `vaddr' and flush the
 * stale TLB entry for that page. */
void
vpt_set(void *vaddr, uint32_t pte)
{
	 uintptr_t slot = PAGE_NUM(vaddr);

	 vpt[slot] = pte;
	 invlpg(vaddr);
}

/* Fetch the page-table entry covering `vaddr'; yields 0 when no page
 * table is present for its directory slot. */
ptab_entry_t
vpt_get(void *vaddr)
{
	 if (!(vpd[PAGEDIR_IDX(vaddr)] & PTE_P))
		  return 0;

	 return vpt[PAGE_NUM(vaddr)];
}

/*
 * Point `vaddr' at physical page `paddr' with the requested write/user
 * permissions; a zero (page-aligned) `paddr' clears the mapping
 * instead.  The TLB entry is flushed either way.
 */
void
mmap_set(void *vaddr, physaddr_t paddr, int write, int user)
{
	 paddr &= ~(physaddr_t)(PAGE_SIZE - 1);

	 if (paddr != 0)
	 {
		  physaddr_t entry = paddr | PTE_P;

		  if (write) entry |= PTE_W;
		  if (user)  entry |= PTE_U;

		  /* NOTE(review): fix_addr() can fail; its result is ignored
		   * here, matching the original code. */
		  fix_addr(vaddr, &entry, 1);
	 }
	 else if (vpd[PAGEDIR_IDX(vaddr)] & PTE_P)
	 {
		  vpt[PAGE_NUM(vaddr)] = 0;
	 }

	 invlpg(vaddr);
}

/*
 * Decompose the mapping of `vaddr' into its physical page address and
 * write/user flags; a non-present entry reports as all zeroes.  Any of
 * the out-pointers may be NULL.
 */
void
mmap_get(void *vaddr, physaddr_t *paddr, int *write, int *user)
{
	 uint32_t entry = vpt_get(vaddr);

	 if (!(entry & PTE_P)) entry = 0;

	 if (paddr != NULL) *paddr = PTE_ADDR(entry);
	 if (write != NULL) *write = (entry & PTE_W) ? 1 : 0;
	 if (user  != NULL) *user  = (entry & PTE_U) ? 1 : 0;
}

/* The fundamental routine for virtual memory mapping management. The
 * routine maps virtual addresses to physical pages. Assumes the VPT
 * mapping and VPD_LOCK are correctly initialized. */

/* addr   -- the virtual address to be processed; need not be page aligned */
/* create -- if the pointer is not null, it points to the page table entry value; */
/*           after the call, the slot holds the old value from the
 *           table. If the page physical address in it is 0, the
 *           routine allocates a new page by itself; otherwise it
 *           installs the mapping using that page address */
/* int ... -- force replacing the old item with the value in ``create'' */

/* The routine returns a negative result if an error happens, 0 for
 * success, FA_COW after processing a COW sign, or FA_AWA after
 * processing an AWA sign */
/* Fix the mapping for ``addr'' per the contract described above:
 * handle COW / AWA signs and optional creation.  The variadic argument
 * (present only when ``create'' != NULL) is the ``force'' flag. */
int
__fix_addr(int verbose, void *addr, ptab_entry_t *create, ...)
{
	 uint32_t idx;
	 idx = PAGEDIR_IDX(addr);

	 uint32_t create_rep;

	 int err;
	 physaddr_t naddr;

	 uint32_t i;
	 int force;

	 if (verbose)
	 {
		  kprintf("[fix_addr] addr = %p, create = %p", addr, create);
	 }

	 /* The optional 4th argument (force) only exists when create is
	  * non-NULL, so it is read conditionally. */
	 if (create != NULL)
	 {
		  va_list va;

		  va_start(va, create);
		  force = va_arg(va, int);
		  va_end(va);

		  if (verbose)
		  {
			   kprintf("(%08x), force = %d. ", *create, force);
		  }
	 }
	 else
	 {
		  force = 0;

		  if (verbose)
		  {
			   kprintf(". ");
		  }
	 }

	 /* addr falls inside the recursive VPT window: we are fixing a
	  * page-DIRECTORY entry (level 1). */
	 if (idx == PAGEDIR_IDX(KVPT))
	 {
		  uint32_t dirbase = idx * PAGETAB_COUNT;
		  idx = PAGETAB_IDX(addr);

		  uint32_t flags = irq_save();
		  uint32_t pde = vpd[idx];
		  if (verbose)
		  {
			   kprintf("[L1] pde = %08x", pde);
		  }

		  /* L1 tab should ignore the force flag */
		  /* if (force) */
		  /* { */
		  /* 	   create_rep = pde; */
		  /* 	   goto fa_create_1; */
		  /* } */
		  /* else */
		  if (pde & PTE_P)
		  {
			   /* Present and not COW: nothing to fix. */
			   if (!PTE_COW_P(pde))
			   {
					err = 0;
					goto fa_exit_1;
			   }

			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_1;
			   } else pmem_page_inc_ref(naddr);

			   if (verbose)
			   {
					kprintf("[fix_addr] process COW sign.\n");
			   }
			   /* Map the fresh page at the KTMP scratch window so it
				* can be filled before being installed as the new
				* page table. */
			   pdir_entry_t *tdir = (pdir_entry_t *)KTMP;
			   vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
			   invlpg(tdir);

			   /* duplicate the page tab, with processing of the
				* flags */
			   for (i = 0; i != PAGETAB_COUNT; ++i)
			   {
					if (vpt[dirbase + i] & PTE_P)
					{
						 /* ``dirbase | i'' equals ``dirbase + i''
						  * here (dirbase is a multiple of
						  * PAGETAB_COUNT and i < PAGETAB_COUNT). */
						 if ((vpt[dirbase | i] & (PTE_W | PTE_COW)) &&
							 !(vpt[dirbase | i] & PTE_SHARED))
						 {
							  /* NOTE(review): the sibling branch below
							   * reads vpt[dirbase + i], but this line
							   * reads vpd[dirbase | i] -- an index far
							   * past the PAGEDIR_COUNT-entry directory.
							   * Looks like a typo for vpt; confirm
							   * against the vpd placement in kernel.S
							   * before relying on this path. */
							  tdir[i] = (vpd[dirbase | i] | PTE_COW) & ~(uint32_t)PTE_W;

							  /* should not be uncommented */
							  // svpt[i] = tvpt[i];
						 }
						 else
						 {
							  tdir[i] = vpt[dirbase + i];
						 }

						 pmem_page_inc_ref(PTE_ADDR(tdir[i]));
					} else tdir[i] = 0;
			   }

			   /* Release the old (shared) table and switch the PDE to
				* the writable private copy. */
			   pmem_page_dec_ref(PTE_ADDR(pde));
			   vpt[PAGE_NUM(tdir)] = 0;
			   pde = (naddr | (pde & PDE_USER) | PTE_W) & ~(uint32_t)PTE_COW;

			   err = FA_COW;
		  }
		  else if (PTE_AWA_P(pde))
		  {
			   /* Allocate-when-accessed: materialize a table whose
				* entries all carry the AWA sign of the old PDE. */
			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_1;
			   } else pmem_page_inc_ref(naddr);

			   if (verbose)
			   {
					kprintf("[fix_addr] process AWA sign.\n");
			   }
			   pdir_entry_t *tdir = (pdir_entry_t *)KTMP;
			   vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
			   invlpg(tdir);

			   /* Set all pages as AWA */
			   for (i = 0; i != PAGETAB_COUNT; ++i)
			   {
					tdir[i] = pde;
			   }

			   vpt[PAGE_NUM(tdir)] = 0;
			   pde = (naddr | (pde & PDE_USER) | PTE_W | PTE_P) & ~(uint32_t)PTE_AWA;

			   err = FA_AWA;

		  }
		  else if (create != NULL)
		  {
			   create_rep = 0;

			   /* NOTE(review): this label's only goto (the force path
				* above) is commented out, so the label is currently
				* unused -- expect a compiler warning. */
		  fa_create_1:

			   /* Caller supplied a concrete page: install it and hand
				* the old entry value back through *create. */
			   if (PTE_ADDR(*create))
			   {
					pde = *create | PTE_P;
					*create = create_rep;

					err = 0;
			   }
			   else
			   {
					/* create a empty page */
					if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
					{
						 goto fa_exit_1;
					}
					else pmem_page_inc_ref(naddr);

					pdir_entry_t *tdir = (pdir_entry_t *)KTMP;
					vpt[PAGE_NUM(tdir)] = naddr | PTE_W | PTE_P;
					invlpg(tdir);

					memset(tdir, 0, PAGE_SIZE);

					pde = naddr | (*create & PDE_USER) | PTE_P;
					*create = create_rep;

					err = 0;
			   }

			   if (verbose)
			   {
					kprintf("CREATED");
			   }
		  }
		  else err = -E_FAULT;

	 fa_exit_1:

		  vpd[idx] = pde;
		  /* Need not refresh the table, since there is append only modification */
		  /* MAGIC comment? */
		  irq_restore(flags);

		  if (verbose)
		  {
			   kprintf("\n vpd[] = %08x\n", pde);
		  }

	 }
	 else
	 {
		  /* Level 2: ordinary address.  First recurse on the VPT slot
		   * that maps addr's page table, so the PTE below is reachable. */

		  if (verbose)
		  {
			   kprintf("[L2]");
		  }

		  uint32_t pte;
		  if (create != NULL)
		  {
			   /* NOTE(review): cp is declared int but fix_addr's
				* create parameter is ptab_entry_t * -- the &cp
				* argument is an incompatible pointer type if
				* ptab_entry_t is not int; verify the typedef. */
			   int cp;
			   cp = *create & PDE_USER;

			   if ((err = __fix_addr(verbose, (void *)(vpt + PAGE_NUM(addr)), &cp, false)) < 0)
					goto fa_exit;
		  }
		  else
		  {
			   if ((err = __fix_addr(verbose, (void *)(vpt + PAGE_NUM(addr)), NULL)) < 0)
					goto fa_exit;
		  }

		  idx = PAGE_NUM(addr);
		  uint32_t flags = irq_save();
		  pte = vpt[idx];

		  /* force: unconditionally replace the entry with *create. */
		  if (force)
		  {
			   create_rep = pte;
			   goto fa_create_2;
		  }
		  if (pte & PTE_P)
		  {

			   if (!PTE_COW_P(pte))
			   {
					err = 0;
					goto fa_exit_2;
			   }

			   if (verbose)
			   {
					kprintf("[fix_addr] process COW sign.\n");
			   }

			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_2;
			   }
			   else pmem_page_inc_ref(naddr);

			   /* Copy the shared page into a private one via the KTMP
				* scratch window, then retarget the PTE. */
			   char *tdpage = (char *)KTMP;
			   vpt[PAGE_NUM(tdpage)] = naddr | PTE_W | PTE_P;
			   invlpg(tdpage);

			   /* copy the page */
			   memmove(tdpage, PAGE_ALIGN(addr), PAGE_SIZE);

			   pmem_page_dec_ref(PTE_ADDR(pte));
			   pte = (naddr | (pte & PTE_USER) | PTE_W) & ~(uint32_t)PTE_COW;

			   err = FA_COW;
		  }
		  else if (PTE_AWA_P(pte))
		  {
			   if (verbose)
			   {
					kprintf("[fix_addr] process AWA sign.\n");
			   }

			   if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
			   {
					goto fa_exit_2;
			   }
			   else pmem_page_inc_ref(naddr);

			   char *tdpage = (char *)KTMP;
			   vpt[PAGE_NUM(tdpage)] = naddr | PTE_W | PTE_P;
			   invlpg(tdpage);

			   /* copy the page */
			   memset(tdpage, 0, PAGE_SIZE);
			   pte = (naddr | (pte & PTE_USER) | PTE_W | PTE_P) & ~(uint32_t)PTE_AWA;

			   err = FA_AWA;
		  }
		  else if (create != NULL)
		  {
			   create_rep = 0;

		  fa_create_2:

			   /* Caller supplied a concrete page: install it and hand
				* the old entry value back through *create. */
			   if (PTE_ADDR(*create))
			   {
					pte = *create | PTE_P;
					*create = create_rep;

					err = 0;
			   }
			   else
			   {
					if ((err = pmem_page_alloc(1, &naddr, 0, PPAGE_CORE)) < 0)
					{
						 goto fa_exit_2;
					}
					else pmem_page_inc_ref(naddr);

					pte = naddr | (*create & PTE_USER) | PTE_P;
					*create = create_rep;

					err = 0;
			   }

			   if (verbose)
			   {
					kprintf("CREATED");
			   }
		  }
		  else err = -E_FAULT;


	 fa_exit_2:

		  vpt[idx] = pte;
		  invlpg((void *)(idx << PTX_SHIFT));
		  irq_restore(flags);

		  if (verbose)
		  {
			   kprintf("\n vpt[] = %08x\n", pte);
		  }

	 }

fa_exit:

	 if (verbose)
	 {
		  kprintf("[fix_addr] finished err = %d.\n", err);
	 }

	 return err;
}

/* Allocate and initialize a fresh page directory: zeroed user half,
 * the kernel mappings (>= UWLIM) copied from the current directory,
 * optional fresh KSHARE page tables when ``new_share'' is set, and the
 * recursive KVPT/UVPT self-mappings.  Returns the physical address of
 * the new directory.
 * NOTE(review): on failure the negative error code is returned through
 * the (presumably unsigned) physaddr_t return type -- callers must
 * cast back to a signed type to detect it; confirm. */
physaddr_t
vmem_vpt_new(int new_share)
{
	 int i;
	 int err;

	 physaddr_t nvpd;
	 pdir_entry_t *tvpd;
	 if ((err = pmem_page_alloc(1, &nvpd, 0, PPAGE_CORE)) < 0)
	 {
		  return err;
	 }
	 

	 /* NOTE(review): a second BEGIN_TPAGE is issued below while this
	  * tvpd mapping is still in use; if the TPAGE macros share the
	  * single KTMP window (as set_tpage() suggests), the nested use
	  * clobbers tvpd's mapping before the writes at the end of this
	  * function -- confirm against the macro definitions. */
	 BEGIN_TPAGE(nvpd, tvpd);
	 memset(tvpd, 0, PAGE_SIZE);
	 /* Common part */
	 for (i = PAGEDIR_IDX(UWLIM); i != PAGEDIR_COUNT; ++ i)
	 {
		  tvpd[i] = vpd[i];
	 }

	 if (new_share)
	 {
		  physaddr_t share_vpt[KSHARE_SIZE >> PDX_SHIFT];
   
		  for (i = 0; i != KSHARE_SIZE >> PDX_SHIFT; ++ i)
		  {
			   if ((err = pmem_page_alloc(1, &share_vpt[i], 0, PPAGE_CORE)) < 0)
			   {
					pmem_page_free(nvpd);
					
					int j;
					for (j = 0; j != i; ++ j)
					{
						 pmem_page_free(share_vpt[j]);
					}

					/* NOTE(review): every other exit here uses
					 * END_TPAGE; verify EXIT_TPAGE is the intended
					 * variant and not a stale macro name. */
					EXIT_TPAGE;
					return err;
			   }

			   /* This do the clear work, which is slow :( */
			   char *tmp;
			   BEGIN_TPAGE(share_vpt[i], tmp);
			   memset(tmp, 0, PAGE_SIZE);
			   END_TPAGE;
			   
			   tvpd[PAGEDIR_IDX(KSHARE) + i] = share_vpt[i] | PTE_U | PTE_W | PTE_P;
		  }
	 }

	 /* Recursive self-mapping of the new directory. */
	 tvpd[PAGEDIR_IDX(KVPT)]    = nvpd | PTE_W | PTE_P;
     tvpd[PAGEDIR_IDX(UVPT)]    = nvpd | PTE_U | PTE_P;
	 END_TPAGE;

	 pmem_page_inc_ref(nvpd);

	 return nvpd;
}
