#include "mmap.h"
#include "elf.h"
#include "riscv.h"
#include "memory.h"
#include "proc.h"
#include <string.h>

// ELF metadata for the currently loaded program (defined elsewhere);
// read by __valid_user_range for the mmap upper bound.
extern elf_info current_elf_msg;

// Kernel root page table; walked by walk_root_pagetable / map_kernel_range.
pte_t* root_page_table;



// Demand paging is on by default; presumably cleared when the -p flag
// is given (per the original comment) — confirm against boot code.
int demand_paging = 1; // unless -p flag is given



uint64 va2pa(const void * addr){
  uint64 va=(uint64)addr;
  if (va>DRAM_BASE)
  {
    return va;
  }else
  {
    uint64 offset=va%PGSIZE;
    struct proc *p=myproc();
    pte_t *pte =walk_user_pagetable(p->pagetable,va,0);
    uint64 pa=PTE2PA(*pte);
    if(DEBUG_MMAP){
      printk("va %p pa %p offset %p\n",va,pa,offset);
    }

    return pa|offset;
  }
}

// Demand-paging fault handler: allocate one zeroed physical page and
// map it at the page containing `fault_vaddr` with full user RWX
// permission.  `plot` is accepted but unused.  Panics on an address
// above the user stack top or on allocation/mapping failure; returns 0
// on success.
int handle_page_fault(uint64 fault_vaddr ,int plot){
  // Anything above USER_STACK can never be a legal user address.
  if(fault_vaddr > USER_STACK){
      printk("page fault vaddr:%p ", fault_vaddr);
      panic("bad vaddr");
  }
  if(DEBUG_PAGING)
      printk("page fault vaddr:%p \n", fault_vaddr);
  uint64 aligned_va = PGROUNDDOWN(fault_vaddr);
  char *mem;
  mem=(char *)pmm_manager.alloc_pages(1);
  if(mem==0){
      panic("trap : mem alloc failed");  
  }
  // Zero-fill so the process never observes stale memory contents.
  memset(mem, 0, PGSIZE);
  // Map with user R|W|X; A/D bits are pre-set by prot_to_type.
  if(mappages(myproc()->pagetable, aligned_va, PGSIZE, (uint64)mem, prot_to_type(PROT_EXEC|PROT_READ|PROT_WRITE,1)) != 0){
      panic("trap : mappages failed");  
  }
  return 0;
}

// Extract the physical page number field from a page-table entry.
static size_t pte_ppn(pte_t pte)
{
  size_t page_number = pte >> PTE_PPN_SHIFT;
  return page_number;
}

// Physical page number of a physical address.
static uintptr_t ppn(uintptr_t addr)
{
  uintptr_t page_number = addr >> RISCV_PGSHIFT;
  return page_number;
}

// Index of `addr` within the page-table page at the given `level`
// (level 0 is the leaf level).
static size_t pt_idx(uintptr_t addr, int level)
{
  size_t shift = RISCV_PGLEVEL_BITS * level + RISCV_PGSHIFT;
  size_t mask = (1 << RISCV_PGLEVEL_BITS) - 1;
  return (addr >> shift) & mask;
}

// Build a valid PTE for physical page `ppn` with permission bits `type`.
static inline pte_t pte_create(uintptr_t ppn, int type)
{
  pte_t entry = ppn << PTE_PPN_SHIFT;
  entry |= PTE_V;
  entry |= type;
  return entry;
}

// Build a page-directory (non-leaf) entry: valid bit set, no R/W/X bits.
static inline pte_t ptd_create(uintptr_t ppn)
{
  return (pte_t)(ppn << PTE_PPN_SHIFT) | PTE_V;
}

// Walk the kernel root page table for `addr` and return a pointer to
// the leaf (level-0) PTE.  If an intermediate directory is missing:
// when `create` is non-zero, allocate one, install it, and restart the
// whole walk; otherwise return 0.
static pte_t* walk_root_pagetable(uintptr_t addr, int create)
{
  pte_t* t = root_page_table;
  // Iterate from the top level down to level 1; level 0 holds the leaf.
  for (int i = (VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS - 1; i > 0; i--) {
    size_t idx = pt_idx(addr, i);
    if (unlikely(!(t[idx] & PTE_V))){
      if(create){
        // NOTE(review): the freshly allocated directory page is not
        // zeroed here — assumes pmm_manager.alloc_pages returns
        // zero-filled pages; verify, or stale bits may read as valid PTEs.
        t[idx] = ptd_create(ppn(pmm_manager.alloc_pages(1)));
        // Restart the walk now that the directory exists.
        return walk_root_pagetable(addr, 1);
      }else{
        return 0;
      }
      //return create ? walk_create(addr, &t[idx]) : 0;
    }
    // Descend: directory PTEs store the child table's PPN; physical
    // memory is assumed directly addressable by the kernel.
    t = (pte_t*)(pte_ppn(t[idx]) << RISCV_PGSHIFT);
  }
  return &t[pt_idx(addr, 0)];
}

// True when `vaddr` has no live mapping in the kernel root page table.
static int va_avail(uintptr_t vaddr)
{
  pte_t *entry = walk_root_pagetable(vaddr, 0);
  if (entry == 0)
    return 1;  // intermediate directory missing: definitely unmapped
  return *entry == 0;
}

// Translate mmap-style PROT_* bits into RISC-V PTE permission bits.
// Accessed/Dirty bits are pre-set so hardware that faults on first
// access/write does not trap.  With no prot bits at all, fall back to
// read-only.  `user` adds the U bit.
// NOTE(review): PROT_WRITE alone yields W without R, a combination the
// RISC-V privileged spec reserves — confirm callers always pass
// PROT_READ together with PROT_WRITE.
pte_t prot_to_type(int prot, int user)
{
  pte_t pte = 0;
  if (prot & PROT_READ) pte |= PTE_R | PTE_A;
  if (prot & PROT_WRITE) pte |= PTE_W | PTE_D;
  if (prot & PROT_EXEC) pte |= PTE_X | PTE_A;
  if (pte == 0) pte = PTE_R;
  if (user) pte |= PTE_U;
  return pte;
}


// Check that [vaddr, vaddr+len) neither wraps around the address space
// nor extends past the mmap limit recorded at ELF load time.
int __valid_user_range(uintptr_t vaddr, size_t len)
{
  uintptr_t end = vaddr + len;
  if (end < vaddr)
    return 0;  // arithmetic wrapped: range is bogus
  return end <= current_elf_msg.mmap_max;
}

// Map `len` bytes (rounded up to whole pages) of kernel virtual space
// starting at `vaddr` onto physical memory starting at `paddr`, with
// the given PROT_* permissions, into the root page table.
void map_kernel_range(uintptr_t vaddr, uintptr_t paddr, size_t len, int prot)
{
  uintptr_t npages = ROUNDUP(len, RISCV_PGSIZE) / RISCV_PGSIZE;
  uintptr_t delta = paddr - vaddr;  // constant va -> pa displacement
  uintptr_t va = vaddr;
  for (uintptr_t count = 0; count < npages; count++) {
    pte_t *pte = walk_root_pagetable(va, 1);
    kassert(pte);
    *pte = pte_create((va + delta) >> RISCV_PGSHIFT, prot_to_type(prot, 0));
    va += RISCV_PGSIZE;
  }
}





// Slow path for walk_user_pagetable: install a new page-directory entry
// at `*pte` and restart the walk from the root.  noinline keeps the hot
// walk loop compact.
// NOTE(review): the new directory page is not zeroed here — assumes
// pmm_manager.alloc_pages returns zero-filled pages; verify.
static pte_t* __attribute__((noinline)) __continue_walk_create_user(uintptr_t addr, pte_t* pte,pte_t * pagetable)
{
  *pte = ptd_create(ppn(pmm_manager.alloc_pages(1)));
  return walk_user_pagetable(pagetable,addr,1);
}

// Return the address of the leaf (level-0) PTE for `addr` in the user
// page table `pagetable`.  When an intermediate directory is missing:
// if `create` is non-zero, allocate it via __continue_walk_create_user
// (which restarts the walk); otherwise return 0.
pte_t *
walk_user_pagetable(pte_t * pagetable, uint64 addr, int create)
{

   pte_t* t = pagetable;

   for (int i = (VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS - 1; i > 0; i--) {
    size_t idx = pt_idx(addr, i);
    if (unlikely(!(t[idx] & PTE_V)))
      return create ? __continue_walk_create_user(addr, &t[idx],pagetable) : 0;
    // Descend into the child table; relies on physical memory being
    // directly addressable by the kernel.
    t = (pte_t*)(pte_ppn(t[idx]) << RISCV_PGSHIFT);
  }
  return &t[pt_idx(addr, 0)];
}


// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
// be page-aligned. Returns 0 on success, -1 if walk_user_pagetable()
// couldn't allocate a needed page-table page.  Panics on a zero-size
// request or on an attempt to remap an already-valid page.
int                           
mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
{
  if(DEBUG_MMAP){
    printk("va %p,size %p ,pa %p ,perm %p\n",va,size,pa,perm);
  }
  uint64 a, last;
  pte_t *pte;

  // BUG FIX: with size == 0, `va + size - 1` underflows and `last`
  // wraps, so the loop would try to map (nearly) the entire address
  // space.  Reject it explicitly, as upstream xv6 does.
  if(size == 0)
    panic("mappages: size");

  a = PGROUNDDOWN(va);
  last = PGROUNDDOWN(va + size - 1);
  for(;;){
    if((pte = walk_user_pagetable(pagetable, a, 1)) == 0)
      return -1;
    // Refuse to silently overwrite an existing mapping.
    if(*pte & PTE_V){
        uvmprint(pagetable);
        printk("remap va %p\n",a);
        panic("remap");
    }
    
    *pte = PA2PTE(pa) | perm | PTE_V;

    if(a == last)
      break;
    a += PGSIZE;
    pa += PGSIZE;
  }
  return 0;
}

// Allocate PTEs and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns the new size, or
// oldsz unchanged when newsz does not grow the process.  Panics on
// allocation or mapping failure.
uint64
uvmalloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
  char *mem;
  uint64 a;

  if(newsz < oldsz)
    return oldsz;

  oldsz = PGROUNDUP(oldsz);
  for(a = oldsz; a < newsz; a += PGSIZE){
    mem = (char *)pmm_manager.alloc_pages(1);
    if(mem == 0){
      // BUG FIX: corrected misspelled diagnostic ("falied" -> "failed").
      panic("uvmalloc mem alloc failed\n");
    }
    // Zero-fill so the process never observes stale memory contents.
    memset(mem, 0, PGSIZE);
    // Map as user R|W|X (prot_to_type also sets the A/D bits).
    if(mappages(pagetable, a, PGSIZE, (uint64)mem, prot_to_type(PROT_WRITE|PROT_READ|PROT_EXEC,1)) != 0){
      // BUG FIX: corrected misspelled diagnostic ("falied" -> "failed").
      panic("uvmalloc mappages failed\n");
    }
  }
  return newsz;
}

// Recursively dump a page table.  `levet` is the current depth
// (0 = root), and `vpn` accumulates the 9-bit virtual-page-number piece
// chosen at each level so leaf entries can print the full virtual
// address.  Zero entries are skipped.
void uvmprint_help(pte_t * pagetable,int levet, uint64 * vpn){
      
      for(int i=0;i<512;i++){
        vpn[levet]=i;
        pte_t pte=pagetable[i];
        if(pte==0)
            continue;
        // Indent proportionally to the current depth.
           printk("  vm ");
        for(int j=0;j<=levet;j++){
          if(j!=levet)
            printk(".. ");
          else
            printk("..");
        }
        // A valid entry with none of R/W/X set is an interior directory.
        // BUG FIX: the original tested (PTE_R|PTE_W|PTE_W) — PTE_X was
        // missing (cf. the correct test in freewalk), so an execute-only
        // leaf page would be mistaken for a directory and recursed into.
        if(pte & PTE_V && (pte & (PTE_R|PTE_W|PTE_X))==0){
          // Page directory: print it, then descend.
          printk(" %d: pte %p pa %p\n",i,(void *)pagetable[i],(void *)PTE2PA(pte));
          uint64 child=PTE2PA(pte);
          uvmprint_help((pte_t *)child,levet+1,vpn);
        }else
        {
          // Leaf: rebuild the VA from the three per-level indices (Sv39).
            printk("va %p ",((vpn[0]<<18)|(vpn[1]<<9)|(vpn[2]))<<12);
            printk("%d: pte %p pa %p\n",i,(void *)pagetable[i],(void *)PTE2PA(pte));
        }
      
    }
}

// Print the whole page table rooted at `pagetable` (three-level dump).
void uvmprint(pte_t *  pagetable) {
    // BUG FIX: vpn was passed uninitialized; a leaf entry encountered
    // above level 2 (a superpage) would make uvmprint_help read
    // indeterminate values when reconstructing the virtual address.
    uint64 vpn[3] = {0, 0, 0};
    printk("  vm page table %p\n",(void *)pagetable);
    uvmprint_help(pagetable,0,vpn);
    return;
 }


// Given a parent process's page table, copy
// its memory into a child's page table.
// Copies both the page table and the
// physical memory.
// returns 0 on success, -1 on failure.
// frees any allocated pages on failure.
//
// Two phases: (1) for [0, sz), allocate a fresh page per parent page,
// copy contents, and map it into the child; (2) copy the contents of
// the user-stack page into the child's already-mapped stack page
// without creating a new mapping.
int
uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
{
  pte_t *pte;
  uint64 pa, i;
  uint flags;
  char *mem;
// uvmprint(old);
// uvmprint(new);

  // Phase 1: every page in [0, sz) must already be mapped in the parent.
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walk_user_pagetable(old, i, 0)) == 0)
      panic("uvmcopy: pte should exist");
    if((*pte & PTE_V) == 0)
      panic("uvmcopy: page not present");
    pa = PTE2PA(*pte);
    flags = PTE_FLAGS(*pte);
    if((mem =(char *)pmm_manager.alloc_pages(1)) == 0)
      goto err;
    // Physical pages are directly addressable by the kernel here.
    memmove(mem, (char*)pa, PGSIZE);
// printk("------%p ----mem -%p--1\n",i,mem);
    // uvmprint(new);
    if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
      pmm_manager.free_pages((uint64)mem,1);
      goto err;
    }
// printk("------%p -------2\n",i);

  }

  //copy user stack  do not map
  // Phase 2: the child's stack page is expected to be mapped already
  // (e.g. at process creation); only the bytes are copied here.
  if((pte = walk_user_pagetable(old, USER_STACK-PGSIZE, 0)) == 0)
      panic("uvmcopy: pte should exist");
  if((*pte & PTE_V) == 0)
      panic("uvmcopy: page not present");
  pa = PTE2PA(*pte);

  pte_t * pte_new = 0;
  uint64 pa_new=0;
   if((pte_new = walk_user_pagetable(new, USER_STACK-PGSIZE, 0)) == 0)
      panic("uvmcopy: pte_new should exist");
  if((*pte_new & PTE_V) == 0)
      panic("uvmcopy: page of new pagetable not present");
  pa_new=PTE2PA(*pte_new);
  memmove((char *)pa_new, (char*)pa, PGSIZE);
  if(DEBUG_PROC){
    printk("copt stack %p to %p\n",pa,pa_new);
  }

  if(DEBUG_PROC){
    printk("page table copy finished\n");
  }
  return 0;

 err:
  // Undo every page mapped so far (i / PGSIZE pages) and free them.
  uvmunmap(new, 0, i / PGSIZE, 1);
  return -1;
}

// Remove npages of mappings starting from va. va must be
// page-aligned and every mapping must exist.  When do_free is set,
// the underlying physical pages are released as well.
void
uvmunmap(pagetable_t pagetable, uint64 va, uint64 npages, int do_free)
{
  if((va % PGSIZE) != 0)
    panic("uvmunmap: not aligned");

  uint64 end = va + npages*PGSIZE;
  for(uint64 addr = va; addr < end; addr += PGSIZE){
    pte_t *entry = walk_user_pagetable(pagetable, addr, 0);
    if(entry == 0)
      panic("uvmunmap: walk");
    if((*entry & PTE_V) == 0)
      panic("uvmunmap: not mapped");
    // A PTE with only the V flag is a page-directory entry, not a leaf.
    if(PTE_FLAGS(*entry) == PTE_V)
      panic("uvmunmap: not a leaf");

    if(do_free)
      pmm_manager.free_pages(PTE2PA(*entry), 1);
    *entry = 0;
  }
}

// Recursively free page-table pages.
// All leaf mappings must already have been removed; finding one is a
// panic.
void
freewalk(pagetable_t pagetable)
{
  // 512 PTEs per page-table page.
  for(int idx = 0; idx < 512; idx++){
    pte_t entry = pagetable[idx];
    if((entry & PTE_V) == 0)
      continue;
    if(entry & (PTE_R|PTE_W|PTE_X))
      panic("freewalk: leaf");
    // Interior entry: free the child table first, then clear the slot.
    freewalk((pagetable_t)PTE2PA(entry));
    pagetable[idx] = 0;
  }
  pmm_manager.free_pages((uint64)pagetable, 1);
}

// Free user memory pages,
// then free the page-table pages themselves.
void
uvmfree(pagetable_t pagetable, uint64 sz)
{
  if(sz != 0)
    uvmunmap(pagetable, 0, PGROUNDUP(sz)/PGSIZE, 1);
  freewalk(pagetable);
}

// Free a process's page table, and free the
// physical memory it refers to.
// Tears down, in order: the user-stack pages (freed), the trap section
// mapping (not freed — the physical page is shared), the trapframe
// page (freed), then the remaining [0, sz) space and the table itself.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz,uint64 trapframe)
{
  // NOTE(review): 2 pages are unmapped starting at USER_STACK-PGSIZE,
  // i.e. one page below and one page at/above USER_STACK, while uvmcopy
  // copies only one stack page — confirm the stack layout.
  uvmunmap(pagetable, USER_STACK-PGSIZE, 2, 1);
    extern char trap_sec_start;
  uvmunmap(pagetable, (uint64)&trap_sec_start, 1, 0);
  uvmunmap(pagetable,(uint64)trapframe, 1, 1);
  uvmfree(pagetable, sz);
}


// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error (unmapped or invalid destination).
int
copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
{
  uint64 n, va0, pa0;

  while(len > 0){
    va0 = PGROUNDDOWN(dstva);
    pte_t *pte = walk_user_pagetable(pagetable, va0, 0);
    // BUG FIX: the walk returns NULL for an unmapped address; the
    // original dereferenced it unconditionally, faulting in the kernel.
    // Also require the entry to be valid before using its PA.
    if(pte == 0 || (*pte & PTE_V) == 0)
      return -1;
    pa0 = PTE2PA(*pte);
    if(pa0 == 0)
      return -1;
    // Bytes remaining in this destination page.
    n = PGSIZE - (dstva - va0);
    if(n > len)
      n = len;
    memmove((void *)(pa0 + (dstva - va0)), src, n);

    len -= n;
    src += n;
    dstva = va0 + PGSIZE;
  }
  return 0;
}
