/*
 * virtual address mapping related functions.
 */

#include "vmm.h"
#include "riscv.h"
#include "pmm.h"
#include "util/types.h"
#include "memlayout.h"
#include "util/string.h"
#include "spike_interface/spike_utils.h"
#include "util/functions.h"

/* --- utility functions for virtual address mapping --- */
//
// establish mapping of virtual address [va, va+size] to physical address [pa, pa+size]
// with the permission of "perm".
// returns 0 on success, -1 when an intermediate page table cannot be allocated.
//
int map_pages(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm) {
  pte_t *pte;
  uint64 start, end;

  if (va < 0x80000000) {
    // low VA region: mapped with sv48 (large) pages
    start = ROUNDDOWN(va, PGSIZE48);
    end = ROUNDDOWN(va + size - 1, PGSIZE48);
    for (; start <= end; start += PGSIZE48, pa += PGSIZE48) {
      // walk (allocating intermediate tables as needed) to the leaf PTE of this page
      if ((pte = page_walk(page_dir, start, 1)) == 0) return -1;
      // the target virtual page must not be mapped already
      if (*pte & PTE_V)
        panic("map_pages fails on mapping va (0x%lx) to pa (0x%lx)", start, pa);
      // install the physical address and permissions into the sv48 leaf PTE
      *pte = PA2PTE48(pa) | perm | PTE_V;
    }
  } else {
    // high VA region: mapped with sv39 (normal) pages
    start = ROUNDDOWN(va, PGSIZE);
    end = ROUNDDOWN(va + size - 1, PGSIZE);
    for (; start <= end; start += PGSIZE, pa += PGSIZE) {
      if ((pte = page_walk(page_dir, start, 1)) == 0) return -1;
      if (*pte & PTE_V)
        panic("map_pages fails on mapping va (0x%lx) to pa (0x%lx)", start, pa);
      // install the physical address and permissions into the sv39 leaf PTE
      *pte = PA2PTE(pa) | perm | PTE_V;
    }
  }
  return 0;
}

//
// convert permission code to permission types of PTE
//
uint64 prot_to_type(int prot, int user) {
  uint64 perm = 0;
  if (prot & PROT_READ) perm |= PTE_R | PTE_A;
  if (prot & PROT_WRITE) perm |= PTE_W | PTE_D;
  if (prot & PROT_EXEC) perm |= PTE_X | PTE_A;
  if (perm == 0) perm = PTE_R;
  if (user) perm |= PTE_U;
  return perm;
}

//
// traverse the page table (starting from page_dir) to find the corresponding pte of va.
// if alloc != 0, missing intermediate page tables are allocated on the way.
// the low VA region (< 0x80000000) is walked with the sv48 (large page) layout;
// the high region with the standard sv39 layout.
// returns: PTE (page table entry) pointing to va, or 0 on walk/allocation failure.
//
pte_t *page_walk(pagetable_t page_dir, uint64 va, int alloc) {
  if (va >= MAXVA) panic("page_walk");

  // starting from the page directory
  pagetable_t pt = page_dir;


    if(va<0x80000000)//sv48
    {
    for (int level = 1; level > 0; level--) {// large pages use only two table levels here, so a single step reaches the leaf PTE

        pte_t *pte = pt + PX48(level, va);// index this level with va's VPN to locate the next-level entry

        if (*pte & PTE_V) {
          pt = (pagetable_t)PTE2PA48(*pte);// descend to the next-level page table
        } else { 
          if( alloc && ((pt = (pte_t *)alloc_page(1)) != 0) ){
            memset(pt, 0, PGSIZE48);
            *pte = PA2PTE48(pt) | PTE_V;// mark the freshly allocated table page valid
          }else // return 0 when alloc==0 (allocation forbidden) or no free large page is left
            return 0;
        }
      }
      return pt + PX48(0, va);
    }
    else
    {
      for (int level = 2; level > 0; level--) {// normal pages use three table levels, so two steps reach the leaf PTE
          
        pte_t *pte = pt + PX(level, va);// index this level with va's VPN to locate the next-level entry
          
        if (*pte & PTE_V) { 
          pt = (pagetable_t)PTE2PA(*pte);// descend to the next-level page table
        } else { 
          if( alloc && ((pt = (pte_t *)alloc_page(0)) != 0) ){
            memset(pt, 0, PGSIZE);
            *pte = PA2PTE(pt) | PTE_V;// mark the freshly allocated table page valid
          }else // return 0 when alloc==0 (allocation forbidden) or no free normal page is left
            return 0;
        }
      }
      return pt + PX(0, va);
    }
}

//
// look up a virtual page address, return the physical page address or 0 if not mapped.
//
uint64 lookup_pa(pagetable_t pagetable, uint64 va) {
  if (va >= MAXVA) return 0;

  // locate the leaf PTE of va without allocating missing tables
  pte_t *pte = page_walk(pagetable, va, 0);

  // reject a missing, invalid, or neither-readable-nor-writable entry
  if (pte == 0 || (*pte & PTE_V) == 0 || ((*pte & PTE_R) == 0 && (*pte & PTE_W) == 0))
    return 0;

  // low VAs are sv48 (large) pages, high VAs are sv39 (normal) pages;
  // convert the PTE to a physical page address accordingly
  return (va < 0x80000000) ? PTE2PA48(*pte) : PTE2PA(*pte);
}

/* --- kernel page table part --- */
// _etext is defined in kernel.lds, it points to the address after text and rodata segments.
extern char _etext[];

// pointer to the kernel page directory
pagetable_t g_kernel_pagetable;

//
// maps virtual address [va, va+sz] to [pa, pa+sz] (for kernel).
// any failure of a kernel mapping is fatal.
//
void kern_vm_map(pagetable_t page_dir, uint64 va, uint64 pa, uint64 sz, int perm) {
  int rc = map_pages(page_dir, va, sz, pa, perm);
  if (rc != 0) panic("kern_vm_map");
}

//
// kern_vm_init() constructs the kernel page table.
//
void kern_vm_init(void) {
  pagetable_t t_page_dir;

  // allocate a page (t_page_dir) to be the page directory for kernel
  t_page_dir = (pagetable_t)alloc_page(0);// a normal (small) page holds the kernel page directory
  memset(t_page_dir, 0, PGSIZE);

  // map virtual address [KERN_BASE, _etext] to physical address [DRAM_BASE, DRAM_BASE+(_etext - KERN_BASE)],
  // to maintain (direct) text section kernel address mapping.
  kern_vm_map(t_page_dir, KERN_BASE, DRAM_BASE, (uint64)_etext - KERN_BASE,
         prot_to_type(PROT_READ | PROT_EXEC, 0));

  sprint("KERN_BASE 0x%lx\n", lookup_pa(t_page_dir, KERN_BASE));

  // identity-map the remaining physical memory [_etext, PHYS_TOP) as read/write data
  kern_vm_map(t_page_dir, (uint64)_etext, (uint64)_etext, PHYS_TOP - (uint64)_etext,
         prot_to_type(PROT_READ | PROT_WRITE, 0));

  sprint("physical address of _etext is: 0x%lx\n", lookup_pa(t_page_dir, (uint64)_etext));

  // publish the completed directory as the global kernel page table
  g_kernel_pagetable = t_page_dir;
}

/* --- user page table part --- */

//
// convert and return the corresponding physical address of a virtual address (va) of
// application, or NULL when va is not mapped.
// NOTE(review): per the file's convention, low VAs are backed by sv48 (large) pages
// and high VAs by sv39 (normal) pages — the in-page offset mask must match the region.
//
void *user_va_to_pa(pagetable_t page_dir, void *va) {
  uint64 vaddr = (uint64)va;

  // physical base of the page containing va; lookup_pa is called once
  // (the old code redundantly called it twice).
  uint64 page_pa = lookup_pa(page_dir, vaddr);
  if (page_pa == 0) return NULL;

  // pick the offset mask by region: the old code always masked with the sv48
  // page size — wrong for sv39-mapped addresses — and built the mask with an
  // int shift (1<<PGSHIFT48) that can overflow for large page shifts.
  uint64 offset = (vaddr < 0x80000000) ? (vaddr & ((uint64)PGSIZE48 - 1))
                                       : (vaddr & ((uint64)PGSIZE - 1));
  return (void *)(page_pa + offset);
}

//
// maps virtual address [va, va+size] to [pa, pa+size] (for user application).
// a user mapping failure is unrecoverable here.
//
void user_vm_map(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm) {
  if (map_pages(page_dir, va, size, pa, perm) != 0)
    panic("fail to user_vm_map .\n");
}

//
// unmap virtual address [va, va+size] from the user app.
// reclaim the physical pages if free!=0
// NOTE(review): user space is assumed to be mapped with sv48 (large) pages,
// matching user_va_to_pa above, so iteration is at PGSIZE48 granularity.
//
void user_vm_unmap(pagetable_t page_dir, uint64 va, uint64 size, int free) {
  uint64 first, last;
  pte_t *pte;

  for (first = ROUNDDOWN(va, PGSIZE48), last = ROUNDDOWN(va + size - 1, PGSIZE48);
       first <= last; first += PGSIZE48) {
    // locate the leaf PTE without allocating missing tables; skip unmapped pages
    if ((pte = page_walk(page_dir, first, 0)) == 0) continue;

    if (free) {
      // reclaim the physical page backing THIS virtual page.
      // (the old code looked up "va" on every iteration — freeing the same page
      // repeatedly — and incremented an uninitialized local pa.)
      uint64 pa = lookup_pa(page_dir, first);
      if (pa != 0) free_page((void *)pa, 1);
    }

    // invalidate the PTE whether or not the page was reclaimed.
    // (the old code returned immediately when free==0, leaving the
    // mapping in place and never actually unmapping.)
    *pte = 0;
  }
}
