/*
  S.M.A.C.K - An operating system kernel
  Copyright (C) 2010,2011 Mattias Holm and Kristian Rietveld
  For licensing and a full list of authors of the kernel, see the files
  COPYING and AUTHORS.
*/

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arch-types.h>
#include <process.h>
#include <assert.h>
#include <hal.h>
#include <vm.h>

#include <cache.h>

#include "cpu.h"

// This vm space is reserved for the kernel on the ARM
#define KERNEL_START  0x80000000 //!< Start of kernel virtual address space
#define KERNEL_END    0xffffffff //!< End of kernel virtual address space
#define VA_OFFSET     0          //!< Offset for translating a physical address
                                 //!< to virtual (only kernel)

/*! Translates physical to virtual address in the kernel */
#define PA_TO_VA(pa) ((va_t)((pa_t)pa+VA_OFFSET))
#define VA_TO_PA(va) ((pa_t)((va_t)va-VA_OFFSET))

#define HIGH_EXCEPT_ADDR 0xffff0000 //!< ARM High exception vector address
#define MMU_FAULT         0         //!< PDE / PTE for indicating invalid address.
#define MMU_L1_PTD      0x1         //!< Tag value for identifying PTDs
#define MMU_L1_SEC      0x00000002  //!< Tag value for identifying section descriptors
#define MMU_L1_SUPSEC   0x00040002  //!< Tag value for identifying super section descriptors
#define MMU_L2_SMALL    0x2         //!< Tag value for identifying normal pages
#define MMU_L2_LARGE    0x1         //!< Tag value for identifying large pages
#define MMU_L2_LENGTH   256         //!< Length of L2 page table.
#define MMU_L2_SIZE     1024        //!< Size of L2 page table.
#define MMU_L1_SIZE     16384       //!< Size of L1 page table.
#define PTD_IMP      (1 << 9)
#define PTD_NS       (1 << 3)
#define PTD_BASE     0xfffffc00

#define SEC_ADDR        0xfff00000
#define SUPSEC_ADDR     0xff000000
#define SUPSEC_EXTADDR  0x00f001e0

#define SEC_XN       (1 << 4) // Section descriptor XN bit
#define SUPSEC_XN    (1 << 4) // Supersection descriptor XN bit

#define PTE_NG       (1 << 11) //!< Not Global
#define PTE_S        (1 << 10) //!< Sharable
#define LPAGE_XN     (1 << 15) //!< Large PTE No eXecute
#define SPAGE_XN     (1)       //!< Small PTE No eXecute; note that on ARMv5 this bit indicates a tiny page



#define AP_NA_NA  0x00
#define AP_RW_NA  0x01
#define AP_RW_RO  0x02
#define AP_RW_RW  0x03
#define AP_RO_NA  0x21
#define AP_RO_RO  0x23 // ARMv7
// # define AP_RO_RO  0x22 // For ARMv6

#define SEC_AP_SHIFT    10
#define SUPSEC_AP_SHIFT 10
#define LPAGE_AP_SHIFT  4
#define SPAGE_AP_SHIFT  4

#define PTE_C 8
#define PTE_B 4

#define SEC_TEX_SHIFT    12
#define SUPSEC_TEX_SHIFT 12
#define LPTE_TEX_SHIFT   12
#define SPTE_TEX_SHIFT    6

#define SPTE_STRONGLY_ORDERED   ((0 << SPTE_TEX_SHIFT))
#define SPTE_SHARED_DEVICE      ((0 << SPTE_TEX_SHIFT) | PTE_B)
#define SPTE_OWT_IWT            ((0 << SPTE_TEX_SHIFT) | PTE_C)
#define SPTE_OWB_IWB            ((0 << SPTE_TEX_SHIFT) | PTE_B | PTE_C)
#define SPTE_ONC_INC            ((1 << SPTE_TEX_SHIFT))
#define SPTE_OWBA_IWBA          ((1 << SPTE_TEX_SHIFT) | PTE_B | PTE_C)
#define SPTE_DEVICE             ((2 << SPTE_TEX_SHIFT))

#define SPTE_CACHEABLE          ((4 << SPTE_TEX_SHIFT))
#define SPTE_ONC                ((0 << SPTE_TEX_SHIFT))
#define SPTE_OWBA               ((1 << SPTE_TEX_SHIFT))
#define SPTE_OWT                ((2 << SPTE_TEX_SHIFT))
#define SPTE_OWB                ((3 << SPTE_TEX_SHIFT))
#define SPTE_INC                ((0 << 2))
#define SPTE_IWBA               ((1 << 2))
#define SPTE_IWT                ((2 << 2))
#define SPTE_IWB                ((3 << 2))

#define LPTE_STRONGLY_ORDERED   ((0 << LPTE_TEX_SHIFT))
#define LPTE_SHARED_DEVICE      ((0 << LPTE_TEX_SHIFT) | PTE_B)
#define LPTE_OWT_IWT            ((0 << LPTE_TEX_SHIFT) | PTE_C)
#define LPTE_OWB_IWB            ((0 << LPTE_TEX_SHIFT) | PTE_B | PTE_C)
#define LPTE_ONC_INC            ((1 << LPTE_TEX_SHIFT))
#define LPTE_OWBA_IWBA          ((1 << LPTE_TEX_SHIFT) | PTE_B | PTE_C)
#define LPTE_DEVICE             ((2 << LPTE_TEX_SHIFT))

#define LPTE_CACHEABLE          ((4 << LPTE_TEX_SHIFT))
#define LPTE_ONC                ((0 << LPTE_TEX_SHIFT))
#define LPTE_OWBA               ((1 << LPTE_TEX_SHIFT))
#define LPTE_OWT                ((2 << LPTE_TEX_SHIFT))
#define LPTE_OWB                ((3 << LPTE_TEX_SHIFT))
#define LPTE_INC                ((0 << 2))
#define LPTE_IWBA               ((1 << 2))
#define LPTE_IWT                ((2 << 2))
#define LPTE_IWB                ((3 << 2))

#define SEC_STRONGLY_ORDERED   ((0 << SEC_TEX_SHIFT))
#define SEC_SHARED_DEVICE      ((0 << SEC_TEX_SHIFT) | PTE_B)
#define SEC_OWT_IWT            ((0 << SEC_TEX_SHIFT) | PTE_C)
#define SEC_OWB_IWB            ((0 << SEC_TEX_SHIFT) | PTE_B | PTE_C)
#define SEC_ONC_INC            ((1 << SEC_TEX_SHIFT))
#define SEC_OWBA_IWBA          ((1 << SEC_TEX_SHIFT) | PTE_B | PTE_C)
#define SEC_DEVICE             ((2 << SEC_TEX_SHIFT))

#define SEC_CACHEABLE          ((4 << SEC_TEX_SHIFT))
#define SEC_ONC                ((0 << SEC_TEX_SHIFT))
#define SEC_OWBA               ((1 << SEC_TEX_SHIFT))
#define SEC_OWT                ((2 << SEC_TEX_SHIFT))
#define SEC_OWB                ((3 << SEC_TEX_SHIFT))
#define SEC_INC                ((0 << 2))
#define SEC_IWBA               ((1 << 2))
#define SEC_IWT                ((2 << 2))
#define SEC_IWB                ((3 << 2))

#define SUPSEC_STRONGLY_ORDERED   ((0 << SUPSEC_TEX_SHIFT))
#define SUPSEC_SHARED_DEVICE      ((0 << SUPSEC_TEX_SHIFT) | PTE_B)
#define SUPSEC_OWT_IWT            ((0 << SUPSEC_TEX_SHIFT) | PTE_C)
#define SUPSEC_OWB_IWB            ((0 << SUPSEC_TEX_SHIFT) | PTE_B | PTE_C)
#define SUPSEC_ONC_INC            ((1 << SUPSEC_TEX_SHIFT))
#define SUPSEC_OWBA_IWBA          ((1 << SUPSEC_TEX_SHIFT) | PTE_B | PTE_C)
#define SUPSEC_DEVICE             ((2 << SUPSEC_TEX_SHIFT))

#define SUPSEC_CACHEABLE          ((4 << SUPSEC_TEX_SHIFT))
#define SUPSEC_ONC                ((0 << SUPSEC_TEX_SHIFT))
#define SUPSEC_OWBA               ((1 << SUPSEC_TEX_SHIFT))
#define SUPSEC_OWT                ((2 << SUPSEC_TEX_SHIFT))
#define SUPSEC_OWB                ((3 << SUPSEC_TEX_SHIFT))
#define SUPSEC_INC                ((0 << 2))
#define SUPSEC_IWBA               ((1 << 2))
#define SUPSEC_IWT                ((2 << 2))
#define SUPSEC_IWB                ((3 << 2))

// TEX remap regions TEX[0], C, B
// C and B are the same for both lpages, spages, sections and super sections, we only support
// two memory types for now, so we just define two different mappings.
// Also, we can define two additional ones if necessary.
#define TEX_MAP_DEVICE   0x00000000
#define TEX_MAP_MEM      0x00000004
#define TEX_MAP_NO_CACHE 0x00000008

// Base page size
#define PAGE_WORDS    (1024)

#define PAGE_SIZE     (4096)
#define LPAGE_SIZE    (64*1024)
#define SEC_SIZE      (1024*1024)
#define SUPSEC_SIZE   (16*1024*1024)

#define PAGE_MASK     0xfffff000
#define LPAGE_MASK    0xffff0000
#define SEC_MASK      0xfff00000
#define SUPSEC_MASK   0xff000000


#define TTBCR_N       7      //!< Mask for accessing the N-bits in the TTBCR register
/*!
  Invalidates the entire TLB (instruction, data and unified).

  The whole data cache is cleaned first so that page table entries
  modified through the data cache are visible to the hardware table
  walker before the invalidation takes effect.
 */
static inline void
tlb_invalidate_all(void)
{
  /* Clean cache */
  /* FIXME: Improve this, because we only have to clear the line containing
   * the page table entry which has just been modified from the cache.
   */
  hw_cache_clean_all();

  // r4 is a scratch register and is listed in the clobbers; the barriers
  // order the cache clean before the TLB operations and the TLB
  // operations before any subsequent instruction fetch.
  __asm__ volatile (
    "dsb\n"
    "isb\n"
    "mov r4, #0\n"
    "mcr p15, 0, r4, c8, c5, 0          @ Invalidate entire iTLB\n"
    "mcr p15, 0, r4, c8, c6, 0          @ Invalidate entire dTLB\n"
    "mcr p15, 0, r4, c8, c7, 0          @ Invalidate entire uTLB\n"
    "dsb                                @ Wait for TLB invalidation to complete\n"
    "isb\n"
    : // Outs
    : // Ins
    : "r4"
    );
}

/*!
  Invalidates the TLB entries covering a virtual address range.

  \param va   Start of the virtual range (currently unused).
  \param size Length of the range in bytes (currently unused).

  The current implementation simply flushes the whole TLB; range-based
  invalidation is left as a future optimisation.
 */
void
hw_tlb_invalidate(va_t va, size_t size)
{
  (void)va;   // TODO: Only invalidate requested region
  (void)size;

  tlb_invalidate_all();
}


/*! Computes the level 1 (section) table index for a virtual address. */
static inline uint32_t
level_1_idx(va_t va)
{
  // Every L1 entry covers 1 MiB, so the index is the top 12 bits.
  uint32_t idx = va;
  idx >>= 20;
  return idx;
}

/*! Computes the level 2 (page table) index for a virtual address. */
static inline uint32_t
level_2_idx(va_t va)
{
  // Bits [19:12] select one of the 256 entries in an L2 table.
  return (uint32_t)((va >> 12) & 0xffu);
}

/*!
  Builds a level 1 page table descriptor (PTD) pointing at an L2 table.

  \param lev2_base_addr Physical address of the L2 table; must be
                        1 KiB aligned (the low 10 bits are discarded).
  \return The encoded L1 PTD entry.
 */
static inline uint32_t
make_ptd(pa_t lev2_base_addr)
{
  // NOTE: Sets PTD_IMP and PTD_NS to 0.
  // Use the PTD_BASE macro instead of repeating the 0xfffffc00 magic
  // number, matching its use in get_l2_table().
  return (lev2_base_addr & PTD_BASE) | MMU_L1_PTD;
}

/*!
  Builds an L2 small page (4 KiB) table entry.

  \param base_addr Physical base of the page (4 KiB aligned).
  \param flags     Attribute bits; must not overlap the address field.
 */
static inline uint32_t
make_small_pte(pa_t base_addr, uint32_t flags)
{
  assert((flags & PAGE_MASK) == 0);

  uint32_t pte = base_addr & PAGE_MASK;
  pte |= flags;
  pte |= MMU_L2_SMALL;
  return pte;
}

/*!
  Builds an L2 large page (64 KiB) table entry.

  \param base_addr Physical base of the large page (64 KiB aligned).
  \param flags     Attribute bits; must not overlap the address field.
 */
static inline uint32_t
make_lpte(pa_t base_addr, uint32_t flags)
{
  assert((flags & LPAGE_MASK) == 0);

  uint32_t lpte = base_addr & LPAGE_MASK;
  lpte |= flags;
  lpte |= MMU_L2_LARGE;
  return lpte;
}


// These symbols come from the linker script, make sure they are defined.
extern uint8_t _stext, _etext;
extern uint8_t _srdata, _erdata;
extern uint8_t _sdata, _edata;
extern uint8_t _sbss, _ebss;
extern uint8_t _svc_stack_top, _svc_stack_bottom;
extern uint8_t _fiq_stack_top, _fiq_stack_bottom;
extern uint8_t _irq_stack_top, _irq_stack_bottom;
extern uint8_t _abt_stack_top, _abt_stack_bottom;
extern uint8_t _und_stack_top, _und_stack_bottom;
extern uint32_t _tbr0_start;
extern uint32_t _tbr1_start;
extern uint32_t _mmu_lev2_start, _mmu_lev2_end;
extern uint8_t _except_page;
extern uint8_t _free_ram_start;


/*!
  Prints a register name and its value in hexadecimal.

  \param regname Human-readable register name.
  \param val     Register value to print.
 */
void
putreg(const char *regname, uint32_t val)
{
  // Print as an explicit 32-bit hex value.  The previous code passed a
  // uint32_t for a %p conversion, which requires a void * argument and
  // is undefined behaviour (format/argument type mismatch).
  printf("%s = 0x%08x\r\n", regname, (unsigned int)val);
}

/*!
  Handler invoked on an MMU prefetch (instruction fetch) abort.

  Dumps the Instruction Fault Status and Instruction Fault Address
  registers and halts the system; a failed instruction fetch is always
  treated as fatal here.
 */
void
mmu_prefetch_abort(void)
{
  puts("====================== MMU PREFETCH ABORT ====================");

  puts("INSTRUCTION FETCH ABORT");
  putreg("Instruction Fault Status ", read_ifsr());
  putreg("Instruction Fault Address", read_ifar());

  panic();
}

/*!
  Handler invoked on an MMU data abort.

  \param fault_instruction Address near the faulting instruction (not
                           exact, see comment below).

  A fault whose instruction address lies in the kernel range is fatal
  and panics; otherwise the current process is terminated with exit
  status 255 after the fault registers are dumped.
 */
void
mmu_data_abort(uint32_t fault_instruction)
{
  puts("====================== MMU DATA ABORT ====================");

  /* Fault instruction is not the exact address, but still good enough. */
  if (fault_instruction >= KERNEL_START) {
    puts("ERROR: The kernel tried to read from a faulty address");
    putreg("  Data Fault Status ", read_dfsr());
    putreg("  Data Fault Address", read_dfar());
    putreg("  Fault instruction", fault_instruction);

    panic();
  }

  printf("ERROR: Process %d tried to read from a faulty address "
         "and will be terminated\n", proc_current()->pid);
  putreg("  Data Fault Status ", read_dfsr());
  putreg("  Data Fault Address", read_dfar());
  putreg("  Fault instruction", fault_instruction);

  proc_exit(proc_current(), 255);

  /* proc_exit will never return. */
}



#if 0
static struct {
  void* l2_page_tables_start;
  void* l2_page_tables_end;
  void* l2_free_page_tables;
} mmu_status;
#endif


/* Needed by architecture-independent vm.c, so cannot be static. */
/*
 * Static description of the kernel's virtual memory regions.  All
 * regions initially share the boot L1 table at _tbr0_start.
 * NOTE(review): the field order appears to be {free list head, base VA,
 * size, page table VA, page table PA} — confirm against the vm_map_t
 * definition in vm.h.
 */
vm_map_t kernel_vm_map[] =
{
  [VM_REG_NULL]        = {LIST_HEAD_INIT, 0x00000000,   4 KiB,          &_tbr0_start, (pa_t)&_tbr0_start},
  [VM_REG_USER]        = {LIST_HEAD_INIT, 0x00001000,   2 GiB - 4 KiB,  &_tbr0_start, (pa_t)&_tbr0_start},
  [VM_REG_KERNEL]      = {LIST_HEAD_INIT, 0x80000000, 512 MiB,          &_tbr0_start, (pa_t)&_tbr0_start},
  [VM_REG_KERNEL_HEAP] = {LIST_HEAD_INIT, 0xA0000000, 512 MiB,          &_tbr0_start, (pa_t)&_tbr0_start},
  [VM_REG_DEVICE]      = {LIST_HEAD_INIT, 0xC0000000,   1 GiB,          &_tbr0_start, (pa_t)&_tbr0_start},
};


/*!
  Initialises the ARM MMU.

  \pre System just booted and MMU not enabled.
 */
void
hw_mmu_init(void)
{
  hw_print_cache_info();

  // Segment sizes, computed from the linker script symbols.
  const size_t text_size = (uintptr_t)&_etext - (uintptr_t)&_stext;
  const size_t ro_size = (uintptr_t)&_erdata - (uintptr_t)&_srdata;
  const size_t data_size = (uintptr_t)&_edata - (uintptr_t)&_sdata;
  const size_t bss_size = (uintptr_t)&_ebss - (uintptr_t)&_sbss;

  // Compute stack sizes. Add 4 to compensate for bottom != next page (it is the last word on the page)
  // subtract PAGE_SIZE to compensate for the stack protector page.
  const size_t svc_size = (uintptr_t)&_svc_stack_bottom + 4 - (uintptr_t)&_svc_stack_top - PAGE_SIZE;
  const size_t fiq_size = (uintptr_t)&_fiq_stack_bottom + 4 - (uintptr_t)&_fiq_stack_top - PAGE_SIZE;
  const size_t irq_size = (uintptr_t)&_irq_stack_bottom + 4 - (uintptr_t)&_irq_stack_top - PAGE_SIZE;
  const size_t abt_size = (uintptr_t)&_abt_stack_bottom + 4 - (uintptr_t)&_abt_stack_top - PAGE_SIZE;
  const size_t und_size = (uintptr_t)&_und_stack_bottom + 4 - (uintptr_t)&_und_stack_top - PAGE_SIZE;

  const size_t lev2_size = (uintptr_t)&_mmu_lev2_end - (uintptr_t)&_mmu_lev2_start;

  uint32_t *tbr0_table = &_tbr0_start;
  uint32_t *tbr1_table = &_tbr1_start;

  set_tbr0((pa_t)tbr0_table);
  set_tbr1((pa_t)tbr1_table);
  set_ttbcr(1);  // Use both tbr0 and 1

  // Clear page tables for kernel (TTBR0: 2048 entries, TTBR1: 4096 entries)
  for (int i = 0 ; i < 2048 ; i++) {
    tbr0_table[i] = MMU_FAULT;
  }

  for (int i = 0 ; i < 4096 ; i++) {
    tbr1_table[i] = MMU_FAULT;
  }

  // Start by mapping in standard kernel segments
  hw_map(NULL, (va_t)&_stext,  (pa_t)&_stext,  text_size, VM_SUPER_RX|VM_USER_EXEC);
  hw_map(NULL, (va_t)&_srdata, (pa_t)&_srdata, ro_size,   VM_SUPER_READ);
  hw_map(NULL, (va_t)&_sdata,  (pa_t)&_sdata,  data_size, VM_SUPER_RW);
  hw_map(NULL, (va_t)&_sbss,   (pa_t)&_sbss,   bss_size,  VM_SUPER_RW);
  // Configure the stacks: the first page of each stack gets no access
  // bits (flags == 0) so that an overflow traps on the protector page.
  hw_map(NULL, (va_t)&_svc_stack_top, (pa_t)&_svc_stack_top, PAGE_SIZE, 0);
  hw_map(NULL, (va_t)&_svc_stack_top+PAGE_SIZE, (pa_t)&_svc_stack_top + PAGE_SIZE, svc_size, VM_SUPER_RW);
  hw_map(NULL, (va_t)&_fiq_stack_top, (pa_t)&_fiq_stack_top, PAGE_SIZE, 0);
  hw_map(NULL, (va_t)&_fiq_stack_top+PAGE_SIZE, (pa_t)&_fiq_stack_top + PAGE_SIZE, fiq_size, VM_SUPER_RW);
  hw_map(NULL, (va_t)&_irq_stack_top, (pa_t)&_irq_stack_top, PAGE_SIZE, 0);
  hw_map(NULL, (va_t)&_irq_stack_top+PAGE_SIZE, (pa_t)&_irq_stack_top + PAGE_SIZE, irq_size, VM_SUPER_RW);
  hw_map(NULL, (va_t)&_abt_stack_top, (pa_t)&_abt_stack_top, PAGE_SIZE, 0);
  hw_map(NULL, (va_t)&_abt_stack_top+PAGE_SIZE, (pa_t)&_abt_stack_top + PAGE_SIZE, abt_size, VM_SUPER_RW);
  hw_map(NULL, (va_t)&_und_stack_top, (pa_t)&_und_stack_top, PAGE_SIZE, 0);
  hw_map(NULL, (va_t)&_und_stack_top+PAGE_SIZE, (pa_t)&_und_stack_top + PAGE_SIZE, und_size, VM_SUPER_RW);
  // Map in the exception page on high exception addresses
  hw_map(NULL, HIGH_EXCEPT_ADDR, (pa_t)&_except_page, PAGE_SIZE, VM_SUPER_RX|VM_USER_EXEC);

  // Map in the kernels page tables as well
  hw_map(NULL, (va_t)&_tbr1_start, (pa_t)&_tbr1_start, MMU_L1_SIZE, VM_SUPER_RW | VM_NO_CACHE); // L1:1 page tables
  hw_map(NULL, (va_t)&_mmu_lev2_start, (pa_t)&_mmu_lev2_start, lev2_size, VM_SUPER_RW | VM_NO_CACHE); // L2 page tables

  // Remapping uses region 0 for devices, region 1 for normal memory,
  // region 2 for page tables.
  set_prrr(PRRR_NS1 | PRRR_DS1 |
           (PRRR_TR_NORMAL_MEMORY << PRRR_TR1) |
           (PRRR_TR_NORMAL_MEMORY << PRRR_TR2) |
           (/*PRRR_TR_DEVICE*/PRRR_TR_STRONGLY_ORDERED << PRRR_TR0));
  set_nmrr((NMRR_WB_WA << NMRR_OR1) |
           (NMRR_WB_WA << NMRR_IR1) |
           (NMRR_NON_CACHABLE << NMRR_OR2) |
           (NMRR_NON_CACHABLE << NMRR_IR2) |
           (NMRR_NON_CACHABLE << NMRR_OR0) |
           (NMRR_NON_CACHABLE << NMRR_IR0));

  // Invalidate data caches but do not write back... this is safe as the MMU is
  // not yet enabled and the data cache is thus not enabled.
  hw_cache_invalidate_all();
  hw_bpc_invalidate();
  tlb_invalidate_all();

  set_acr(ACR_L2EN, 0);
  // NOTE(review): the trailing comment below looks stale — SCTLR_TRE is in
  // the first argument, which elsewhere in this sequence appears to be the
  // set of bits to SET; confirm set_sctlr()'s (set, clear) contract.
  set_sctlr(SCTLR_TRE | SCTLR_C, SCTLR_I | SCTLR_AFE); // Clear TRE and AFE

  // NOTE(review): 0x55555555 presumably programs every DACR domain as
  // "client" so AP bits are checked — confirm against the DACR encoding.
  set_domain(0x55555555);

  set_sctlr(SCTLR_V | SCTLR_M, 0); // Enable high exceptions and the MMU
}

/*!
  Looks up — and if necessary allocates — the level 2 page table that
  covers the given virtual address.

  \param vm          vm map owning the tables, or NULL for the currently
                     active kernel tables.
  \param lev1_table  Virtual address of the level 1 table to consult.
  \param va          Virtual address whose L2 table is wanted.
  \param l2_table_pa Out parameter (may be NULL): physical address of the
                     L2 table.
  \param l2_table_va Out parameter (may be NULL): virtual address of the
                     L2 table.

  Kernel-range tables come from the statically reserved _mmu_lev2_start
  pool (identity mapped, so VA == PA); user-range tables live behind the
  vm map's L1 table.  A freshly allocated table is zeroed.
 */
void
get_l2_table(vm_map_t *vm, uint32_t *lev1_table, va_t va,
             uint32_t **l2_table_pa, uint32_t **l2_table_va)
{
  bool clear_table = false;
  uint32_t *pagetable_va = NULL, *pagetable_pa = NULL;

  // Use the named descriptor-tag macros rather than the bare 0 / 1
  // magic numbers the file defines MMU_FAULT and MMU_L1_PTD for.
  if (lev1_table[level_1_idx(va)] == MMU_FAULT) {
    // No descriptor yet: hand out a table from the static pools.
    if (KERNEL_START <= va && va <= KERNEL_END) {
      pagetable_pa = &((uint32_t*)&_mmu_lev2_start)[level_1_idx(va) * MMU_L2_LENGTH]; // TODO: do not use & operator here
    } else {
      if (vm != NULL) {
        pagetable_pa = &((uint32_t*)(vm->page_table_pa+MMU_L1_SIZE/2))[level_1_idx(va) * MMU_L2_LENGTH];
      }
    }

    if (!pagetable_pa)
      assert(0 && "Please implement page allocation for kernel user space");

    clear_table = true;
  } else if ((lev1_table[level_1_idx(va)] & 3) == MMU_L1_PTD) {
    // Existing page table descriptor: extract the L2 table base address.
    pagetable_pa = (uint32_t *)(lev1_table[level_1_idx(va)] & PTD_BASE);
  } else {
    assert(0 && "No L2 table possible for section or supersection");
  }

  if (!pagetable_va) {
    if (KERNEL_START <= va && va <= KERNEL_END)
      pagetable_va = pagetable_pa; // Kernel page tables are identity mapped.
    else if (vm != NULL)
      pagetable_va = &((uint32_t*)(vm->page_table+MMU_L1_SIZE/2))[level_1_idx(va) * MMU_L2_LENGTH];
    else
      assert(0 && "Please implement page allocation for kernel user space");
  }

  if (clear_table)
    memset(pagetable_va, 0, MMU_L2_SIZE);

  if (l2_table_pa)
    *l2_table_pa = pagetable_pa;

  if (l2_table_va)
    *l2_table_va = pagetable_va;
}

/*
 * Lookup table translating combined supervisor/user read-write bits into
 * ARM AP access-permission encodings.  The index is built by the
 * vm_flags_to_* functions as ((vmflags & VM_USER_RW) >> 1) |
 * (vmflags & VM_SUPER_RW); combinations without a valid AP encoding map
 * to AP_NA_NA (no access).
 * NOTE(review): the index layout depends on the VM_* flag values defined
 * in vm.h — confirm before reusing this table elsewhere.
 */
static const uint32_t rw_map[] = {
  AP_NA_NA, AP_RO_NA, AP_NA_NA, AP_RW_NA,
  AP_NA_NA, AP_RO_RO, AP_NA_NA, AP_RW_RO,
  AP_NA_NA, AP_NA_NA, AP_NA_NA, AP_NA_NA,
  AP_NA_NA, AP_NA_NA, AP_NA_NA, AP_RW_RW,
};

/*!
  Translates architecture-independent VM_* flags into ARM section (1 MiB)
  descriptor attribute bits (access permissions, XN, memory type).
 */
uint32_t
vm_flags_to_sec_flags(uint32_t vmflags)
{
  // Fold the user and supervisor R/W bits into a single rw_map index.
  const uint32_t access = ((vmflags & VM_USER_RW) >> 1) | (vmflags & VM_SUPER_RW);
  uint32_t secflags = rw_map[access] << SEC_AP_SHIFT;

  const uint32_t exec = vmflags & VM_EXEC;
  if (exec == 0) {
    secflags |= SEC_XN;
  } else if (exec != VM_EXEC) {
    puts("Different exec permissions for super and user not supported on ARM");
    panic();
  }

  // Device memory attributes.
  const uint32_t devbits = vmflags & (VM_DEVICE|VM_SHARED);
  if (devbits == (VM_DEVICE|VM_SHARED))
    secflags |= SEC_SHARED_DEVICE;
  else if (devbits == VM_DEVICE)
    secflags |= SEC_DEVICE;

  // TODO: Add flags for normal RAM
  return secflags;
}

/*!
  Translates architecture-independent VM_* flags into ARM large page
  (64 KiB) PTE attribute bits.
 */
uint32_t
vm_flags_to_lpage_flags(uint32_t vmflags)
{
  // Fold the user and supervisor R/W bits into a single rw_map index.
  const uint32_t access = ((vmflags & VM_USER_RW) >> 1) | (vmflags & VM_SUPER_RW);
  uint32_t lpageflags = rw_map[access] << LPAGE_AP_SHIFT;

  const uint32_t exec = vmflags & VM_EXEC;
  if (exec == 0) {
    lpageflags |= LPAGE_XN;
  } else if (exec != VM_EXEC) {
    puts("Different exec permissions for super and user not supported on ARM");
    panic();
  }

  // Device memory attributes.
  const uint32_t devbits = vmflags & (VM_DEVICE|VM_SHARED);
  if (devbits == (VM_DEVICE|VM_SHARED))
    lpageflags |= LPTE_SHARED_DEVICE;
  else if (devbits == VM_DEVICE)
    lpageflags |= LPTE_DEVICE;

  // TODO: Add flags for normal RAM
  return lpageflags;
}

/*!
  Translates architecture-independent VM_* flags into ARM small page
  (4 KiB) PTE attribute bits.  Unlike the section/large-page variants the
  memory type is expressed via the TEX remap index (TEX_MAP_*).
 */
uint32_t
vm_flags_to_spage_flags(uint32_t vmflags)
{
  // Fold the user and supervisor R/W bits into a single rw_map index.
  const uint32_t access = ((vmflags & VM_USER_RW) >> 1) | (vmflags & VM_SUPER_RW);
  uint32_t spageflags = rw_map[access] << SPAGE_AP_SHIFT;

  const uint32_t exec = vmflags & VM_EXEC;
  if (exec == 0) {
    // No execute permission
    spageflags |= SPAGE_XN;
  } else if (exec != VM_EXEC) {
    puts("Different exec permissions for super and user not supported on ARM");
    panic();
  }

  // Select the memory type via the TEX remap index.
  const uint32_t devbits = vmflags & (VM_DEVICE|VM_SHARED);
  if (devbits == (VM_DEVICE|VM_SHARED))
    spageflags |= TEX_MAP_DEVICE | PTE_S;
  else if (devbits == VM_DEVICE)
    spageflags |= TEX_MAP_DEVICE;
  else if ((vmflags & VM_NO_CACHE) == VM_NO_CACHE)
    spageflags |= TEX_MAP_NO_CACHE;
  else
    spageflags |= TEX_MAP_MEM;

  // TODO: Add flags for normal RAM
  return spageflags;
}

/*!
  Builds an L1 section (1 MiB) descriptor.

  \param pa    Physical base of the section (1 MiB aligned).
  \param flags Attribute bits; must not overlap the section address field.
 */
uint32_t
make_sec(pa_t pa, uint32_t flags)
{
  // Guard against flags clobbering the address field, mirroring the
  // checks in make_small_pte() and make_lpte().
  assert((flags & SEC_MASK) == 0);

  return (pa & SEC_MASK) | flags | MMU_L1_SEC;
}

/*!
  Maps the physical range [pa, pa+size) at the virtual range
  [va, va+size).

  \param vm    Target vm map, or NULL for the currently active tables.
  \param va    Page-aligned virtual start address.
  \param pa    Page-aligned physical start address.
  \param size  Size in bytes, a whole number of pages.
  \param flags VM_* permission and attribute flags.

  Only 4 KiB small pages are currently installed; the large-page and
  section code paths below are compiled out.  The entire TLB is
  invalidated after the mapping is installed.
 */
void
hw_map(vm_map_t *vm, va_t va, pa_t pa, size_t size, uint32_t flags)
{
  assert((va & ~PAGE_MASK) == 0 && "must be page aligned");
  assert((pa & ~PAGE_MASK) == 0 && "must be page aligned");
  assert((size & ~PAGE_MASK) == 0 && "must be whole number of pages");

  if (size == 0) return;

  uint32_t ttbcr = read_ttbcr();
  uint32_t mask = 0;

  // Determine which page table to use (TTBR0 or TTBR1).
  // TTBCR.N selects how much of the address space TTBR0 covers;
  // addresses with any mask bit set are translated through TTBR1.
  switch (ttbcr & TTBCR_N) {
  case 0: mask = 0; break;
  case 1: mask = 0x80000000; break;
  case 2: mask = 0xc0000000; break;
  case 3: mask = 0xe0000000; break;
  case 4: mask = 0xf0000000; break;
  case 5: mask = 0xf8000000; break;
  case 6: mask = 0xfc000000; break;
  case 7: mask = 0xfe000000; break;
  default:
    assert(0 && "invalid case");
  }

  uint32_t *l1_table = NULL;
  uint32_t *l2_table_pa = NULL;
  uint32_t *l2_table_va = NULL;

  if ((va & mask) == 0) {
    if (vm == NULL) l1_table = (uint32_t*) (read_tbr0() & 0xffffff80); // Current table
    else l1_table = (uint32_t*) vm->page_table;//(proc->cpu_helper.ttbr0_va & 0xffffff80); // Given process table
  } else
    l1_table = (uint32_t*) (read_tbr1() & 0xffffc000);

  // We do the allocation of pages in several steps. Firstly, we add small pages,
  // until we are aligned and can add large pages. After that we add large pages
  // until we are aligned and can add sections. After this follows the installation
  // of large pages and then small pages when alignment is satisfied.
  // Supersections are not supported as they are optional in the ARMv7.

  va_t mapping_va = va;
  pa_t mapping_pa = pa;
  size_t remaining_range = size;

  unsigned pagecount = 0;
  uint32_t pageflags = vm_flags_to_spage_flags(flags);

  /* FIXME: Temporarily disabled.  DO NOT REMOVE. */
#if 0
  unsigned lpagecount = 0, seccount = 0;

  // First pages, skip if we are large page aligned
  if (mapping_va & ~LPAGE_MASK) {
    va_t next_lpage = (mapping_va & LPAGE_MASK) + LPAGE_SIZE; // Next large page
    // Number of single pages to allocate this pass
    unsigned pages = (next_lpage <= mapping_va + remaining_range)
      ? (next_lpage - mapping_va) / PAGE_SIZE
      : remaining_range / PAGE_SIZE;

    pagecount += pages;

    for (unsigned i = 0 ; i < pages ; i ++) {
      l2_table = (uint32_t*) get_l2_table(proc, l1_table, mapping_va+i*PAGE_SIZE);
      l1_table[level_1_idx(mapping_va+i*PAGE_SIZE)] = make_ptd((pa_t)l2_table);
      l2_table[level_2_idx(mapping_va+i*PAGE_SIZE)] = make_small_pte(mapping_pa+i*PAGE_SIZE, pageflags);
    }

    mapping_va += pages * PAGE_SIZE;
    mapping_pa += pages * PAGE_SIZE;
    remaining_range -= pages * PAGE_SIZE;
  }

  uint32_t lpageflags = vm_flags_to_lpage_flags(flags);

  // Large pages, skip if we are section aligned or need less then one large page
  if ((mapping_va & ~SEC_MASK) && (remaining_range >= LPAGE_SIZE)) {
    va_t next_sec = (mapping_va & SEC_MASK) + SEC_SIZE;
    unsigned lpages = (next_sec <= mapping_va + remaining_range)
      ? (next_sec - mapping_va) / LPAGE_SIZE
      : remaining_range / LPAGE_SIZE;

    lpagecount += lpages;

    for (unsigned i = 0 ; i < lpages ; i ++) {
      l2_table = (uint32_t*) get_l2_table(proc, l1_table, mapping_va+i*LPAGE_SIZE);
      l1_table[level_1_idx(mapping_va+i*LPAGE_SIZE)] = make_ptd((pa_t)l2_table);

      // For large pages, the pte needs to be repeated 16 times
      uint32_t pte = make_lpte(mapping_pa+i*LPAGE_SIZE, lpageflags);
      for (unsigned j = 0 ; j < 16 ; j ++) {
        l2_table[level_2_idx(mapping_va+i*LPAGE_SIZE)+j] = pte;
      }
    }

    mapping_va += lpages * LPAGE_SIZE;
    mapping_pa += lpages * LPAGE_SIZE;
    remaining_range -= lpages * LPAGE_SIZE;
  }

  // Sections
  if (remaining_range >= SEC_SIZE) {
    uint32_t sections = remaining_range / SEC_SIZE;
    uint32_t secflags = vm_flags_to_sec_flags(flags);

    seccount += sections;


    for (int i = 0 ; i < sections ; i ++) {
      l1_table[level_1_idx(mapping_va+i*SEC_SIZE)] = make_sec(mapping_pa+i*SEC_SIZE, secflags);
    }

    mapping_va += sections * SEC_SIZE;
    mapping_pa += sections * SEC_SIZE;
    remaining_range -= sections * SEC_SIZE;
  }

  // We may need to map in further large pages after the sections, note that we are guaranteed
  // to be lpage aligned here since we are section aligned
  if (remaining_range >= LPAGE_SIZE) {
    unsigned lpages = remaining_range / LPAGE_SIZE;

    lpagecount += lpages;

    for (unsigned i = 0 ; i < lpages ; i ++) {
      l2_table = (uint32_t*) get_l2_table(proc, l1_table, mapping_va+i*LPAGE_SIZE);
      l1_table[level_1_idx(mapping_va+i*LPAGE_SIZE)] = make_ptd((pa_t)l2_table);

      // For large pages, the pte needs to be repeated 16 times
      uint32_t pte = make_lpte(mapping_pa+i*LPAGE_SIZE, lpageflags);
      for (unsigned j = 0 ; j < 16 ; j ++) {
        l2_table[level_2_idx(mapping_va+i*LPAGE_SIZE)+j] = pte;
      }
    }

    mapping_va += lpages * LPAGE_SIZE;
    mapping_pa += lpages * LPAGE_SIZE;
    remaining_range -= lpages * LPAGE_SIZE;
  }
#endif

  // Pages: install one small PTE per 4 KiB page, creating the PTD and
  // L2 table on demand.
  if (remaining_range >= PAGE_SIZE) {
    unsigned pages = remaining_range / PAGE_SIZE;

    pagecount += pages;

    for (unsigned i = 0 ; i < pages ; i ++) {
      get_l2_table(vm, l1_table, mapping_va+i*PAGE_SIZE,
                   &l2_table_pa, &l2_table_va);
      uint32_t ptd = make_ptd((pa_t)l2_table_pa);
      l1_table[level_1_idx(mapping_va+i*PAGE_SIZE)] = ptd;
      // Asserts that the page was not already mapped.
      assert(l2_table_va[level_2_idx(mapping_va+i*PAGE_SIZE)] == 0);
      uint32_t pte = make_small_pte(mapping_pa+i*PAGE_SIZE, pageflags);
      l2_table_va[level_2_idx(mapping_va+i*PAGE_SIZE)] = pte;
    }

    mapping_va += pages * PAGE_SIZE;
    mapping_pa += pages * PAGE_SIZE;
    remaining_range -= pages * PAGE_SIZE;
  }
  assert(remaining_range == 0 && "something went horribly wrong");

  tlb_invalidate_all();
}

/*!
  Translates a virtual address to a physical address by walking the page
  tables in software.

  \param vm    vm map used to resolve user-space addresses, may be NULL.
  \param vaddr Virtual address to translate.
  \return The physical address backing vaddr.

  Asserts when the address is unmapped or hits a reserved descriptor.
 */
pa_t
manual_walk(vm_map_t *vm, va_t vaddr)
{
  //  Workaround for bug in qemu, assumes that the page tables are on a 1:1 mapping
  uint32_t ttbcr = read_ttbcr();
  uint32_t mask = 0;

  // TTBCR.N selects the TTBR0/TTBR1 split (same logic as in hw_map).
  switch (ttbcr & TTBCR_N) {
  case 0: mask = 0; break;
  case 1: mask = 0x80000000; break;
  case 2: mask = 0xc0000000; break;
  case 3: mask = 0xe0000000; break;
  case 4: mask = 0xf0000000; break;
  case 5: mask = 0xf8000000; break;
  case 6: mask = 0xfc000000; break;
  case 7: mask = 0xfe000000; break;
  default:
    assert(0 && "invalid case");
  }

  uint32_t *l1_table = NULL;

  if ((vaddr & mask) == 0)
    l1_table = (uint32_t*) (read_tbr0() & 0xffffff80);
  else
    l1_table = (uint32_t*) (read_tbr1() & 0xffffc000);

  // Special hack to probe user addresses
  if (vm && (vaddr < KERNEL_START)) {
    l1_table = vm->page_table;
    // User L2 tables live directly behind the vm map's L1 table.
    uint32_t *l2_table = &((uint32_t*)(((uintptr_t)l1_table)+MMU_L1_SIZE/2))[level_1_idx(vaddr) * MMU_L2_LENGTH];
    uint32_t l2_entry = l2_table[level_2_idx(vaddr)];
    return (l2_entry & PAGE_MASK) | (vaddr & ~PAGE_MASK);
  }

  uint32_t l1_entry = l1_table[level_1_idx(vaddr)];

  switch (l1_entry & 3) {
  case 0:
    assert(0 && "IGNORE");
    break;
  case 1: {
    // Page table descriptor: continue the walk in the L2 table.
    uint32_t *l2_table = (uint32_t*) (l1_entry & PTD_BASE);
    uint32_t l2_entry = l2_table[level_2_idx(vaddr)];
    switch (l2_entry & 3) {
    case 0: assert(0 && "IGNORE"); break;
    case 1: // Large address
      return (l2_entry & LPAGE_MASK) | (vaddr & ~LPAGE_MASK);
    case 2: // Small address
    case 3:
      return (l2_entry & PAGE_MASK) | (vaddr & ~PAGE_MASK);
    }

    break;
  }

  case 2:
    // Section or supersection, supersections not supported for now
    return (l1_entry & SEC_MASK) | (vaddr & ~SEC_MASK);
    break;

  case 3: // fall through, reserved
  default:
    assert(0 && "RESERVED");
  }

  // NOTE(review): if the asserts are compiled out with NDEBUG, control
  // can fall off the end of this non-void function (undefined
  // behaviour) — consider calling panic() here instead.
  assert(0 && "unreachable");
}


/*!
  Resolves a virtual address to its physical address.

  \param vm    vm map used for resolving user-space addresses, may be NULL.
  \param vaddr Virtual address to probe.
  \return The physical address corresponding to vaddr.
 */
pa_t
hw_probe(vm_map_t *vm, va_t vaddr)
{
#if 1
  // Software table walk, used as a qemu workaround (see manual_walk).
  return manual_walk(vm, vaddr);
#else
// FIXME: DO NOT REMOVE: This should replace manual_walk call on hw
  pa_t paddr;


  __asm__ volatile (
    "mcr p15, 0, %[vaddr], c7, c8, 0    @ Priveleged read operation\n"
    "mrc p15, 0, %[paddr], c7, c4, 0\n"
    : [paddr] "=r" (paddr) // Out
    : [vaddr] "r" (vaddr) // In
    );

  return (paddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
#endif
}


/*!
  Removes the mappings covering [addr, addr+sz) from the page tables.

  \param vm   Target vm map, or NULL for the currently active tables.
  \param addr Page-aligned virtual start address.
  \param sz   Size of the range in bytes.

  Only the L2 entries are cleared; stale L1 descriptors are left in
  place (see TODO below).  The TLB is invalidated afterwards.
 */
void
hw_unmap(vm_map_t *vm, va_t addr, size_t sz)
{
  uint32_t ttbcr = read_ttbcr();
  uint32_t mask = 0;

  // Determine which page table to use (TTBR0 or TTBR1)
  switch (ttbcr & TTBCR_N) {
  case 0: mask = 0; break;
  case 1: mask = 0x80000000; break;
  case 2: mask = 0xc0000000; break;
  case 3: mask = 0xe0000000; break;
  case 4: mask = 0xf0000000; break;
  case 5: mask = 0xf8000000; break;
  case 6: mask = 0xfc000000; break;
  case 7: mask = 0xfe000000; break;
  default:
    assert(0 && "invalid case");
  }

  uint32_t *l1_table = NULL;
  uint32_t *l2_table_pa = NULL;
  uint32_t *l2_table_va = NULL;

  if ((addr & mask) == 0) {
    if (vm == NULL) l1_table = (uint32_t*) (read_tbr0() & 0xffffff80); // Current table
    else l1_table = (uint32_t*) vm->page_table;//(proc->cpu_helper.ttbr0_va & 0xffffff80); // Given process table
  } else
    l1_table = (uint32_t*) (read_tbr1() & 0xffffc000);


  va_t mapping_va = addr;
  size_t remaining_range = sz;

  unsigned pages = remaining_range / PAGE_SIZE;

  // Clear the L2 entries, one per page in the range.
  for (unsigned i = 0 ; i < pages ; i ++) {
    // May alloc if the l2 table does not exist, but this is irrelevant at the moment, since the page tables are stupid
    get_l2_table(vm, l1_table, mapping_va+i*PAGE_SIZE,
                 &l2_table_pa, &l2_table_va);
    l2_table_va[level_2_idx(mapping_va+i*PAGE_SIZE)] = 0;
  }

  // TODO: Invalidate the l1 entries no longer in use, this is strictly not necessary since we
  //       invalidate all the l2 entries these point out. But we should do this for cleanliness...

  hw_tlb_invalidate(addr, sz);
}

