/*
  S.M.A.C.K - An operating system kernel
  Copyright (C) 2010,2011 Mattias Holm and Kristian Rietveld
  For licensing and a full list of authors of the kernel, see the files
  COPYING and AUTHORS.
*/

#include <vm.h>
#include <hal.h>
#include <process.h>
#include <sync.h>
#include <assert.h>
#include <bittools.h>

#define PAGE_SIZE 4096

extern pa_range_t pa_mem_map[];
extern vm_map_t kernel_vm_map[];

#define VM_VALID_FLAGS (VM_RWX|VM_DEVICE|VM_SHARED|VM_WIRED|VM_CONTIG|\
                        VM_NO_CACHE|VM_NULL)

/* Append a region descriptor at the tail of vm's region list.
   The caller must guarantee that reg sorts after every region already in
   the list; no ordering check is performed here. */
void
vm_append(vm_map_t *vm, vm_region_t *reg)
{

  // Descriptors come from vm_steal_page() in the kernel heap; presumably
  // [0x80000000, 0xc0000000) is that heap's VA window — TODO confirm against
  // the kernel memory layout.
  assert(0x80000000 <= (va_t)reg && (va_t)reg < 0xc0000000);
  LIST_APPEND(vm->region, reg, link);
}

/* Insert region descriptor reg directly after pos in vm's region list.
   The caller is responsible for picking a pos that keeps the list sorted. */
void
vm_insert(vm_map_t *vm, vm_region_t *pos, vm_region_t *reg)
{
  // Same kernel-heap address sanity check as vm_append.
  assert(0x80000000 <= (va_t)reg && (va_t)reg < 0xc0000000);
  LIST_INSERT_AFTER(vm->region, pos, reg, link);
}


vm_region_t* vm_get_new_region(vm_map_t *vm, unsigned flags, size_t sz);

/* Insert a fully initialised region descriptor into vm's address-sorted
   region list, keeping the list sorted and non-overlapping.
   Panics (assert) when no legal insertion point exists. */
void
vm_insert_region_descriptor(vm_map_t *vm, vm_region_t *region)
{
  assert(vm->start <= region->start && "valid mem range");
  assert(region->start+region->size <= vm->start+vm->size && "valid mem range");
  vm_region_t *e = NULL;

  // Fast path: empty list, or region starts at/after the end of the
  // current last region — append at the tail.
  if (LIST_EMPTY(vm->region) ||
      (LIST_LAST(vm->region)->start + LIST_LAST(vm->region)->size <=
       region->start)) {
    // Inserting as the first region desc or the last one in the list
    vm_append(vm, region);
    return;
  }

  // Region must be inserted somewhere in the middle
  LIST_FOREACH (e, vm->region, link) {
    va_t region_end = e->start + e->size;   // end of current element e

    if (LIST_NEXT(e, link)) {
      vm_region_t *next = LIST_NEXT(e, link);

      // The new region fits entirely in the gap between e and next.
      if (region->start >= region_end &&
          region->start+region->size <= next->start) {
        assert(region->start+region->size <= next->start && "not overlapping");
        vm_insert(vm, e, region);
        return;
      } else if (region->start > e->start && region->start < next->start) {
        // Region starts between e and next but does not fit cleanly: it
        // overlaps e and/or next. Fall through to the panic below.
        printf("something fishy happened (%p:%p) < (%p:%p) < (%p:%p)\n",
               e->start, e->size,
               region->start, region->size, next->start, next->size);
        break;
      }
    }
  }

  printf("tried to insert region desc: (%p, %p), (%p, %p)\n",
         vm->start, vm->size,
         region->start, region->size);

  assert(0 && "unreachable code");
}

/* Look up one of the statically allocated kernel vm maps by region type.
   The enum value is bounds-checked before indexing the table. */
vm_map_t*
vm_get_kernel_map(kernel_region_type_t vm)
{
  assert(VM_REG_KERNEL <= vm && vm <= VM_REG_DEVICE);
  return kernel_vm_map + vm;
}

// Physical page descriptor lists: every RAM page descriptor is linked on
// exactly one of these lists at any time.
static pmap_list_t wired_ppages;  // pages pinned in memory (kernel image, device mappings)
static pmap_list_t used_ppages;   // pages handed out to ordinary mappings
static pmap_list_t free_ppages;   // pages available for allocation
// Array with one descriptor per RAM page, placed at _free_ram_start (see pmap_init)
static pmap_region_t *ram_ppages;

// Global lock serialising all vm map and physical allocator operations
mutex_t vm_lock = MUTEX_INITIALISER;

/* Translate a physical page descriptor to the physical address of the page
   it describes. The descriptor's index in ram_ppages[] equals the page
   index, and RAM starts at the kernel text base (_stext). */
static inline pa_t
pmap_get_pa(pmap_region_t *desc)
{
  extern uint8_t _stext;
  uintptr_t index = ((uintptr_t)desc - (uintptr_t)ram_ppages) / sizeof(pmap_region_t);
  pa_t pa = ((uintptr_t)&_stext) + index * PAGE_SIZE;
  assert(pa % PAGE_SIZE == 0);
  return pa;
}

/* Return the physical page descriptor backing the first page of reg.
   Resolves the region's starting virtual address to its physical address
   and indexes the descriptor array (RAM begins at _stext). */
pmap_region_t*
vm_get_pmap(vm_map_t *vm, vm_region_t *reg)
{
  extern uint8_t _stext;
  pa_t pa = vm_probe_physical(vm, (void*)reg->start);
  pa_t offset = pa - ((pa_t)&_stext);
  return ram_ppages + offset / PAGE_SIZE;
}

/* Scan the kernel heap's region list for the first gap of at least one page
   and return its starting virtual address.
   Returns the heap base when the region list is empty.
   Panics (assert) when the heap VA space is exhausted. */
va_t
vm_get_first_free_kernel_heap_address(void)
{
  vm_map_t *kernel_heap = &kernel_vm_map[VM_REG_KERNEL_HEAP];
  va_t vaddr = kernel_heap->start;   // default covers the empty-list case
  vm_region_t *e = NULL;
  LIST_FOREACH (e, kernel_heap->region, link) {
    if (LIST_NEXT(e, link)) {
      // Is the gap between this region and the next at least a page?
      if (LIST_NEXT(e, link)->start - (e->start + e->size) >= PAGE_SIZE) {
        // We have our virtual address
        vaddr = e->start + e->size;
        break;
      }
    } else {
      // At the end of the list
      // Is there at least a page left between the last region and heap end?
      if ((kernel_heap->start + kernel_heap->size) - (e->start + e->size)
          >= PAGE_SIZE) {
        // We have our virtual address
        vaddr = e->start + e->size;
        break;
      } else {
        assert(0 && "no more virtual addresses in kernel heap");
      }
    }
  }
  return vaddr;
}

/* Allocate 'pages' physically contiguous pages whose base address is
   aligned to 'align'. On success the run's descriptors are moved to the
   wired list when VM_WIRED is in flags, otherwise to the used list, and
   the first descriptor is returned. Returns NULL when no suitable run
   exists (or pages == 0).

   Relies on free_ppages being ordered by address: physically contiguous
   pages have array-consecutive descriptors that are also list-consecutive. */
pmap_region_t*
pmap_alloc_contiguous(size_t pages, size_t align, unsigned flags)
{
  if (pages == 0) return NULL; // also avoids pages-1 wrap below

  pmap_region_t *pg = LIST_FIRST(free_ppages);

  while (pg) {
    // Candidate base must satisfy the alignment constraint
    if (pmap_get_pa(pg) != align_pa(pmap_get_pa(pg), align)) goto nextpage;
    pmap_region_t *a = pg;

    // BUG FIX: the old loop iterated 'pages' times, demanding pages+1
    // array-consecutive descriptors and rejecting a valid run that ended
    // the free list. Only the pages-1 internal links must be checked.
    for (size_t i = 0 ; i < pages - 1 ; i ++) {
      if (&pg[1] != LIST_NEXT(pg, link)) goto nextpage;
      pg = LIST_NEXT(pg, link);
    }

    // pg is now the last descriptor of the run [a .. a+pages-1]
    pmap_region_t *b = pg;
    assert(((uintptr_t)b-(uintptr_t)a)/sizeof(pmap_region_t) == pages-1);

    // We have a contiguous region: remove from free_ppages and account it
    // on the proper list.
    LIST_REMOVE_RANGE(free_ppages, a, b, link);
    if (flags & VM_WIRED) {
      LIST_APPEND_RANGE(wired_ppages, a, b, link);
    } else {
      LIST_APPEND_RANGE(used_ppages, a, b, link);
    }

    return a;
  nextpage:
    pg = LIST_NEXT(pg, link);
  }

  return NULL;
}


/* Allocate page_count (not necessarily contiguous) physical pages by taking
   the first page_count entries of the free list and moving them to the used
   list. Returns the first descriptor, or NULL when fewer than page_count
   pages are free (FIX: the old code walked off the list and crashed).
   Safe because callers hold vm_lock and the moved range terminates the
   used list, so LIST_NEXT from the result ends in NULL. */
pmap_region_t*
pmap_alloc(unsigned page_count)
{
  if (page_count == 0) return NULL;

  pmap_region_t *lst_start = LIST_FIRST(free_ppages);
  if (lst_start == NULL) return NULL; // out of physical memory

  pmap_region_t *lst_end = lst_start;
  for (unsigned i = 0 ; i < page_count - 1 ; i ++) {
    lst_end = LIST_NEXT(lst_end, link);
    if (lst_end == NULL) return NULL; // fewer than page_count pages free
  }

  LIST_REMOVE_RANGE(free_ppages, lst_start, lst_end, link);
  LIST_APPEND_RANGE(used_ppages, lst_start, lst_end, link);

  return lst_start;
}

/* Like pmap_alloc, but the allocated pages are placed on the wired list so
   they are never paged out. Returns the first descriptor, or NULL when fewer
   than page_count pages are free (FIX: the old code walked off the list and
   crashed). Callers must hold vm_lock. */
pmap_region_t*
pmap_alloc_wired(unsigned page_count)
{
  if (page_count == 0) return NULL;

  pmap_region_t *lst_start = LIST_FIRST(free_ppages);
  if (lst_start == NULL) return NULL; // out of physical memory

  pmap_region_t *lst_end = lst_start;
  for (unsigned i = 0 ; i < page_count - 1 ; i ++) {
    lst_end = LIST_NEXT(lst_end, link);
    if (lst_end == NULL) return NULL; // fewer than page_count pages free
  }

  LIST_REMOVE_RANGE(free_ppages, lst_start, lst_end, link);
  LIST_APPEND_RANGE(wired_ppages, lst_start, lst_end, link);

  return lst_start;
}


/* Initialise the physical page allocator.
   nrampages: total number of PAGE_SIZE pages of RAM.
   Places the descriptor array at the start of free RAM, maps it, links the
   descriptors onto the free list, then moves the pages occupied by the
   kernel image plus the descriptor array themselves onto the wired list. */
static inline void
pmap_init(size_t nrampages)
{
  extern uint8_t _free_ram_start, _stext;
  size_t ppdesc_size = nrampages * sizeof(pmap_region_t);
  // Physical page descriptors are placed exactly at free ram start
  ram_ppages = (pmap_region_t*)&_free_ram_start;
  // NOTE(review): maps VA == PA for the descriptor array — presumably the
  // kernel is identity-mapped in this range; confirm against hw_map.
  hw_map(NULL, (va_t)ram_ppages, (pa_t)ram_ppages, ppdesc_size, VM_SUPER_RW);

  // Initialize the free ppage list
  for (unsigned i = 0 ; i < nrampages-1 ; i ++) {
    ram_ppages[i].logsize = 0;
    LIST_APPEND(free_ppages, &ram_ppages[i], link);
  }
  // NOTE(review): the last descriptor gets its logsize set but is never
  // linked onto free_ppages — confirm whether keeping the final page out of
  // the allocator is deliberate.
  ram_ppages[nrampages-1].logsize = 0;
  // Transfer the kernel resident pages to the wired list, since free_ppages list is ordered by address
  // initially, we just relink the section of the list corresponding to the wired list
  // (the 0x0fff mask assumes PAGE_SIZE == 4096)
  size_t ppdescs_pages = ppdesc_size / PAGE_SIZE + ((ppdesc_size & 0x0fff) ? 1 : 0);
  size_t kernel_pages = ((uintptr_t)&_free_ram_start - (uintptr_t)&_stext) / PAGE_SIZE + ppdescs_pages;

  LIST_REMOVE_RANGE(free_ppages, &ram_ppages[0], &ram_ppages[kernel_pages-1], link);
  LIST_APPEND_RANGE(wired_ppages, &ram_ppages[0], &ram_ppages[kernel_pages-1], link);
}

/* The functions below are responsible for "stealing" space for the region
   descriptors.
   NOTE: If you do not take care to insert the returned region descriptor into a
         region descriptor list, the kernel heap will start to leak memory.
*/
// Pool of currently unused region descriptors, refilled one heap page at a
// time by vm_steal_region_descriptor().
static LIST_HEAD(_vm_region_t) free_regions;

// vm_steal_page steals a page in the kernel heap and maps it in using hw_map.
// The page is not registered anywhere; this task is up to the caller.
// The function will succeed and return a pointer (virtual address) to the
// stolen page if there is space available. If no space exists, the function
// will generate a kernel panic.
void*
vm_steal_page(void)
{
  vm_map_t *kernel_heap = &kernel_vm_map[VM_REG_KERNEL_HEAP];

  // Find a free page-sized hole in the heap VA space (panics if none)
  va_t vaddr = vm_get_first_free_kernel_heap_address();
  assert(vaddr != 0);

  // Back the hole with one wired physical page
  pmap_region_t *ppage = pmap_alloc_wired(1);

  pa_t paddr = pmap_get_pa(ppage);

  // We now have a valid physical address and a valid free virtual address in the kernel heap
  // we now associate these addresses, and initialise the newly created page of region descriptors
  hw_map(NULL, vaddr, paddr, PAGE_SIZE, VM_SUPER_RW);

  // Slot 0 of the fresh page is a descriptor describing the page itself
  vm_region_t *region_array = (vm_region_t*)vaddr;
  region_array[0].start = vaddr;
  region_array[0].size = PAGE_SIZE;
  region_array[0].pmap.head = ppage;
  region_array[0].pmap.tail = ppage;
  // NOTE(review): region_array[0].flags is left uninitialised — the
  // self-descriptor inherits whatever bytes were in the new page; confirm
  // whether it should be marked VM_WIRED.

  // Insert self descriptive descriptor
  vm_insert_region_descriptor(kernel_heap, &region_array[0]);

  return (void*)vaddr;
}

vm_region_t*
vm_steal_region_descriptor(void)
{
  if (LIST_EMPTY(free_regions)) {
    va_t vaddr = (va_t)vm_steal_page();
    vm_region_t *region_array = (vm_region_t*)vaddr;
    for (unsigned i = 1 ; i < PAGE_SIZE/sizeof(vm_region_t) ; i ++) {
      LIST_APPEND(free_regions, &region_array[i], link);
    }
  }

  vm_region_t *reg = LIST_FIRST(free_regions);

  LIST_REMOVE(free_regions, reg, link);

  return reg;
}

/* Unlink a region descriptor from vm's region list and return it to the
   global free descriptor pool.
   NOTE: the remove-before-append order matters — both lists thread through
   the descriptor's single 'link' field. */
void
vm_release_region_descriptor(vm_map_t *vm, vm_region_t *region)
{
  LIST_REMOVE(vm->region, region, link);
  LIST_APPEND(free_regions, region, link);
}

/* Allocate a region of sz bytes from vm at the first virtual address where
   it fits, insert its descriptor into vm's sorted region list and return
   it. flags are stored verbatim on the descriptor.
   Returns NULL when sz does not fit anywhere in the map. */
vm_region_t*
vm_get_new_region(vm_map_t *vm, unsigned flags, size_t sz)
{
  vm_region_t *e = NULL;

  if (LIST_EMPTY(vm->region)) {
    if (sz > vm->size) return NULL;

    // Map is unused and size is valid: carve from the map base
    e = vm_steal_region_descriptor();
    e->start = vm->start;
    e->size = sz;
    e->flags = flags;

    vm_append(vm, e);
    return e;
  }

  vm_region_t *desc = vm_steal_region_descriptor();

  // Find the first element followed by a gap large enough for sz bytes;
  // the last element is used when only the tail gap can hold it.
  // NOTE(review): the strict '>' rejects a gap of exactly sz bytes —
  // confirm whether '>=' was intended.
  LIST_FOREACH (e, vm->region, link) {
    va_t region_end = e->start + e->size;

    if (LIST_NEXT(e, link)) {
      vm_region_t *next = LIST_NEXT(e, link);
      if (next->start > region_end + sz) break;
    } else {
      if ((vm->start + vm->size) < (region_end + sz)) {
        // BUG FIX: desc used to leak on this failure path. It is not yet
        // linked into vm->region, so put it straight back on the free pool
        // rather than going through vm_release_region_descriptor.
        LIST_APPEND(free_regions, desc, link);
        return NULL;
      }
      break;
    }
  }

  // Place the new region flush against the end of e
  desc->start = e->start + e->size;
  desc->size = sz;
  desc->flags = flags;
  LIST_INSERT_AFTER(vm->region, e, desc, link);
  return desc;
}

/* Allocate a region of sz bytes at the exact virtual address addr in vm,
   insert its descriptor into the sorted region list and return it.
   Returns NULL when [addr, addr+sz) falls outside the map or overlaps an
   existing region. */
vm_region_t*
vm_get_new_region_at_addr(vm_map_t *vm, unsigned flags, va_t addr, size_t sz)
{
  vm_region_t *e = NULL;

  if (addr < vm->start || vm->start + vm->size < addr + sz) {
    return NULL; // Address not in vm_map range
  }

  if (LIST_EMPTY(vm->region)) {
    // Map is unused and the range is valid
    e = vm_steal_region_descriptor();
    e->start = addr;
    e->size = sz;
    e->flags = flags;

    vm_append(vm, e);
    return e;
  }

  vm_region_t *desc = vm_steal_region_descriptor();

  // Check if the descriptor can be squeezed in before the first region
  if (addr < LIST_FIRST(vm->region)->start && addr + sz <= LIST_FIRST(vm->region)->start) {
    desc->start = addr;
    desc->size = sz;
    desc->flags = flags;

    LIST_INSERT_BEFORE(vm->region, LIST_FIRST(vm->region), desc, link);
    return desc;
  }

  // Otherwise locate the descriptor that should precede the one to insert
  LIST_FOREACH (e, vm->region, link) {
    if (LIST_NEXT(e, link)) {
      vm_region_t *next = LIST_NEXT(e, link);
      if (next->start > addr) break; // got it
    }
  }

  if (e == NULL) {
    // Loop ran to completion: addr lies at or beyond the last region
    if (LIST_LAST(vm->region)->start + LIST_LAST(vm->region)->size <= addr) {
      desc->start = addr;
      desc->size = sz;
      desc->flags = flags;
      vm_append(vm, desc);
      return desc;
    }
    // NOTE(review): desc is not linked into vm->region here; this relies on
    // LIST_REMOVE tolerating an unlinked element (pre-existing pattern).
    vm_release_region_descriptor(vm, desc);
    return NULL;
  }

  // Our node should go between e and e->next
  if (addr + sz > LIST_NEXT(e, link)->start) {
    // BUG FIX: this path used to leak desc (the old "BUG:" comment);
    // release it the same way the not-found path above does.
    vm_release_region_descriptor(vm, desc);
    return NULL; // next block overlaps addr
  }

  desc->start = addr;
  desc->size = sz;
  desc->flags = flags;

  LIST_INSERT_AFTER(vm->region, e, desc, link);

  return desc;
}


void
vm_init(void)
{
  extern uint8_t _free_ram_start, _stext;

  hw_mmu_init();

  // Compute total ammount of RAM
  // BUG: Assumes all RAM modules are consecutive in memory.
  size_t total_ram = 0;
  for (unsigned i = 0 ; pa_mem_map[i].name != NULL ; i ++) {
    if (pa_mem_map[i].kind == PA_RAM) {
      total_ram += pa_mem_map[i].size;
    }
  }

  size_t nrampages = total_ram/PAGE_SIZE;

  pmap_init(nrampages);
}

/* Return true when addr lies inside reg's half-open virtual range,
   i.e. reg->start <= addr < reg->start + reg->size. */
static inline bool
vm_is_in_region(vm_region_t *reg, va_t addr)
{
  assert(reg != NULL);
  // FIX: lower bound now uses <= for consistency with vm_append/vm_insert,
  // which accept a descriptor at exactly 0x80000000.
  assert(0x80000000 <= (va_t)reg && (va_t)reg < 0xc0000000);

  return reg->start <= addr && (reg->start + reg->size > addr);
}

/* Return true when the half-open ranges [a->start, a->start+a->size) and
   [b->start, b->start+b->size) do not overlap. */
static inline bool
vm_are_disjoint_regions(vm_region_t *a, vm_region_t *b)
{
  // BUG FIX: the old a-before-b clause used '<' while the mirrored clause
  // used '>=', so exactly adjacent regions (a_end == b->start) were
  // reported as overlapping in one direction only. Half-open ranges are
  // disjoint iff one ends at or before the other begins. The redundant
  // ordering pre-tests are dropped.
  return (a->start + a->size <= b->start)
      || (b->start + b->size <= a->start);
}

/* Find the region of vm containing the virtual address addr.
   Returns NULL when addr is not mapped by any region. */
vm_region_t*
vm_find_region(vm_map_t *vm, va_t addr)
{
  vm_region_t *it = NULL;

  LIST_FOREACH (it, vm->region, link) {
    if (vm_is_in_region(it, addr)) {
      return it;
    }
  }

  return NULL;
}

/* Find the last region of vm that ends before addr (i.e. the region a new
   mapping at addr would follow). Returns NULL when the list is empty or
   when addr falls inside/at the candidate region. */
vm_region_t*
vm_find_region_before(vm_map_t *vm, va_t addr)
{
  vm_region_t *e = NULL;
  LIST_FOREACH (e, vm->region, link) {
    if (LIST_NEXT(e, link)) {
      vm_region_t *next = LIST_NEXT(e, link);
      if (next->start > addr) break;
    } else {
      break; // This is at the end, but we break to avoid overwriting e
    }
  }

  // BUG FIX: with an empty region list the loop left e == NULL and the
  // comparison below dereferenced a NULL pointer.
  if (e == NULL) return NULL;

  // NOTE(review): strict '<' means addr equal to the region's end (first
  // byte past it) yields NULL — confirm whether '<=' was intended.
  if (e->start + e->size < addr) return e; // Address not in region
  return NULL; // Address is in the previous region
}

/* Return the region immediately following the one that contains addr,
   or NULL when addr is unmapped (or the containing region is the last). */
vm_region_t*
vm_find_region_after(vm_map_t *vm, va_t addr)
{
  vm_region_t *it = NULL;
  LIST_FOREACH (it, vm->region, link) {
    if (!vm_is_in_region(it, addr)) continue;
    return LIST_NEXT(it, link);
  }

  return NULL;
}

/* Map len bytes into 'map'. When addr is 0 (and VM_NULL is not set) the
   kernel chooses the virtual address; otherwise the mapping is placed at
   addr exactly. len must be a multiple of PAGE_SIZE (asserted).
   Returns the mapped virtual address, or NULL on invalid flags, a full
   map, or physical memory exhaustion. */
void*
vm_map(vm_map_t *map, unsigned flags, va_t addr, size_t len)
{
  assert(((len & (PAGE_SIZE-1)) == 0)&& "vm map len must be rounded to nearest page size");

  // Ensure flags have no invalid flags
  if ((flags & ~(VM_VALID_FLAGS)) != 0) {
    return NULL;
  }

  // Ensure that addr is zero if VM_NULL is set
  if ((flags & VM_NULL) && addr != 0) {
    return NULL;
  }

  mutex_lock(&vm_lock);
  vm_region_t *reg = NULL;
  // If the address is don't care (and it is not the null page),
  // assign an address from the appropriate region.
  if ((!(flags & VM_NULL)) && addr == 0) {
    reg = vm_get_new_region(map, flags, len);
  } else {
    reg = vm_get_new_region_at_addr(map, flags, addr, len);
  }

  // No space left in vm map?
  if (reg == NULL) {
    mutex_unlock(&vm_lock);
    return NULL;
  }

  addr = reg->start;

  size_t page_count = len / PAGE_SIZE;

  if (flags & VM_DEVICE) {
    // Device mappings must always be physically consecutive
    size_t page_count_2 = clp2_32(page_count);
    pmap_region_t *ppages = pmap_alloc_contiguous(page_count_2, PAGE_SIZE,
                                                  VM_WIRED);
    if (ppages == NULL) {
      vm_release_region_descriptor(map, reg);
      mutex_unlock(&vm_lock);
      return NULL;
    }
    pa_t paddr = pmap_get_pa(ppages);
    reg->flags |= VM_WIRED;
    reg->pmap.head = ppages;
    // BUG FIX: tail was set to the first descriptor; the run's descriptors
    // are array-consecutive (asserted in pmap_alloc_contiguous), so the
    // last one is ppages[page_count_2 - 1].
    reg->pmap.tail = &ppages[page_count_2 - 1];

    hw_map(map, addr, paddr, len, flags);
    mutex_unlock(&vm_lock);
    return (void*)addr;
  } else {
    pmap_region_t *ppages_first = NULL;
    if (flags & VM_CONTIG) {
      size_t page_count_2 = clp2_32(page_count);
      ppages_first = pmap_alloc_contiguous(page_count_2, PAGE_SIZE, 0);
    } else {
      ppages_first = pmap_alloc(page_count);
    }

    if (ppages_first == NULL) {
      vm_release_region_descriptor(map, reg);
      mutex_unlock(&vm_lock);
      return NULL;
    }

    // Map each power-of-two sized chunk. The allocated range sits at the
    // tail of its accounting list, so LIST_NEXT terminates with NULL.
    pmap_region_t *lst_end = ppages_first;
    va_t tmp_addr = addr;

    while (lst_end) {
      // (renamed from 'len' — the old name shadowed the parameter)
      size_t chunk_len = (1 << lst_end->logsize) * PAGE_SIZE;
      hw_map(map, tmp_addr, pmap_get_pa(lst_end), chunk_len, flags);
      tmp_addr += chunk_len;

      // BUG FIX: the loop used to assign pmap.head here (only to overwrite
      // it after the loop) and pmap.tail was never set; track the tail as
      // vm_map_align does.
      reg->pmap.tail = lst_end;

      lst_end = LIST_NEXT(lst_end, link);
    }
    reg->pmap.head = ppages_first;
    mutex_unlock(&vm_lock);

    return (void*)addr;
  }
}

/* Like vm_map, but the returned virtual address is aligned to 'alignment'
   (a power of two, asserted). For VM_DEVICE and VM_CONTIG mappings the
   physical base is aligned as well; otherwise physical alignment is
   ignored. Returns NULL on invalid flags, a full map, or physical memory
   exhaustion. */
void*
vm_map_align(vm_map_t *map, unsigned flags, va_t alignment, size_t len)
{
  assert(is_pow2_va(alignment) && "must align to power of 2 blocks");
  assert(((len & (PAGE_SIZE-1)) == 0) && "vm map len must be rounded to nearest page size");

  va_t addr;
  size_t page_count = len/PAGE_SIZE;

  // Ensure flags have no invalid flags
  if ((flags & ~(VM_VALID_FLAGS)) != 0) {
    return NULL;
  }

  mutex_lock(&vm_lock);

  // Over-allocate by 'alignment' bytes so an aligned start always exists
  vm_region_t *reg = vm_get_new_region(map, flags, len+alignment);
  // No space left in vm map?
  if (reg == NULL) {
    mutex_unlock(&vm_lock);
    return NULL;
  }

  // Align virtual address
  addr = align_va(reg->start, alignment);

  // Align region, this is a bit ugly, but should essentially free up the
  // data before the region and after the region that is allocated before
  reg->start = addr;
  reg->size = len;

  if (flags & VM_DEVICE) {
    // Device mappings must always be physically consecutive
    size_t page_count_2 = clp2_32(page_count);
    pmap_region_t *ppages = pmap_alloc_contiguous(page_count_2, alignment,
                                                  VM_WIRED);
    if (ppages == NULL) {
      // BUG FIX: reg used to leak on this path
      vm_release_region_descriptor(map, reg);
      mutex_unlock(&vm_lock);
      return NULL;
    }
    // The allocated range was appended at the tail of the wired list, so
    // walking to the list end finds the run's last descriptor
    pmap_region_t *ppages_end = ppages;
    while (LIST_NEXT(ppages_end, link)) {
      ppages_end = LIST_NEXT(ppages_end, link);
    }

    reg->pmap.head = ppages;
    reg->pmap.tail = ppages_end;

    // The contiguous allocator honoured the alignment request
    pa_t paddr = pmap_get_pa(ppages);
    pa_t paddr_align = align_pa(paddr, alignment);
    assert(paddr == paddr_align);

    reg->flags |= VM_WIRED;

    // Since the data is aligned (both virtual and physical) we can just map
    // the entire range here
    hw_map(map, addr, paddr_align, len, flags);
    mutex_unlock(&vm_lock);
    return (void*)addr;
  }

  if (flags & VM_CONTIG) {
    // Contiguous (non-device) mappings also need one physical run
    size_t page_count_2 = clp2_32(page_count);
    pmap_region_t *ppages = pmap_alloc_contiguous(page_count_2, alignment, 0);

    if (ppages == NULL) {
      // BUG FIX: reg used to leak on this path
      vm_release_region_descriptor(map, reg);
      mutex_unlock(&vm_lock);
      return NULL;
    }
    pmap_region_t *ppages_end = ppages;
    while (LIST_NEXT(ppages_end, link)) {
      ppages_end = LIST_NEXT(ppages_end, link);
    }
    // (the old code assigned head/tail twice; once is enough)
    reg->pmap.head = ppages;
    reg->pmap.tail = ppages_end;

    pa_t paddr = pmap_get_pa(ppages);
    pa_t paddr_align = align_pa(paddr, alignment);
    assert(paddr == paddr_align);

    // Since the data is aligned (both virtual and physical) we can just map
    // the entire range here
    hw_map(map, addr, paddr_align, len, flags);

    mutex_unlock(&vm_lock);
    return (void*)addr;
  }

  // Non-contiguous case: physical alignment is ignored
  pmap_region_t *ppages_first = pmap_alloc(page_count);
  if (ppages_first == NULL) {
    // BUG FIX: the result was previously unchecked (and reg leaked)
    vm_release_region_descriptor(map, reg);
    mutex_unlock(&vm_lock);
    return NULL;
  }

  pmap_region_t *ppages = ppages_first;
  va_t tmp_addr = addr;

  while (ppages) {
    // (renamed from 'len' — the old name shadowed the parameter)
    size_t chunk_len = (1 << ppages->logsize) * PAGE_SIZE;
    hw_map(map, tmp_addr, pmap_get_pa(ppages), chunk_len, flags);
    tmp_addr += chunk_len;
    reg->pmap.tail = ppages;
    ppages = LIST_NEXT(ppages, link);
  }

  // Finally, set the pmap range head
  reg->pmap.head = ppages_first;

  mutex_unlock(&vm_lock);
  return (void*)addr;
}


/* Return the physical pages backing reg to the free list.
   NOTE(review): pages are always removed from used_ppages — callers must
   not pass wired regions (vm_unmap asserts !VM_WIRED before calling this).
   NOTE(review): walking LIST_NEXT from the first backing page assumes the
   region's descriptors are still consecutive on used_ppages; reg->pmap
   head/tail are ignored — confirm this holds after arbitrary alloc/free
   interleavings. */
void
pmap_free(vm_map_t *vm, vm_region_t *reg)
{
  pmap_region_t *ppage = vm_get_pmap(vm, reg);
  pmap_region_t *tmp = NULL;
  size_t pages = reg->size / PAGE_SIZE;

  for (size_t i = 0 ; i < pages ; i++) {
    tmp = LIST_NEXT(ppage, link);   // save successor before relinking

    LIST_REMOVE(used_ppages, ppage, link);
    LIST_APPEND(free_ppages, ppage, link);

    ppage = tmp;
  }
}


/* Unmap and free the region of vm that contains addr.
   Wired regions must never be unmapped (asserted). Logs a warning when
   addr does not fall inside any mapped region. */
void
vm_unmap(vm_map_t *vm, void *addr)
{
  mutex_lock(&vm_lock);

  vm_region_t *reg = vm_find_region(vm, (va_t)addr);

  if (reg != NULL) {
    assert((reg->flags & VM_WIRED) == 0);

    // Return the backing physical pages, then tear down the hardware
    // mapping and recycle the region descriptor.
    pmap_free(vm, reg);

    hw_unmap(vm, reg->start, reg->size);
    vm_release_region_descriptor(vm, reg);
  } else {
    // BUG FIX: message typo ("cound" -> "could")
    printf("WARNING: vm_unmap on invalid region (could not find %p)\n", addr);
  }

  mutex_unlock(&vm_lock);
}

/* Map len bytes of physical memory starting at paddr into 'map' at a
   kernel-chosen virtual address. len must be a page multiple (asserted).
   Returns the virtual address, or NULL on invalid flags / full map. */
void*
vm_map_physical(vm_map_t *map, unsigned flags, pa_t paddr, size_t len)
{
  assert(((len & (PAGE_SIZE-1)) == 0) && "vm map len must be rounded to nearest page size");

  // Ensure flags have no invalid flags
  if ((flags & ~(VM_VALID_FLAGS)) != 0) {
    return NULL;
  }

  mutex_lock(&vm_lock);
  vm_region_t *reg = NULL;

  reg = vm_get_new_region(map, flags, len);

  // No space left in vm map?
  if (reg == NULL) {
    mutex_unlock(&vm_lock);
    return NULL;
  }

  // If the physical range lies in RAM, wire its pages so the allocator
  // cannot hand them out again.
  // NOTE(review): 0x80000000..0x90000000 is a hard-coded RAM window —
  // presumably matching pa_mem_map on this platform; confirm. The pages
  // are assumed to currently sit on free_ppages, which is not checked.
  if (paddr >= 0x80000000 && paddr < 0x90000000)
    {
      int index = (paddr - 0x80000000) / PAGE_SIZE;
      int end_index = index + len / PAGE_SIZE - 1;

      LIST_REMOVE_RANGE(free_ppages, &ram_ppages[index], &ram_ppages[end_index], link);
      LIST_APPEND_RANGE(wired_ppages, &ram_ppages[index], &ram_ppages[end_index], link);
    }

  va_t vaddr = reg->start;

  hw_map(map, vaddr, paddr, len, flags);
  mutex_unlock(&vm_lock);

  return (void*)vaddr;
}

/* Resolve a virtual address in 'map' to its physical address.
   Thin wrapper over the HAL page-table probe. */
pa_t
vm_probe_physical(vm_map_t *map, void *vaddr)
{
  va_t va = (va_t)vaddr;
  return hw_probe(map, va);
}

/* Return true when vaddr falls inside some region of 'map'. */
bool
vm_map_exists(vm_map_t *map, void *vaddr)
{
  vm_region_t *reg = vm_find_region(map, (va_t)vaddr);
  return reg != NULL;
}

/* Copy len bytes from src in vmsrc to dst in vmdst. Not yet implemented.
   NOTE(review): with NDEBUG defined the assert compiles out and this stub
   silently does nothing — consider a hard panic instead. */
void
vm_copy(vm_map_t *vmdst, void *dst, vm_map_t *vmsrc, void *src, size_t len)
{
  // Silence unused-parameter warnings until the implementation lands
  (void)vmdst; (void)dst; (void)vmsrc; (void)src; (void)len;
  assert(0 && "vm_copy not implemented");
}

bool
vm_valid_user_address_range(void *start, size_t size)
{
  proc_t *p = proc_current();
  if (!p) return false;

  vm_region_t *reg = vm_find_region(&p->vm_map, (va_t)start);
  if (!reg) return false; // Is the address mapped at all

  if ((va_t)start < reg->start) return false;
  if ((va_t)start > reg->start+reg->size) return false;
  if ((va_t)start + size < reg->start) return false;
  if ((va_t)start + size >= reg->start+reg->size) return false;
  if ((va_t)start >= (va_t)start + size) return false; // Overflow protection

  return true;
}
