#ifdef __KERNEL__
#include <linux/highmem.h>
#endif

#include "paging_api.h"
#include "paging.h"
#include "exceptions.h"


#ifdef __KERNEL__

/*
 * single paging mode: bind test paging to kernel first level paging structure
 */

/*
 * Copy the kernel half of the currently-active PML4 into the test
 * paging structure `virt`, so kernel virtual memory stays mapped
 * while the test tables are loaded.
 *
 * Entries 272..511 cover the upper (kernel) part of the address
 * space; the lower entries are left to the caller.
 */
void pml4_map_kernel(u64 *virt) {
  u64 cur_phys;
  u64 *cur_pml4;
  int idx;

  /* CR3 bits 11:0 hold flags, not address bits — mask them off. */
  cur_phys = read_cr3() & ~0xfffLL;
  cur_pml4 = kmap (phys_to_page (cur_phys));

  for (idx = 272; idx < 512; idx++) {
    virt[idx] = cur_pml4[idx];
  }

  kunmap (phys_to_page (cur_phys));
}

#endif


/*
 * Build a 32-bit page-structure entry from a (page-aligned) physical
 * address and a set of flag bits.  The caller is responsible for the
 * address being aligned so that address and flag bits do not overlap.
 */
u32 mk_pse32 (u32 addr, u32 flags)
{
  u32 entry = addr;
  entry |= flags;
  return entry;
}



/*
 * Build a 32-bit PSE-36 4MB page-directory entry.
 *
 * PSE-36 PDE layout (Intel SDM Vol. 3A, sec. 4.3):
 *   bits 31:22 - physical address bits 31:22
 *   bits 20:13 - physical address bits 39:32
 *   bits 11:0  - flag bits
 *
 * Fix: the entry must also carry physical address bits 31:22 in
 * place; previously only the high bits were encoded, so every >4GB
 * mapping pointed at a frame with zeroed low address bits.  The high
 * part is masked to 8 bits so addresses beyond 40 bits cannot spill
 * into the reserved bit 21.
 */
u32 mk_pse32_pse36 (u64 addr, u32 flags)
{
  u32 low  = (u32) addr & 0xffc00000u;              /* PA 31:22, in place  */
  u32 high = (u32) ((addr >> 32) & 0xffu) << 13;    /* PA 39:32 -> 20:13   */
  return low | high | flags;
}



/*
 * Install a 32-bit 4MB page entry at table[pos].
 *
 * Addresses that fit in 32 bits get a plain PSE entry; addresses at
 * or above 4GB raise the test interrupt vector 111 (hooked via
 * exceptions.h) and are then encoded in PSE-36 format.
 */
void set_pse32 (u32 *table, int pos, u64 addr, u32 flags)
{
  if ((addr >> 32) == 0) {
    table[pos] = mk_pse32( (u32) addr, flags );
    return;
  }

  /* >4GB physical address: signal the test harness, then use PSE-36. */
  asm volatile ("int $111");
  table[pos] = mk_pse32_pse36(addr, flags);
}



/* Turn on the given flag bits in a 32-bit paging entry. */
void set_flags32 (u32 *entry, u32 flags)
{
  *entry = *entry | flags;
}



/* Turn off the given flag bits in a 32-bit paging entry. */
void unset_flags32 (u32 *entry, u32 flags)
{
  *entry = *entry & ~flags;
}



/*
 * pae and 64bit paging
 */

/*
 * Build a 64-bit page-structure entry (PAE / long mode) from a
 * page-aligned physical address and flag bits.
 */
u64 mk_pse64 (u64 addr, u64 flags)
{
  u64 entry = addr;
  entry |= flags;
  return entry;
}



/* Install a 64-bit entry (address | flags) at table[pos]. */
void set_pse64 (u64 *table, int pos, u64 addr, u64 flags)
{
  table[pos] = addr | flags;
}



/* Turn on the given flag bits in a 64-bit paging entry. */
void set_flags64 (u64 *entry, u64 flags)
{
  *entry = *entry | flags;
}



/* Turn off the given flag bits in a 64-bit paging entry. */
void unset_flags64 (u64 *entry, u64 flags)
{
  *entry = *entry & ~flags;
}


/*
 * Re-point table[pos] at physical address `phys`, keeping its flag
 * bits: the bits selected by addr_mask are cleared, then phys is
 * OR-ed in.
 *
 * NOTE(review): assumes phys only carries bits inside addr_mask —
 * stray bits would corrupt the flag field; confirm at call sites.
 */
void map64(u64 *table, unsigned pos, u64 addr_mask, u64 phys) {
  u64 entry = table[pos];
  entry &= ~addr_mask;
  entry |= phys;
  table[pos] = entry;
}

/*
 * Update the flag bits of table[pos]: clear the bits in uflags,
 * then set the bits in flags (set wins where the two overlap).
 */
void flags64(u64 *table, unsigned pos, u64 flags, u64 uflags) {
  u64 entry = table[pos];
  entry &= ~uflags;
  entry |= flags;
  table[pos] = entry;
}