#include <framework/framework_i.h>

/* For the naive mapping strategy */
#define PADDR(kva) ((physaddr_t)(kva) - KBASE)
#define KADDR(pa)  (void *)((pa) + KBASE)

/* Store the final physical memory map */
static unsigned int mlayout_count;
static uintptr_t    mlayout_addr[MEMMAP_SCALE];

uint32_t        mmap_count;
struct memmap_s mmap[MEMMAP_MAXCOUNT];

static unsigned int mmap_node_count;
static uintptr_t    mmap_node_addr[MEMMAP_SCALE];
static unsigned int mmap_node_flag[MEMMAP_SCALE];

#define PA_VAILD(addr) (mlayout_bin_search(addr) & 1)
/* Upper-bound search over the sorted layout boundaries: returns the
 * index of the first boundary strictly greater than addr.  An odd
 * result means addr falls inside a usable interval (see PA_VAILD). */
static int
mlayout_bin_search(physaddr_t addr)
{
	 int lo = 0, hi = mlayout_count - 1;

	 while (lo < hi)
	 {
		  int mid = lo + ((hi - lo) >> 1);
		  if (addr < mlayout_addr[mid])
			   hi = mid;
		  else lo = mid + 1;
	 }

	 return lo;
}

static int  page_count;
static int *page_rc;
static int *page_status;

static struct buddy_context_s bp_buddy;
static int                    buddy_offset[LAPIC_COUNT];
static struct buddy_context_s buddy[LAPIC_COUNT];

/* Allocate memory space when initializing system */
static physaddr_t init_free;

/* Boot-time bump allocator: carve `size` bytes out of the first usable
 * region at or above `init_free`, aligned to `align` (assumed to be a
 * power of two).  Returns a kernel virtual address via KADDR, or NULL
 * when no suitable region exists or the result would exceed KSIZE. */
static void *
init_alloc(size_t size, size_t align, int verbose)
{
	 if (verbose)
		  kprintf("[init_alloc] init_free = %p, size = %d, align = 0x%x\n", init_free, size, align);

	 /* Find the first segment possible */
     int lid = mlayout_bin_search(init_free);
     lid |= 1;  /* odd boundary index == end of a usable interval */

     while (lid < mlayout_count)
     {
		  /* Jump forward to the start boundary of this usable interval */
		  if (init_free < mlayout_addr[lid - 1])
			   init_free = mlayout_addr[lid - 1];

		  /* Find the continuous memory space that would contain the
		   * need */
		  /* `|= align - 1` sets init_free to (aligned address - 1);
		   * the `++init_free` after the loop completes the round-up. */
		  if ((init_free |=  align - 1) + size < mlayout_addr[lid])
			   break;
		  else lid += 2;  /* step to the next usable interval's end */
     }

     /* NULL is failed */
     if (lid >= mlayout_count) return NULL;
     /* Refuse anything outside the kernel's direct-map window */
     if (init_free + size >= KSIZE) return NULL;
     else ++ init_free;

     void *result = KADDR(init_free);
	 
     if (verbose)
		  kprintf("[init_alloc] result = %p\n", result);

     init_free += size;

	 return result;
}

/* Perform all boot-time allocations: the boot-processor buddy node
 * table, the per-page refcount/status arrays, and one kernel stack
 * per logical CPU.
 * Returns 0 on success or -E_NO_MEM when any allocation fails. */
static int
do_init_alloc(int verbose)
{
	 int i;

	 /* Alloc and init for buddy system */
	 if ((bp_buddy.node = (struct buddy_node_s *)
		  init_alloc(sizeof(struct buddy_node_s) * (((page_count + 3) >> 2) << 1), PAGE_SIZE, verbose))
		 == NULL) return -E_NO_MEM;

	 /* Per-page reference counts and status words.  These results were
	  * previously used unchecked; a NULL here would fault later. */
	 page_rc = (int *)
		  init_alloc(sizeof(int) * page_count, PAGE_SIZE, verbose);
	 if (page_rc == NULL) return -E_NO_MEM;

	 page_status = (int *)
		  init_alloc(sizeof(int) * page_count, PAGE_SIZE, verbose);
	 if (page_status == NULL) return -E_NO_MEM;

	 /* Initial stacks for LCPUs */
	 uint32_t stacks_start;
	 if ((stacks_start = (uint32_t)
		  init_alloc(ekf_sysconf.lcpu_count * KSTACK_SIZE, PAGE_SIZE, verbose))
		 == 0)
		  return -E_NO_MEM;

	 /* Stacks grow down: the top of stack i is the base of stack i+1 */
	 for (i = 0; i != ekf_sysconf.lcpu_count; ++ i)
	 {
		  lcpu_static[i].initial_stack_top = stacks_start + (i + 1) * KSTACK_SIZE;
	 }

	 return 0;
}

static int
pmem_make_layout(int verbose)
{
     uint32_t i, j, k, t;

     mmap_node_count = 0;

     for (i = 0; i != mmap_count; ++i)
     {
		  mmap_node_addr[mmap_node_count] = mmap[i].base;
		  mmap_node_flag[mmap_node_count] = mmap[i].flag;
		  ++mmap_node_count;

		  /* ignore if the end overflows */
		  if (!MEMMAP_ADDR_OVERFLOW(mmap[i].end))
		  {
			   mmap_node_addr[mmap_node_count] =  mmap[i].end;
			   mmap_node_flag[mmap_node_count] = -mmap[i].flag;
			   ++mmap_node_count;
		  }
     }

     /* Sort the mmap node table */
     for (i = 0; i != mmap_node_count; ++i)
     {
		  j = i;
		  for (k = i + 1; k != mmap_node_count; ++k)
		  {
			   if (mmap_node_addr[k] < mmap_node_addr[j])
					j = k;
		  }
		  t = mmap_node_addr[i]; mmap_node_addr[i] = mmap_node_addr[j]; mmap_node_addr[j] = t;
		  t = mmap_node_flag[i]; mmap_node_flag[i] = mmap_node_flag[j]; mmap_node_flag[j] = t;
     }

     /* for debugging */
     /* for (i = 0; i != mmapNodeCount; ++i) */
     /* 	  kprintf("addr : %x, flag : %x\n", mmapNodeAddr[i], mmapNodeFlag[i]); */

     /* Make memory layout table */
     int32_t vaild_level, reclaim_level;
     bool last_vaild, v;
     
     vaild_level = 0; reclaim_level = 0; v = false;
     mlayout_count = 0;

     i = 0;
     while (1)
     {
		  if (i >= mmap_node_count) break;

		  last_vaild = v;
		  while (1)
		  {
			   switch (mmap_node_flag[i])
			   {
			   case MEMMAP_FREE:
					++vaild_level;
					break;
			   case -MEMMAP_FREE:
					--vaild_level;
					break;
			   case MEMMAP_RECLAIMABLE:
					++reclaim_level;
					break;
			   case -MEMMAP_RECLAIMABLE:
					--reclaim_level;
					break;
			   default:
					if (mmap_node_flag[i] > 0)
						 --vaild_level;
					else ++vaild_level;
			   }

			   ++i;
			   if (i >= mmap_node_count) break;
			   if (mmap_node_addr[i - 1] != mmap_node_addr[i]) break;
		  }
	  
		  if (vaild_level == 0)
			   v = (reclaim_level > 0);
		  else v = (vaild_level > 0);

		  /* for debugging */
		  // kprintf("%x %d %d %d\n", mmapNodeAddr[i - 1], vaildLevel, reclaimLevel, v);

		  if (v != last_vaild)
		  {
			   mlayout_addr[mlayout_count] = mmap_node_addr[i - 1];
			   ++mlayout_count;
		  }
     }

	 if (verbose)
	 {
		  kprintf("Physical memory layout boundary:\n");
		  for (i = 0; i != mlayout_count; ++i)
		  {
			   kprintf(" [%d] = %08x\n", i, mlayout_addr[i]);
		  }
	 }

	 page_count = mlayout_addr[mlayout_count - 1] >> PAGE_SHIFT;
	 return 0;
}

/* One entry of the multiboot (v1) memory map handed over by GRUB.
 * `size` is the byte count of the rest of the entry (excluding the
 * `size` field itself), which is why the walker in pmem_init() steps
 * by `size + sizeof(uint32_t)`.
 * NOTE(review): the multiboot layout puts `base` at offset 4 with no
 * padding; that matches the i386 ABI (64-bit fields 4-byte aligned)
 * but would need packing on a 64-bit target — confirm target ABI. */
typedef struct multiboot_memory_map_s {
	 uint32_t size;
	 uint64_t base;
	 uint64_t length;
	 uint32_t type;
} multiboot_memory_map_t;

/* Predicate used when seeding the boot-processor buddy allocator:
 * true iff every byte of physical page `page` lies inside one usable
 * interval of the layout table. */
static int
_is_free(int page)
{
	 /* Both ends of the page must resolve to the same boundary index,
	  * and that index must be odd (= inside a usable interval). */
	 int a = mlayout_bin_search(page << PAGE_SHIFT);
	 int b = mlayout_bin_search(((page + 1) << PAGE_SHIFT) - 1);
	 /* NOTE(review): `page << PAGE_SHIFT` is a signed int shift; it
	  * overflows for page numbers >= 2^(31-PAGE_SHIFT).  Presumably
	  * physical memory stays below that here — confirm. */
	 return (a == b &&
			 mlayout_addr[a] > (page << PAGE_SHIFT) &&
			 (a & 1) == 1);
}

/* Build the boot-processor buddy allocator over all `page_count`
 * pages, using _is_free() to mark the initially usable ones.
 * Returns buddy_build()'s status. */
static int
pmem_bp_allocator_init(int verbose)
{
	 (void)verbose;	/* kept for signature symmetry; not used yet */
	 buddy_init();
	 return buddy_build(&bp_buddy, page_count, _is_free);
}

/* Discover the physical memory map (from GRUB multiboot info or the
 * EKF bootloader), append reservations for the regions the kernel
 * already occupies, build the layout table, run the boot-time
 * allocations, and construct the boot-processor buddy allocator.
 * Returns 0 on success or a negative error code. */
int
pmem_init(void)
{
	 int i, err;

	 // XXX GRUB MAGIC
	 if (mb_magic == 0x2BADB002)
	 {
		  uint32_t *_mb_info  = (uint32_t *)KADDR(mb_info_phys);
		  /* multiboot flags bit 6 == the mmap_* fields are valid */
		  if ((_mb_info[0] & (1 << 6)) == 0)
		  {
			   kprintf("No MMAP\n");
			   
			   /* No map available: assume one free region of 256 MiB */
			   mmap_count = 0;
			   
			   mmap[mmap_count].base = (uintptr_t)0;
			   mmap[mmap_count].end =  (uintptr_t)0x10000000;
			   mmap[mmap_count].flag = MEMMAP_FREE;
			   ++ mmap_count;
		  }
		  else
		  {
			   /* Walk the multiboot mmap buffer; words 12 and 11 of
			    * the info block hold mmap_addr and mmap_length. */
			   char    *_mmap_addr = (char *)KADDR(_mb_info[12]);
			   uint32_t _mmap_size = _mb_info[11];
			   char    *_mmap_end = _mmap_addr + _mmap_size;
			   while (PADDR(_mmap_addr) < PADDR(_mmap_end))
			   {
					multiboot_memory_map_t *mb_mmap = (multiboot_memory_map_t *)_mmap_addr;
					
					mmap[mmap_count].base = mb_mmap->base;
					mmap[mmap_count].end  = mb_mmap->base + mb_mmap->length;
					mmap[mmap_count].flag = mb_mmap->type;
					++ mmap_count;
					
					/* the entry's size field excludes itself */
					_mmap_addr += mb_mmap->size + sizeof(uint32_t);
			   }
		  }
	 }
	 else
	 {
		  kprintf("READ MEMMAP FROM EKF BOOTLOADER\n");
		  
		  /* Bootloader leaves (base, length) entries at a fixed
		   * address; lengths are converted to end addresses below. */
		  mmap_count = *(uint32_t *)MEMMAP_BASE;
		  memmove(mmap, MEMMAP_ENTRY, MEMMAP_SIZE * mmap_count);

		  if (mmap_count == 0)
		  {
			   kprintf("Cannot detect physical memory map.\n");
			   return -E_FAULT;
		  }

		  for (i = 0; i != mmap_count; ++i)
			   mmap[i].end += mmap[i].base;
	 }

	 kprintf("mmap_count = %d\n", mmap_count);
	 for (i = 0; i != mmap_count; ++ i)
	 {
		  kprintf("start = %08x, end = %08x, flag = %02x\n",
				  (uint32_t)mmap[i].base, (uint32_t)mmap[i].end, mmap[i].flag);
	 }

	 /* mark the lower memory reserved */
     mmap[mmap_count].base = (uintptr_t)0;
     mmap[mmap_count].end =  (uintptr_t)0x00100000;
     mmap[mmap_count].flag = MEMMAP_RESERVED;
	 ++ mmap_count;

     /* generate the initial layout */
     pmem_make_layout(0);

	 /* Boot allocations start above the DMA window or the kernel
	  * image, whichever ends higher. */
	 physaddr_t init_alloc_start;
	 init_alloc_start = DMA_BOUNDARY_PAGE << PAGE_SHIFT;
	 if (PADDR(_end) > init_alloc_start)
		  init_alloc_start = PADDR(_end);
	 init_free = init_alloc_start;

	 /* Reserve the kernel image itself */
	 mmap[mmap_count].base = (uintptr_t)PADDR(_text);
	 mmap[mmap_count].end  = (uintptr_t)PADDR(_end);
	 mmap[mmap_count].flag = MEMMAP_RESERVED;
	 ++ mmap_count;

	 if ((err = do_init_alloc(0)) < 0)
		  return err;
	 
	 /* Reserve everything the boot allocator handed out */
	 mmap[mmap_count].base = init_alloc_start;
	 mmap[mmap_count].end  = init_free;
	 mmap[mmap_count].flag = MEMMAP_RESERVED;
	 ++ mmap_count;

	 /* Rebuild the layout with the new reservations (verbose) */
	 pmem_make_layout(1);
	 /* build the buddy system */
	 pmem_bp_allocator_init(0);
	 /* NOTE(review): mmap_count is never checked against
	  * MEMMAP_MAXCOUNT while entries are appended — confirm bound. */
	 return 0;
}

/* Allocate `num` contiguous physical pages from the current LCPU's
 * buddy allocator.  On success stores the physical address in *addr,
 * marks each page with `status`, and returns 0.  Returns -E_INVAL for
 * multi-page requests with a non-managed status, -E_NO_MEM when the
 * allocator is exhausted.  `dma` only feeds the failure diagnostic. */
int
pmem_page_alloc(int num, physaddr_t *addr, int dma, int status)
{
	 if (status != PPAGE_MANAGED && num > 1)
		  return -E_INVAL;

	 /* The buddy context is per-LCPU, so masking local interrupts is
	  * the only serialization needed. */
	 uint32_t flags = irq_save();
	 int result = buddy_alloc(buddy + lcpu_id, num);
	 irq_restore(flags);

	 if (result == -1)
	 {
		  kprintf("pmem page alloc failed with dma = %d num = %d\n", dma, num);
		  return -E_NO_MEM;
	 }

	 /* Translate the allocator-local page index to a global one */
	 result += buddy_offset[lcpu_id];
	 *addr = ((physaddr_t)result) << PAGE_SHIFT;

	 int i;
	 for (i = 0; i != num; ++ i)
	 {
		  page_status[result + i] = status;
		  page_rc[result + i] = 0;
	 }
	 return 0;
}

/* Like pmem_page_alloc(), but draws from the boot-processor buddy
 * allocator (used before the per-LCPU allocators are set up), uses
 * global page indices directly, and fails silently.
 * Returns 0 on success, -E_INVAL / -E_NO_MEM on error. */
int
pmem_bp_page_alloc(int num, physaddr_t *addr, int dma, int status)
{
	 if (status != PPAGE_MANAGED && num > 1)
		  return -E_INVAL;

	 uint32_t flags = irq_save();
	 int result = buddy_alloc(&bp_buddy, num);
	 irq_restore(flags);

	 if (result == -1)
		  return -E_NO_MEM;

	 /* bp_buddy indexes pages from 0, so no offset translation */
	 *addr = ((physaddr_t)result) << PAGE_SHIFT;

	 int i;
	 for (i = 0; i != num; ++ i)
	 {
		  page_status[result + i] = status;
		  page_rc[result + i] = 0;
	 }
	 return 0;
}

/* Return the block starting at physical address `addr` to the current
 * LCPU's buddy allocator.  Out-of-range addresses are ignored. */
void
pmem_page_free(physaddr_t addr)
{
	 int page = addr >> PAGE_SHIFT;
	 if (page >= page_count) return;

	 uint32_t flags = irq_save();
	 /* buddy_free returns the freed block size; nothing uses it */
	 buddy_free(buddy + lcpu_id, page - buddy_offset[lcpu_id]);
	 irq_restore(flags);
}

/* Adjust the reference count of a PPAGE_CORE page by `delta`; other
 * page states and out-of-range addresses are silently ignored. */
void
pmem_page_add_ref(physaddr_t addr, int delta)
{
	 int idx = addr >> PAGE_SHIFT;
	 if (idx < page_count && page_status[idx] == PPAGE_CORE)
	 {
		  uint32_t irq_state = irq_save();
		  page_rc[idx] += delta;
		  irq_restore(irq_state);
	 }
}

/* Take one reference on a PPAGE_CORE page; other page states and
 * out-of-range addresses are silently ignored. */
void
pmem_page_inc_ref(physaddr_t addr)
{
	 int idx = addr >> PAGE_SHIFT;
	 if (idx < page_count && page_status[idx] == PPAGE_CORE)
	 {
		  uint32_t irq_state = irq_save();
		  page_rc[idx] += 1;
		  irq_restore(irq_state);
	 }
}

/* Drop one reference from a PPAGE_CORE page; when the count hits zero
 * the page goes back to the current LCPU's buddy allocator. */
void
pmem_page_dec_ref(physaddr_t addr)
{
	 int idx = addr >> PAGE_SHIFT;
	 if (idx >= page_count || page_status[idx] != PPAGE_CORE) return;

	 uint32_t irq_state = irq_save();
	 page_rc[idx] -= 1;
	 if (page_rc[idx] == 0)
		  buddy_free(buddy + lcpu_id, idx - buddy_offset[lcpu_id]);
	 irq_restore(irq_state);
}

/* Return the status word of the page containing `addr`, or -1 when
 * the address lies beyond the managed range. */
int
pmem_page_status(physaddr_t addr)
{
	 int idx = addr >> PAGE_SHIFT;
	 return (idx < page_count) ? page_status[idx] : -1;
}

/* Drive a page through a caller-supplied finite state machine:
 * tf_vec maps the current status to its successor (-1 = transition
 * disallowed).  Returns the new status, or -1 when the address is out
 * of range, the current status is >= size, or tf_vec forbids it.
 * NOTE(review): a negative page_status would index tf_vec out of
 * bounds — presumably statuses are non-negative; confirm. */
int
pmem_page_fsm(physaddr_t addr, int *tf_vec, int size)
{
	 int result = 0;
	 int page = addr >> PAGE_SHIFT;
	 if (page >= page_count) return -1;
	 uint32_t flags = irq_save();
	 /* Look up and apply the transition atomically w.r.t. local IRQs */
	 if (page_status[page] < size &&
		 tf_vec[page_status[page]] != -1)
		  result = page_status[page] = tf_vec[page_status[page]];
	 else result = -1;
	 irq_restore(flags);
	 return result;
}

/* Hand the boot-processor buddy allocator over to the boot LCPU's
 * per-CPU slot so the regular pmem_page_* entry points can be used. */
int
pmem_bp_fix(void)
{
	 /* set the current lapic id */
	 lcpu_id = ekf_sysconf.lcpu_boot;

	 buddy_offset[lcpu_id] = 0;
	 buddy[lcpu_id] = bp_buddy;	/* plain struct copy */

	 return 0;
}
