// Physical memory allocator, for user processes,
// kernel stacks, page-table pages,
// and pipe buffers. Allocates whole 4096-byte pages.

#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "spinlock.h"
#include "riscv.h"
#include "defs.h"

void freerange(void *pa_start, void *pa_end);

extern char end[]; // first address after kernel.
                   // defined by kernel.ld.

// A node of the free-page list; the free page itself stores the link,
// so the list costs no extra memory.
struct run {
  struct run *next;
};


// Bookkeeping for per-page reference counts (one byte per managed page),
// added to support copy-on-write fork.  Laid out by kinit().
struct pgrefcount {
	void * start           ; // first byte of ref-counted physical memory
	uint64 pgnum           ; // number of pages tracked in [start, PHYSTOP)
  uint8  inited          ; // nonzero once freerange() has populated the free list
	void * refpg           ; // the refcount array itself (8 pages, placed before start)
};

// Allocator state: a single lock guards both the free list and the
// reference-count metadata.
struct {
  struct spinlock lock;
  struct run *freelist;
  struct pgrefcount pgref;
} kmem;

void
kinit()
{
  initlock(&kmem.lock, "kmem");
  void * aligend_end = (void *)(PGROUNDUP((uint64)end));
  if( (PHYSTOP - ((uint64)aligend_end)) < 8*4096 ) {
    panic("No free mem for metadata,cannot launch xv6");
  }

  kmem.pgref.refpg  = aligend_end;
  kmem.pgref.start  =  (void *) (((uint64)aligend_end) + (4096 << 3) );
  kmem.pgref.pgnum  =  ((uint64)PHYSTOP - (uint64)(kmem.pgref.start)) >> 12;
  kmem.pgref.inited = 0;
  
  printf("=====kmem init=====\n");
  printf("end location     :%p \n",end);
  printf("metadata location:%p \n",kmem.pgref.refpg);
  printf("PHYSTART         :%p \n",kmem.pgref.start);
  printf("PHYSTOP          :%p \n",(void *)(PHYSTOP));
  printf("memsize          :%lx\n",((uint64)PHYSTOP - (uint64)(kmem.pgref.start)));
  printf("pgnum            :%lu\n",kmem.pgref.pgnum);
  printf("=====kmem end=====\n");
  memset(kmem.pgref.start,0,4096 << 3);
  freerange(kmem.pgref.start, (void*)PHYSTOP);
}

// Hand every whole page in [pa_start, pa_end) to the free list, then
// mark the reference-count machinery as live so kfree/kalloc start
// maintaining counts.
void
freerange(void *pa_start, void *pa_end)
{
  char *page = (char*)PGROUNDUP((uint64)pa_start);
  char *limit = (char*)pa_end;

  while(page + PGSIZE <= limit){
    kfree(page);
    page += PGSIZE;
  }

  acquire(&kmem.lock);
  kmem.pgref.inited = 1;
  release(&kmem.lock);
}

// Increment the reference count of the physical page containing pa.
// Called when an additional mapping (e.g. a COW child) shares the page.
void Add_ref_count(void* pa) {
  acquire(&kmem.lock);

  uint64 idx = ((PGROUNDDOWN((uint64)pa)) - ((uint64)kmem.pgref.start)) >> 12;

  // Bounds-check BEFORE indexing the array.  (Bug fix: the index was
  // previously used to read refpg[idx] first, an out-of-bounds read,
  // and when !inited an out-of-range index was written unchecked.)
  if(idx >= kmem.pgref.pgnum)
    panic("ref wrong");

  uint8 ref = ((uint8 *)kmem.pgref.refpg)[idx];
  // Once the allocator is live, a page being shared must already be allocated.
  if(ref < 1 && kmem.pgref.inited)
    panic("ref wrong");
  // The count is a uint8; refuse to wrap past 255.
  if(ref == 255)
    panic("ref overflow");

  ((uint8 *)kmem.pgref.refpg)[idx] = ref + 1;

  release(&kmem.lock);
}
// Placeholder for a boot-time free path; currently empty and unused.
void kinitfree(void * pa) {

}
// Free the page of physical memory pointed at by pa, which normally
// should have been returned by a call to kalloc().  (The exception is
// when initializing the allocator; see kinit above.)  Once refcounting
// is live, the page only returns to the free list when its count
// reaches zero.
void
kfree(void *pa)
{
  struct run *r;

  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  // Refuse to free the refcount metadata pages themselves.
  if( ((uint8 *)pa) < ((uint8 *)kmem.pgref.start) )
    panic("free pgcnt metadata.");

  acquire(&kmem.lock);
  if(kmem.pgref.inited) {
    uint64 idx = ((PGROUNDDOWN((uint64)pa)) - ((uint64)kmem.pgref.start)) >> 12;

    // Bounds-check BEFORE indexing the array.  (Bug fix: refpg[idx] was
    // previously read before the idx >= pgnum check.)
    if(idx >= kmem.pgref.pgnum)
      panic("ref wrong");
    uint8 ref = ((uint8 *)kmem.pgref.refpg)[idx];
    if(ref < 1)
      panic("ref wrong");

    ((uint8 *)kmem.pgref.refpg)[idx] = ref - 1;

    if(ref - 1 != 0) {
      // Page still shared (COW): keep it allocated.
      release(&kmem.lock);
      return;
    }
  }

  // Last reference gone (or allocator still booting): release the page.
  memset(pa, 1, PGSIZE); // fill with junk to catch dangling refs
  r = (struct run*)pa;
  r->next = kmem.freelist;
  kmem.freelist = r;
  release(&kmem.lock);
}

// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use, with its reference
// count set to 1.  Returns 0 if the memory cannot be allocated.
void *
kalloc(void)
{
  struct run *r;

  acquire(&kmem.lock);

  r = kmem.freelist;
  if(r) {
    // A metadata page on the free list means the list was corrupted.
    if( ((uint8 *)r) < ((uint8 *)kmem.pgref.start) )
      panic("free pgcnt metadata.");

    kmem.freelist = r->next;
    uint64 idx = ((PGROUNDDOWN((uint64)r)) - ((uint64)kmem.pgref.start)) >> 12;

    // Bounds-check BEFORE indexing the array.  (Bug fix: refpg[idx] was
    // previously read before the idx >= pgnum check.)
    if(idx >= kmem.pgref.pgnum)
      panic("alloc ref wrong");
    // A page coming off the free list must have a zero count.
    if(((uint8 *)kmem.pgref.refpg)[idx] != 0)
      panic("alloc ref wrong");
    ((uint8 *)kmem.pgref.refpg)[idx] = 1;
  }

  release(&kmem.lock);

  if(r)
    memset((char*)r, 5, PGSIZE); // fill with junk
  return (void*)r;
}


// Resolve a copy-on-write fault on oldpage: if this is the sole
// reference, reuse the page in place; otherwise take a fresh page
// from the free list, copy the contents, and move one reference
// from the old page to the new one.
// Returns the page the caller should map (oldpage or the copy),
// or 0 if no memory is available.
void* cow_new_page(void * oldpage) {
  struct run *r;

  if(((uint64)oldpage % PGSIZE) != 0 || (char*)oldpage < end || (uint64)oldpage >= PHYSTOP)
    panic("no legal page");

  if( ((uint8 *)oldpage) < ((uint8 *)kmem.pgref.start) )
    panic("free pgcnt metadata.");

  acquire(&kmem.lock);

  uint64 oldidx = ((PGROUNDDOWN((uint64)oldpage)) - ((uint64)kmem.pgref.start)) >> 12;

  uint8 oldref = ((uint8 *)kmem.pgref.refpg)[oldidx];
  if(oldref == 0)
    panic("try to copy unalloc page");

  if(oldref == 1) {
    // Sole owner: safe to write the existing page directly.
    release(&kmem.lock);
    return oldpage;
  }

  // Shared page: take a fresh page off the free list (an inlined
  // kalloc, done under the lock we already hold).
  r = kmem.freelist;
  if(r == 0) {
    release(&kmem.lock);
    return 0; // out of memory; caller must handle
  }

  if( ((uint8 *)r) < ((uint8 *)kmem.pgref.start) )
    panic("free pgcnt metadata.");

  if(PGROUNDDOWN((uint64)r) == PGROUNDDOWN((uint64)oldpage))
    panic("try to alloc the same page for COW");

  kmem.freelist = r->next;

  uint64 newidx = ((PGROUNDDOWN((uint64)r)) - ((uint64)kmem.pgref.start)) >> 12;

  // Bounds-check before touching the refcount array (kalloc does the
  // same; this check was missing here).
  if(newidx >= kmem.pgref.pgnum)
    panic("alloc ref wrong in cow new page");
  if(((uint8 *)kmem.pgref.refpg)[newidx] != 0)
    panic("alloc ref wrong in cow new page");

  // One reference moves from the old page to the new copy.
  ((uint8 *)kmem.pgref.refpg)[newidx] = 1;
  ((uint8 *)kmem.pgref.refpg)[oldidx] = oldref - 1;

  memmove(r, oldpage, PGSIZE);
  release(&kmem.lock);
  return r;
}
