#include "memory.h"

#include "bitmap.h"
#include "debug.h"
#include "global.h"
#include "list.h"
#include "print.h"
#include "stdint.h"
#include "string.h"
#include "sync.h"
#include "thread.h"
/**
 * @brief Initializes the memory pool, dividing it into kernel and user pools.
 *
 * This function initializes the memory pool, which is used for managing
 * physical memory allocation. It calculates the sizes of the kernel and user
 * pools based on the total memory available. The pools are divided by
 * allocating half of the free pages to the kernel pool and the other half to
 * the user pool.
 *
 * @param mem_to_assign Total memory size to be assigned to the memory pool.
 */
void mem_pool_init(uint32_t mem_to_assign);
void output_mem_pool_info();

/**
 * @brief Allocates a block of memory consisting of a specified number of pages.
 *
 * This function allocates a block of memory, consisting of 'pg_cnt' consecutive
 * pages, from the specified memory pool (kernel or user). It performs the
 * following steps:
 *
 * 1. Requests a virtual address range for the memory block.
 * 2. Maps the allocated physical pages to the allocated virtual address range.
 *
 * @param pf       Pool flags indicating whether the allocation is for the
 * kernel or user.
 * @param pg_cnt   The number of consecutive pages to allocate.
 *
 * @return A pointer to the starting virtual address of the allocated memory
 * block if successful; NULL if the allocation fails.
 */
void* page_alloc(enum pool_flags flag, uint32_t pg_cnt);
/**
 * @brief Apply for a range of virtual addresses from the specified memory pool.
 *
 * This function applies for a range of virtual addresses from the specified
 * memory pool based on the given pool flags and the number of pages requested.
 * It scans the virtual address bitmap associated with the pool to find a
 * contiguous range of free bits, marks them as used, and returns the start
 * address of the allocated virtual address range.
 *
 * @param pf The memory pool flag indicating whether it's a kernel or user
 * memory pool.
 * @param pg_cnt The number of pages for which virtual addresses are requested.
 * @return Pointer to the start address of the allocated virtual address range,
 * or NULL if allocation fails.
 */
void*     virtual_addr_applying(enum pool_flags pf, uint32_t pg_cnt);
uint32_t* pte_ptr(uint32_t vaddr);
uint32_t* pde_ptr(uint32_t vaddr);
/**
 * @brief Allocate a physical page from the specified memory pool.
 *
 * This function allocates a physical page from the specified memory pool.
 * It scans the bitmap associated with the pool to find a free page, marks
 * it as used, and returns the physical address of the allocated page.
 *
 * @param m_pool Pointer to the memory pool structure.
 * @return Pointer to the allocated physical page, or NULL if no free page is
 * available.
 */
void* palloc(struct pool* m_pool);

/**
 * @brief Add a mapping entry to the page table.
 *
 * This function adds a mapping entry to the page table for the specified
 * virtual address, mapping it to the specified physical address. If the page
 * directory entry (PDE) for the virtual address exists, it updates the
 * corresponding page table entry (PTE); otherwise, it creates a new page
 * directory entry and page table entry.
 *
 * @param vaddr The virtual address to be mapped.
 * @param pg_paddr The physical address to which the virtual address is mapped.
 */
void page_table_add(void* vaddr, void* pg_paddr);

// Index of the page-directory entry / page-table entry for a virtual
// address.  Arguments are fully parenthesized so that expression
// arguments (e.g. PDE_IDX(base + off)) expand safely.
#define PDE_IDX(addr) (((addr) & 0xffc00000) >> 22)
#define PTE_IDX(addr) (((addr) & 0x003ff000) >> 12)

struct mem_block_desc k_block_desc[DESC_CNT];

struct pool         kernel_pool, user_pool;
struct virtual_addr kernel_vaddr;

/**
 * @brief One-shot initialization of the whole memory subsystem.
 *
 * Builds the physical pools, readies their locks and prepares the
 * kernel's arena block descriptors, then prints the pool layout.
 */
void mem_init()
{
  put_str("memory initialize start.\r\n");

  // Total physical memory size read from 0xb00 — presumably stored
  // there by the boot loader; confirm against the loader code.
  uint32_t total_mem_bytes = *(uint32_t*)(0xb00);
  mem_pool_init(total_mem_bytes);

  mutex_init(&kernel_pool.lock);
  mutex_init(&user_pool.lock);

  // Size-class descriptors for the kernel arena allocator (sys_malloc).
  block_desc_init(k_block_desc);

  output_mem_pool_info();
  put_str("memory initialize done.\r\n");
}

/**
 * @brief Allocate pg_cnt consecutive zeroed pages from the kernel pool.
 *
 * Serializes on the kernel pool lock, delegates the actual mapping to
 * page_alloc() and zero-fills the region on success.
 *
 * @param pg_cnt number of pages requested.
 * @return start virtual address of the block, or NULL on failure.
 */
void* kernel_page_alloc(uint32_t pg_cnt)
{
  mutex_acquire(&kernel_pool.lock);

  void* block_start = page_alloc(kernel_pflag, pg_cnt);

  // Hand out only zeroed memory.
  if (block_start != NULL)
  {
    memset(block_start, 0, pg_cnt * PG_SIZE);
  }

  mutex_release(&kernel_pool.lock);
  return block_start;
}

/**
 * @brief Allocate pg_cnt consecutive zeroed pages from the user pool.
 *
 * Mirror image of kernel_page_alloc() for user-space allocations.
 *
 * @param pg_cnt number of pages requested.
 * @return start virtual address of the block, or NULL on failure.
 */
void* user_page_alloc(uint32_t pg_cnt)
{
  mutex_acquire(&user_pool.lock);

  void* block_start = page_alloc(user_pflag, pg_cnt);

  // Hand out only zeroed memory.
  if (block_start != NULL)
  {
    memset(block_start, 0, pg_cnt * PG_SIZE);
  }

  mutex_release(&user_pool.lock);
  return block_start;
}

void mem_pool_init(uint32_t mem_to_assign)
{
  // Memory already in use: the low 1 MiB plus 256 page frames of paging
  // structures (1 page directory + tables #0/#768 + #769-#1022).
  uint32_t mem_used = 256 * PG_SIZE + 0x100000;
  uint32_t mem_free = mem_to_assign - mem_used;
  uint32_t page_free = mem_free / PG_SIZE;

  // Split the free frames roughly evenly between kernel and user pools.
  // uint32_t here: the original used uint16_t, which silently overflows
  // once more than 65535 pages (~256 MiB) must be tracked.
  uint32_t kernel_free_page = page_free / 2;
  uint32_t user_free_page = page_free - kernel_free_page;

  // One bitmap bit per page.  Up to 7 trailing pages per pool are simply
  // left unmanaged; that only wastes a few pages.
  uint32_t kernel_bitmap_length = kernel_free_page / 8;
  uint32_t user_bitmap_length = user_free_page / 8;

  // Physical start addresses and byte sizes of the two pools.
  kernel_pool.phy_address_start = mem_used;
  user_pool.phy_address_start = mem_used + kernel_free_page * PG_SIZE;

  kernel_pool.pool_size = kernel_free_page * PG_SIZE;
  user_pool.pool_size = user_free_page * PG_SIZE;

  // The pool bitmaps live back to back starting at MEM_BITMAP_BASE.
  kernel_pool.pool_bitmap.bitmap_bytes_lens = kernel_bitmap_length;
  user_pool.pool_bitmap.bitmap_bytes_lens = user_bitmap_length;

  kernel_pool.pool_bitmap.bits = (void*)MEM_BITMAP_BASE;
  user_pool.pool_bitmap.bits = (void*)(MEM_BITMAP_BASE + kernel_bitmap_length);

  bitmap_init(&kernel_pool.pool_bitmap);
  bitmap_init(&user_pool.pool_bitmap);

  // Kernel virtual-address bitmap: one bit per kernel heap page starting
  // at K_HEAP_START, stored right after the two physical-pool bitmaps.
  kernel_vaddr.vaddr_start = K_HEAP_START;
  kernel_vaddr.vaddr_bitmap.bitmap_bytes_lens = kernel_bitmap_length;
  kernel_vaddr.vaddr_bitmap.bits =
      (void*)(MEM_BITMAP_BASE + kernel_bitmap_length + user_bitmap_length);

  bitmap_init(&kernel_vaddr.vaddr_bitmap);
}

/**
 * @brief Print bitmap location and first physical address of both pools.
 */
void output_mem_pool_info()
{
  // Kernel pool layout.
  put_str("   kernel_pool bitmap start: ");
  put_int((int)kernel_pool.pool_bitmap.bits);
  put_str("   kernel_pool phy_address_start: ");
  put_int((int)kernel_pool.phy_address_start);
  put_str("\r\n");

  // User pool layout.
  put_str("   user_pool bitmap start: ");
  put_int((int)user_pool.pool_bitmap.bits);
  put_str("   user_pool phy_address_start: ");
  put_int((int)user_pool.phy_address_start);
  put_str("\r\n");
}

void* page_alloc(enum pool_flags pf, uint32_t pg_cnt)
{
  // Select the physical pool matching the requested flag.
  struct pool* m_pool = (pf == kernel_pflag) ? &kernel_pool : &user_pool;

  // PRECONDITION: the caller already holds m_pool->lock.  Every caller in
  // this file (kernel_page_alloc, user_page_alloc, sys_malloc) acquires it
  // before calling; the original re-acquired it here, which self-deadlocks
  // on a non-recursive mutex and additionally leaked the lock on both
  // early-return failure paths below.

  // 15 MiB / 4 KiB = 3840 pages is the hard upper bound of one request.
  ASSERT(pg_cnt > 0 && pg_cnt < 3840);

  // Step 1: reserve a contiguous virtual address range.
  void* m_vaddr_start = virtual_addr_applying(pf, pg_cnt);
  if (m_vaddr_start == NULL) return NULL;

  // Step 2: back each virtual page with a physical frame, one at a time
  // (physical frames need not be contiguous).
  uint32_t m_vaddr = (uint32_t)m_vaddr_start;
  while (pg_cnt--)
  {
    void* m_paddr = palloc(m_pool);
    if (m_paddr == NULL)
    {
      // TODO: roll back the pages and virtual range already allocated.
      return NULL;
    }
    page_table_add((void*)m_vaddr, m_paddr);

    if (pg_cnt) m_vaddr += PG_SIZE;
  }

  return m_vaddr_start;
}

/**
 * @brief Allocate one physical page and map it at a caller-chosen vaddr.
 *
 * Marks vaddr used in the owning virtual-address bitmap, grabs one frame
 * from the matching physical pool and installs the page-table mapping.
 * Kernel threads may only target kernel space and user processes only
 * their own user space; any cross-domain combination panics.
 *
 * @param pf    kernel_pflag or user_pflag; selects pool and bitmap.
 * @param vaddr the exact virtual address the page must appear at.
 * @return (void*)vaddr on success, NULL if the physical pool is exhausted.
 */
void* physical_page_alloc(enum pool_flags pf, uint32_t vaddr)
{
  struct pool* m_pool = (pf == kernel_pflag) ? &kernel_pool : &user_pool;
  mutex_acquire(&m_pool->lock);
  struct task_struct* cur = cur_thread_PCB_get();

  // Mark vaddr used in the appropriate virtual-address bitmap.
  if (cur->pg_dir_entry != NULL && pf == user_pflag)
  {
    // User process allocating inside its own user-space range.
    uint32_t bit_idx = (vaddr - cur->usrprog_vaddr.vaddr_start) / PG_SIZE;
    // NOTE(review): bit_idx is unsigned, so this assert only rejects
    // bit 0, i.e. vaddr == vaddr_start.  Confirm the first page of the
    // range really is meant to be unusable.
    ASSERT(bit_idx > 0);
    bitmap_set(&cur->usrprog_vaddr.vaddr_bitmap, bit_idx, 1);
  }
  else if (cur->pg_dir_entry == NULL && pf == kernel_pflag)
  {
    // Kernel thread allocating kernel heap space.
    uint32_t bit_idx = (vaddr - kernel_vaddr.vaddr_start) / PG_SIZE;
    ASSERT(bit_idx > 0); // same caveat as above: rejects only vaddr_start
    bitmap_set(&kernel_vaddr.vaddr_bitmap, bit_idx, 1);
  }
  else
  {
    // Cross-domain request (kernel<->user) is never allowed.
    // NOTE(review): PANIC is assumed not to return; the pool lock is
    // still held on this path.
    PANIC("get_a_page:not allow kernel alloc userspace or user alloc "
          "kernelspace by get_a_page");
  }

  // Allocate the backing physical frame.
  void* page_phyaddr = palloc(m_pool);
  if (page_phyaddr == NULL)
  {
    mutex_release(&m_pool->lock);
    return NULL; // Allocation failed
  };

  // Wire the virtual address to the new frame.
  page_table_add((void*)vaddr, page_phyaddr);

  mutex_release(&m_pool->lock);
  return (void*)vaddr;
}

void* virtual_addr_applying(enum pool_flags pf, uint32_t pg_cnt)
{

  struct virtual_addr vaddr = kernel_vaddr;

  if (pf == user_pflag)
  {
    vaddr = cur_thread_PCB_get()->usrprog_vaddr;
  }

  int bit_idx_start = -1;
  int cnt = 0;

  bit_idx_start = bitmap_scan(&vaddr.vaddr_bitmap, pg_cnt);
  if (bit_idx_start == -1)
  {
    return NULL;
  }

  // set each continuous free bits to 1(used)
  for (uint32_t i = 0; i < pg_cnt; i++)
  {
    bitmap_set(&vaddr.vaddr_bitmap, bit_idx_start + i, 1);
  }

  // get start address of virtual address applied
  uint32_t v_start = vaddr.vaddr_start + bit_idx_start * PG_SIZE;

  return (void*)v_start;
}

/**
 * @brief Virtual address through which the PDE of vaddr can be written.
 *
 * Relies on the self-referencing page-directory entry: the directory is
 * visible at 0xfffff000, with 4 bytes per entry.
 */
uint32_t* pde_ptr(uint32_t vaddr)
{
  uint32_t pde_index = (vaddr & 0xffc00000) >> 22;
  return (uint32_t*)(0xfffff000 + pde_index * 4);
}

/**
 * @brief Virtual address through which the PTE of vaddr can be written.
 *
 * 0xffc00000 maps the page tables themselves via the self-referencing
 * PDE: the top 10 bits of vaddr select which table, the middle 10 bits
 * select the 4-byte entry inside it.
 */
uint32_t* pte_ptr(uint32_t vaddr)
{
  uint32_t table_base = 0xffc00000 + ((vaddr & 0xffc00000) >> 10);
  uint32_t entry_offset = ((vaddr & 0x003ff000) >> 12) * 4;
  return (uint32_t*)(table_base + entry_offset);
}

/**
 * @brief Take one free physical page frame out of m_pool.
 *
 * @param m_pool pool to allocate from; caller handles locking.
 * @return physical address of the frame, or NULL if the pool is full.
 */
void* palloc(struct pool* m_pool)
{
  // One clear bit in the bitmap == one free frame.
  int bit_idx = bitmap_scan(&m_pool->pool_bitmap, 1);
  if (bit_idx == -1)
  {
    return NULL;
  }

  // Claim it and convert the bit index into a physical address.
  bitmap_set(&m_pool->pool_bitmap, bit_idx, 1);
  return (void*)(m_pool->phy_address_start + bit_idx * PG_SIZE);
}

void page_table_add(void* vaddr, void* pg_paddr)
{
  // Both addresses are required (the original used '||', which accepted
  // one of the two being NULL).
  ASSERT(vaddr != NULL && pg_paddr != NULL);
  uint32_t* pde = pde_ptr((uint32_t)vaddr);
  uint32_t* pte = pte_ptr((uint32_t)vaddr);

  if (*pde & 0x00000001)
  {
    // PDE already present: just install the PTE.
    ASSERT(!(*pte & 0x00000001)); // must not remap an existing page

    *pte = ((uint32_t)pg_paddr | PG_US_U | PG_RW_W | PG_P_1);
  }
  else
  {
    // PDE absent: first allocate a kernel frame for the new page table.
    uint32_t pde_phyaddr = (uint32_t)palloc(&kernel_pool);
    ASSERT(pde_phyaddr != 0); // kernel pool exhausted — cannot proceed

    *pde = (pde_phyaddr | PG_US_U | PG_RW_W | PG_P_1);

    // Zero the whole new page table.  The original cast the size to
    // (uint8_t), truncating PG_SIZE (4096) to 0, so the table kept
    // whatever garbage the frame contained.
    memset((void*)((uint32_t)pte & 0xfffff000), 0, PG_SIZE);

    ASSERT(!(*pte & 0x00000001));
    *pte = (uint32_t)pg_paddr | PG_US_U | PG_RW_W | PG_P_1;
  }
}

/**
 * @brief Translate a mapped virtual address into its physical address.
 *
 * Reads the frame base out of the PTE and re-attaches the page offset.
 * The mapping for vaddr must already exist.
 */
uint32_t addr_v2p(uint32_t vaddr)
{
  uint32_t* pte = pte_ptr(vaddr);
  uint32_t frame_base = *pte & 0xfffff000;
  uint32_t page_offset = vaddr & 0x00000fff;
  return frame_base + page_offset;
}

/**
 * @brief Fill an array of arena size-class descriptors.
 *
 * Size classes double from 16 bytes upward (16, 32, 64, ...), one
 * descriptor per class, each with an empty free-block list.
 *
 * @param desc_array array of DESC_CNT descriptors to initialize.
 */
void block_desc_init(struct mem_block_desc* desc_array)
{
  uint32_t block_size = 16;

  for (int idx = 0; idx < DESC_CNT; idx++, block_size *= 2)
  {
    struct mem_block_desc* d = &desc_array[idx];
    d->each_block_size = block_size;
    // Blocks of this size that fit in one page after the arena header.
    d->blocks_cnt_per_arena = (PG_SIZE - sizeof(struct arena)) / block_size;
    list_init(&d->free_block_list);
  }
}

/**
 * @brief Address of the block_idx-th block inside arena a.
 *
 * Blocks are packed back to back immediately after the arena header.
 */
struct mem_block* arena2block(struct arena* a, uint32_t block_idx)
{
  uint32_t first_block = (uint32_t)a + sizeof(struct arena);
  return (struct mem_block*)(first_block +
                             block_idx * a->desc->each_block_size);
}

/**
 * @brief Arena header owning block b.
 *
 * Arenas are page-aligned, so masking off the low 12 bits of any block
 * address inside one yields its header.
 */
struct arena* block2arena(struct mem_block* b)
{
  uint32_t page_base = (uint32_t)b & 0xfffff000;
  return (struct arena*)page_base;
}

/**
 * @brief Kernel heap allocator: return a block of at least `size` bytes.
 *
 * Small requests (<= 1024 bytes) are served from per-size-class arenas;
 * large requests get whole pages with a dedicated large-block arena.
 *
 * @param size requested byte count; must be > 0 and within the pool.
 * @return pointer to the allocated block, or NULL on failure.
 */
void* sys_malloc(uint32_t size)
{
  enum pool_flags        flag;     // Pool flag indicating kernel or user pool
  struct pool*           mem_pool; // Pointer to the memory pool
  struct mem_block_desc* descs;    // Pointer to the memory block descriptors
  struct task_struct*    cur_thread_PCB = cur_thread_PCB_get();

  // Kernel threads have no private page directory; user processes do.
  if (cur_thread_PCB->pg_dir_entry == NULL)
  {
    flag = kernel_pflag;
    mem_pool = &kernel_pool;
    descs = k_block_desc;
  }
  else
  {
    flag = user_pflag;
    mem_pool = &user_pool;
    descs = cur_thread_PCB->u_block_desc;
  }

  // Check if the requested size is valid
  ASSERT(size > 0 && size <= mem_pool->pool_size);

  struct arena*     a;
  struct mem_block* b;
  mutex_acquire(&mem_pool->lock);

  // Large request: serve whole pages through a dedicated arena.
  // This check must be unconditional — the original only reached it when
  // descs[0]'s free list happened to be empty; otherwise it popped a
  // 16-byte block for ANY size, corrupting memory.
  if (size > 1024)
  {
    uint32_t pg_cnt = DIV_CEILING(size, PG_SIZE);
    a = page_alloc(flag, pg_cnt);
    if (a == NULL)
    {
      put_str("page alloc failed!");
      mutex_release(&mem_pool->lock);
      return NULL;
    }
    a->is_large_block = true;
    a->free_mem_cnt = pg_cnt;
    a->desc = NULL;
    mutex_release(&mem_pool->lock);
    // NOTE(review): the arena header sits at the start of the returned
    // region, so the caller can overwrite it; consider returning a + 1
    // (and counting the header in pg_cnt) as in the reference design.
    return a;
  }

  // Small request: pick the matching size-class descriptor FIRST, then
  // replenish exactly that descriptor's free list if it is empty.
  uint8_t desc_idx = 0;
  for (; desc_idx < DESC_CNT; desc_idx++)
  {
    if (size <= descs[desc_idx].each_block_size)
    {
      break;
    }
  }

  if (list_empty(&descs[desc_idx].free_block_list))
  {
    a = page_alloc(flag, 1); // one fresh page becomes a new arena
    if (a == NULL)
    {
      put_str("page alloc failed!");
      mutex_release(&mem_pool->lock);
      return NULL;
    }

    a->desc = &descs[desc_idx];
    a->is_large_block = false;
    a->free_mem_cnt = descs[desc_idx].blocks_cnt_per_arena;

    // Carve the page into fixed-size blocks and queue them all.
    for (int i = 0; i < descs[desc_idx].blocks_cnt_per_arena; i++)
    {
      list_append(&descs[desc_idx].free_block_list,
                  &arena2block(a, i)->free_elem);
    }
  }

  // Pop one block of the right size class and zero it for the caller.
  b = elem2entry(struct mem_block, free_elem,
                 list_pop(&(descs[desc_idx].free_block_list)));
  memset(b, 0, descs[desc_idx].each_block_size);

  // Account for the block in its owning arena.
  a = block2arena(b);
  a->free_mem_cnt--;

  mutex_release(&mem_pool->lock);

  return (void*)b;
}
/**
 * @brief Clear a bit in the bitmap.
 *
 * This function clears the specified bit in the bitmap, indicating that the
 * corresponding physical page is now free.
 *
 * @param paddr The physical address corresponding to the bit to be cleared.
 */
void paddr_bitmap_clear_bit(uint32_t paddr)
{
  // '>=' so that an address exactly at the user pool's base is classified
  // as user memory; the original '>' put that first user page into the
  // kernel pool and cleared the wrong bit.
  struct pool* m_pool =
      (paddr >= user_pool.phy_address_start) ? &user_pool : &kernel_pool;

  // Bit index of this frame within its pool.
  uint32_t bit_idx = (paddr - m_pool->phy_address_start) / PG_SIZE;

  // Mark the frame free again.
  bitmap_set(&m_pool->pool_bitmap, bit_idx, 0);
}

/**
 * @brief Remove a Page Table Entry (PTE) from the page table.
 *
 * This function removes the Page Table Entry (PTE) corresponding to the given
 * virtual address from the page table. It clears the Present (P) flag in the
 * PTE, indicating that the page is no longer present in memory.
 *
 * @param vaddr The virtual address for which to remove the PTE.
 */
void remove_page_table_entry(uint32_t vaddr)
{
  // Invalidate the mapping by clearing the Present flag in its PTE.
  uint32_t* entry = pte_ptr(vaddr);
  *entry = *entry & ~PG_P_1;
  // Evict the now-stale translation for this address from the TLB.
  asm volatile("invlpg %0" ::"m"(vaddr) : "memory");
}

/**
 * @brief Clear the bitmap entries corresponding to a range of virtual addresses
 * in the virtual address pool.
 *
 * This function clears the bitmap entries in the virtual address pool
 * corresponding to a range of virtual addresses starting from the specified
 * virtual address and extending for the specified number of pages.
 *
 * @param pf The memory pool flag indicating whether it's a kernel or user
 * virtual address pool.
 * @param vaddr The starting virtual address of the range to be cleared.
 * @param pg_cnt The number of pages in the range to be cleared.
 */
void vaddr_bitmap_clear(enum pool_flags pf, uint32_t vaddr, uint32_t pg_cnt)
{
  // Pick the bitmap that owns this virtual range.
  struct virtual_addr* vaddr_pool;
  if (pf == kernel_pflag)
  {
    vaddr_pool = &kernel_vaddr;
  }
  else
  {
    vaddr_pool = &cur_thread_PCB_get()->usrprog_vaddr;
  }

  // First bit corresponding to vaddr in this pool's bitmap.
  uint32_t first_bit = (vaddr - vaddr_pool->vaddr_start) / PG_SIZE;

  // Mark every page of the range as free again.
  for (uint32_t i = 0; i < pg_cnt; i++)
  {
    bitmap_set(&vaddr_pool->vaddr_bitmap, first_bit + i, 0);
  }
}

/**
 * @brief Free a range of virtual memory pages.
 *
 * This function releases a range of contiguous virtual memory pages starting
 * from the specified virtual address. It clears the corresponding physical page
 * bitmap entries, removes the page table entries, and clears the virtual
 * address bitmap entries.
 *
 * @param flag The memory pool flag indicating whether it's a kernel or user
 * virtual memory pool.
 * @param vaddr The starting virtual address of the range to be freed.
 * @param cnt The number of pages to be freed.
 */
void free_virtual_memory(enum pool_flags flag, uint32_t vaddr, uint32_t cnt)
{
  // Release each page: free its physical frame, then tear down the
  // mapping.  Iterate with an offset so vaddr itself stays untouched.
  for (uint32_t i = 0; i < cnt; i++)
  {
    uint32_t page_vaddr = vaddr + i * PG_SIZE;

    uint32_t paddr = addr_v2p(page_vaddr);
    paddr_bitmap_clear_bit(paddr);
    remove_page_table_entry(page_vaddr);
  }

  // Finally mark the whole range free in the virtual-address bitmap.
  vaddr_bitmap_clear(flag, vaddr, cnt);
}

/**
 * @brief Release memory previously obtained from sys_malloc.
 *
 * Large blocks give their whole page span back; small blocks return to
 * their size-class free list, and a fully-freed arena page is released.
 *
 * @param ptr pointer returned by sys_malloc; NULL is a no-op.
 */
void sys_free(void* ptr)
{
  // free(NULL) is a no-op, mirroring the standard allocator contract.
  // (The original computed a bogus arena from NULL and corrupted lists.)
  if (ptr == NULL) return;

  enum pool_flags flag;
  struct pool*    m_pool;

  // Kernel threads have no private page directory; user processes do.
  if (cur_thread_PCB_get()->pg_dir_entry == NULL)
  {
    flag = kernel_pflag;
    m_pool = &kernel_pool;
  }
  else
  {
    flag = user_pflag;
    m_pool = &user_pool;
  }

  struct mem_block* b = (struct mem_block*)ptr;
  struct arena*     a = block2arena(b);

  mutex_acquire(&m_pool->lock);

  // Large allocation: release the whole page span at once.
  if (a->is_large_block && a->desc == NULL)
  {
    // Use the page-aligned arena base; identical to ptr today (sys_malloc
    // returns the arena base for large blocks) but robust if that ever
    // changes to skip the header.
    free_virtual_memory(flag, (uint32_t)a, a->free_mem_cnt);
    mutex_release(&m_pool->lock);
    return;
  }

  // Small block: put it back on its descriptor's free list.
  list_append(&a->desc->free_block_list, &b->free_elem);
  a->free_mem_cnt++;

  // If every block of this arena is free again, unlink them all and
  // release the arena's page.
  if (a->free_mem_cnt == a->desc->blocks_cnt_per_arena)
  {
    for (uint32_t i = 0; i < a->desc->blocks_cnt_per_arena; i++)
    {
      struct mem_block* blk = arena2block(a, i);
      list_remove(&blk->free_elem);
    }
    free_virtual_memory(flag, (uint32_t)a, 1);
  }

  mutex_release(&m_pool->lock);
}
