#include <pmm.h>

/* Page-frame metadata array and the number of frames it describes. */
struct page *pages;
size_t npage;

/* Single global free area: lock-protected list of free page frames. */
static struct free_area memarea = {0};
#define free_list (memarea.free_list)
#define nr_free (memarea.nr_free)

/* Number of slab size classes: 8, 16, ..., doubling up to PGSIZE
 * (8 << (MAXSLAB-1) == 2^PGSHIFT when MAXSLAB == PGSHIFT - 2). */
#define MAXSLAB (PGSHIFT - 2)
static struct slab *slab_caches;

/* Pop one physical page frame off the global free list.
 * Returns NULL when no frame is available. */
static struct page *alloc_page() {
  struct page *page = NULL;
  lock(&memarea.lock);
  if (!list_empty(&free_list)) {
    assert(nr_free > 0);
    struct list_head *first = list_next(&free_list);
    page = LIST_ENTRY(first, struct page, list);
    list_del(first);
    nr_free--;
  }
  unlock(&memarea.lock);
  return page;
}

/* Return a page frame to the head of the global free list. */
static void free_page(struct page *p) {
  assert(p);
  lock(&memarea.lock);
  nr_free++;
  list_add(&free_list, &p->list);
  unlock(&memarea.lock);
}

/* Snapshot of the current number of free page frames. */
size_t nr_free_pages() {
  lock(&memarea.lock);
  size_t count = nr_free;
  unlock(&memarea.lock);
  return count;
}

/* Initialize the page allocator: carve the heap into a `pages` metadata
 * array followed by the free page frames, and thread every free frame
 * onto the global free list. */
static void memarea_init() {
  lockinit(&memarea.lock);
  LIST_INIT(&free_list);
  nr_free = 0;

  /* The page metadata array starts at the page-aligned start of the heap. */
  pages = (struct page *)ROUNDUP(heap.start, PGSIZE);
  /* NOTE(review): npage is sized from KERNSIZE, but the free range below
   * is bounded by PHYSTOP — confirm both describe the same region. */
  npage = KERNSIZE / PGSIZE;
  /* First byte usable as free memory: just past the metadata array. */
  uintptr_t freemem = ROUNDUP((uintptr_t)pages + npage * sizeof(struct page), PGSIZE);
  struct page *base = pa2page(freemem);
  nr_free = (PHYSTOP - freemem) / PGSIZE;
  for (struct page *p = base; p != base + nr_free; p++) {
    list_add_tail(&free_list, &p->list);
  }
}

/* Initialize the slab size classes (8, 16, ..., PGSIZE bytes).
 * All slab descriptors live in one freshly allocated page. */
static void slab_init() {
  struct page *pg = alloc_page();
  /* Fix: the alloc_page() result was previously used unchecked; a NULL
   * return (empty free list) would have been fed straight to page2pa. */
  if (!pg)
    panic("slab_init: no free page for slab caches");
  slab_caches = (struct slab *)page2pa(pg);
  for (int i = 0; i < MAXSLAB; i++) {
    struct slab *s = slab_caches + i;
    for (int j = 0; j < NCPU; j++) {
      s->cpu_slab[j].page = NULL;
      LIST_INIT(&(s->cpu_slab[j].partial));
    }
    s->size = 8 * (1 << i);        /* size class i holds objects of 8 << i bytes */
    s->offset = 0;                 /* free pointer is stored at object offset 0 */
    s->objects = PGSIZE / s->size; /* objects per page in this class */
  }
}

/* Module entry point: report heap size, then bring up the page
 * allocator and the slab caches (in that order; slab_init allocates). */
static void pmm_init() {
  uintptr_t pmsize = ((uintptr_t)heap.end - (uintptr_t)heap.start);
  /* Fix: %d with a uintptr_t argument is a format/argument mismatch (UB
   * on LP64); pass an int explicitly. */
  printf("Got %d MiB heap: [%p, %p)\n", (int)(pmsize >> 20), heap.start, heap.end);
  memarea_init();
  slab_init();
}

/* Store the free-list link `fp` inside free object `obj`, at byte offset
 * s->offset. Fix: arithmetic on void * is a GNU extension; cast through
 * char * for ISO C conformance (identical generated address). */
static inline void set_fp(struct slab *s, void *obj, void *fp) {
  *(void **)((char *)obj + s->offset) = fp;
}

/* Read the free-list link stored inside free object `obj`.
 * Fix: arithmetic on void * is a GNU extension; cast through char *
 * for ISO C conformance (identical generated address). */
static inline void *get_fp(struct slab *s, void *obj) {
  return *(void **)((char *)obj + s->offset);
}

/* Map an allocation size to the smallest size class that fits it
 * (class i holds objects of 8 << i bytes). */
static inline int slab_index(size_t size) {
  size_t cap = 8;
  int i = 0;
  while (i < MAXSLAB) {
    if (size <= cap)
      return i;
    cap <<= 1;
    i++;
  }
  panic("error slab_index");
}

/* Return the slab cache responsible for allocations of `size` bytes. */
static inline struct slab *getslab(size_t size) {
  assert(size > 0 && size <= PGSIZE);
  int idx = slab_index(size);
  return &slab_caches[idx];
}

/* Per-CPU slab state of cache `s` for the CPU we are running on. */
static inline struct slab_cpu *getsc(struct slab *s) {
  assert(s);
  int cpu = cpu_current();
  return &s->cpu_slab[cpu];
}

/* Acquire the lock serializing access to page `p`'s slab state
 * (freelist, inused, frozen). */
static inline void slab_lock(struct page *p) {
  assert(p);
  lock(&p->lock);
}

/* Release the per-page slab lock taken by slab_lock(). */
static inline void slab_unlock(struct page *p) {
  assert(p);
  unlock(&p->lock);
}

/* Turn a fresh page frame into a slab for cache `s`: every object in the
 * page is threaded onto the page's free list (object i links to object
 * i+1; the last object links to NULL). Returns NULL when out of pages. */
static inline struct page *new_slab(struct slab *s) {
  struct page *pg = alloc_page();
  if (!pg) return NULL;
  pg->slab = s;
  pg->frozen = 0;
  pg->inused = 0;
  pg->objects = s->objects;
  lockinit(&pg->lock);
  void *base = (void *)page2pa(pg);
  for (int i = 0; i + 1 < s->objects; i++) {
    set_fp(s, base + i * s->size, base + (i + 1) * s->size);
  }
  set_fp(s, base + (s->objects - 1) * s->size, NULL);
  pg->freelist = base;  /* first object heads the free list */
  return pg;
}

/* Allocate `size` bytes (0 < size <= PGSIZE) from the matching slab
 * cache. Runs with interrupts disabled for the duration; returns NULL
 * when no page frame can be obtained. */
void *kalloc(size_t size) {
  assert(size > 0 && size <= PGSIZE);
  void *obj;
  int flag;
  local_irq_save(flag);
  struct slab *s = getslab(size);
  struct slab_cpu *c = getsc(s);
  if (!c->page)
    goto new_slab;
load_freelist:
  slab_lock(c->page);
  obj = c->page->freelist;
  if (!obj) {
    /* Page is fully allocated: mark it frozen (detached — on no list and
     * no longer this CPU's active page) so kfree knows to re-attach it
     * when an object comes back. */
    c->page->frozen = 1;
    slab_unlock(c->page);
    c->page = NULL;
    /* falls through to new_slab */
  } else {
    c->page->inused += 1;
    c->page->freelist = get_fp(s, obj); /* pop head of the free list */
    slab_unlock(c->page);
    local_irq_restore(flag);
    return obj;
  }
new_slab:
  /* Prefer a partially-used page parked on this CPU's partial list. */
  if (!list_empty(&c->partial)) {
    struct list_head *lh = list_next(&c->partial);
    c->page = LIST_ENTRY(lh, struct page, list);
    list_del(lh);
    goto load_freelist;
  }
  /* Otherwise grab and format a fresh page from the page allocator. */
  c->page = new_slab(s);
  if (c->page)
    goto load_freelist;
  local_irq_restore(flag);
  return NULL; /* out of memory */
}

/* Free an object previously returned by kalloc. NULL is a no-op.
 * Runs with interrupts disabled for the duration. */
void kfree(void *ptr) {
  if (!ptr) return;
  int flag;
  local_irq_save(flag);
  struct page *pg = pa2page((uintptr_t)ptr);
  slab_lock(pg);
  struct slab *s = pg->slab;
  struct slab_cpu *c = getsc(s);
  pg->inused -= 1;
  if (!pg->inused)
    goto slab_empty;
  /* Push the object back onto the page's free list. */
  set_fp(s, ptr, pg->freelist);
  pg->freelist = ptr;
  if (pg->frozen) {
    /* Page was full and detached; park it on this CPU's partial list.
     * NOTE(review): the object may belong to a page last owned by a
     * different CPU — re-attaching here migrates the page to the current
     * CPU; likewise `pg == c->page` below only checks the current CPU's
     * active page. Confirm cross-CPU frees are intended/handled. */
    pg->frozen = 0;
    list_add_tail(&c->partial, &pg->list);
  }
  slab_unlock(pg);
  local_irq_restore(flag);
  return;
slab_empty:
  /* Last object freed: return the whole page to the page allocator. */
  if (!pg->frozen) {
    if (pg == c->page)
      c->page = NULL;            /* page was this CPU's active slab */
    else
      list_del(&pg->list);       /* page was parked on a partial list */
  }
  /* Fix: pg->lock was acquired above but never released on this path,
   * leaking the lock into the recycled page (previously masked only
   * because new_slab() re-runs lockinit). */
  slab_unlock(pg);
  free_page(pg);
  local_irq_restore(flag);
}

/* #define N 1000
struct ptr {
  spinlock_t lock;
  void *p;
} arr[N] = {0};

static void test() {
  for (int i = 0; i < N; i++) {
    arr[i].p = kalloc(4000);
    printf("cpuid: %d; alloc -> 0x%x\n", cpu_current(), (uintptr_t)arr[i].p);
  }
  for (int i = 0; i < N; i++) {
    lock(&arr[i].lock);
    if (arr[i].p) {
      kfree(arr[i].p);
      printf("cpuid: %d; free -> 0x%x\n", cpu_current(), (uintptr_t)arr[i].p);
      arr[i].p = NULL;
    }
    unlock(&arr[i].lock);
  }
} */

/* Public interface of the pmm module: init plus alloc/free entry points. */
MODULE_DEF(pmm) = {
  .init  = pmm_init,
  .alloc = kalloc,
  .free  = kfree,
  // .test  = test,
};
