/*
slub.c
**/

#include <slub.h>

#include <list.h>

#include <defs.h>

#include <string.h>

#include <stdio.h>

// slab_t - per-slab metadata. A slab descriptor is overlaid on the page's
// struct Page entry (check_kmem asserts the two sizes match), so the page
// allocator's bookkeeping entry doubles as the slab header.
struct slab_t {

    int ref;                       // occupies Page.ref's slot; not used by this file

    struct kmem_cache_t *cachep;   // owning cache, lets kfree() find the right cache

    uint16_t inuse;                // number of objects currently allocated from this slab

    uint16_t free;                 // bufctl index of the first free object (chain head;
                                   // -1 terminator only reached when the slab is full)

    list_entry_t slab_link;        // link in the cache's full/partial/free list

};

// The number of sized cache : 16, 32, 64, 128, 256, 512, 1024, 2048

#define SIZED_CACHE_NUM     8

#define SIZED_CACHE_MIN     16      // smallest sized-cache object size, bytes

#define SIZED_CACHE_MAX     2048    // largest sized-cache object size, bytes

// Slab descriptors are overlaid on struct Page entries, so the Page list
// helpers can be reused for slabs via these casts.
#define le2slab(le,link)    ((struct slab_t*)le2page((struct Page*)le,link))

#define slab2kva(slab)      (page2kva((struct Page*)slab))

static list_entry_t cache_chain;                            // all caches, for kmem_cache_reap()

static struct kmem_cache_t cache_cache;                     // cache of kmem_cache_t descriptors

static struct kmem_cache_t *sized_caches[SIZED_CACHE_NUM];  // power-of-two caches backing kmalloc()

static char *cache_cache_name = "cache";

static char *sized_cache_name = "sized";

// kmem_cache_grow - add a free slab

static void * kmem_cache_grow(struct kmem_cache_t *cachep) {

    struct Page *page = alloc_page();

    void *kva = page2kva(page);

    // Init slub meta data

    struct slab_t *slab = (struct slab_t *) page;

    slab->cachep = cachep;

    slab->inuse = slab->free = 0;

    list_add(&(cachep->slabs_free), &(slab->slab_link));

    // Init bufctl

    int16_t *bufctl = kva;

    for (int i = 1; i < cachep->num; i++)

        bufctl[i-1] = i;

    bufctl[cachep->num-1] = -1;

    // Init cache

    void *buf = bufctl + cachep->num;

    if (cachep->ctor)

        for (void *p = buf; p < buf + cachep->objsize * cachep->num; p += cachep->objsize)

            cachep->ctor(p, cachep, cachep->objsize);

    return slab;

}

// kmem_slab_destroy - destroy a slab: run the destructor over its objects
// and hand the backing page back to the page allocator.
// NOTE(review): the dtor is invoked on all cachep->num slots regardless of
// inuse — callers are expected to pass slabs with no live objects.
static void kmem_slab_destroy(struct kmem_cache_t *cachep, struct slab_t *slab) {

    // Destruct cache
    struct Page *page = (struct Page *) slab;   // slab descriptor IS the Page entry

    int16_t *bufctl = page2kva(page);

    void *buf = bufctl + cachep->num;           // objects start right after the bufctl array

    if (cachep->dtor)

        for (void *p = buf; p < buf + cachep->objsize * cachep->num; p += cachep->objsize)

            cachep->dtor(p, cachep, cachep->objsize);

    // Return slub page: clear Page bookkeeping, unlink from the cache's
    // slab list (page_link occupies the same slot as slab_link), and free.
    page->property = page->flags = 0;

    list_del(&(page->page_link));

    free_page(page);

}

// kmem_sized_index - map a request size to the index of the smallest
// sized cache (SIZED_CACHE_MIN << index bytes) that can hold it.
//
// BUG FIX: the old implementation rounded size up to a multiple of 2
// (not a power of two) and then computed floor(log2(rsize)) - 4, so any
// non-power-of-two request (e.g. 33..63 bytes) was mapped to a cache
// SMALLER than the request, handing out undersized objects. Walking the
// actual cache capacities gives the correct index; results are unchanged
// for exact power-of-two sizes.
static int kmem_sized_index(size_t size) {

    int index = 0;

    for (size_t capacity = SIZED_CACHE_MIN; capacity < size; capacity <<= 1)

        index ++;

    // Caller (kmalloc) asserts size <= SIZED_CACHE_MAX, so index < SIZED_CACHE_NUM.
    return index;

}

// ! Test code

#define TEST_OBJECT_LENTH 2046   // sized so two objects (plus bufctl) fill one page

#define TEST_OBJECT_CTVAL 0x22   // byte pattern written by the test constructor

#define TEST_OBJECT_DTVAL 0x11   // byte pattern written by the test destructor

static const char *test_object_name = "test";

// test_object - dummy object type allocated/freed by check_kmem
struct test_object {

    char test_member[TEST_OBJECT_LENTH];

};

// test_ctor - slab constructor for the self-test: stamps every byte of a
// fresh object with TEST_OBJECT_CTVAL so check_kmem can verify it ran.
static void test_ctor(void* objp, struct kmem_cache_t * cachep, size_t size) {

    memset(objp, TEST_OBJECT_CTVAL, size);

}

// test_dtor - slab destructor for the self-test: stamps every byte of a
// destroyed object with TEST_OBJECT_DTVAL so check_kmem can verify it ran.
static void test_dtor(void* objp, struct kmem_cache_t * cachep, size_t size) {

    char *byte = objp;

    char *end = byte + size;

    while (byte < end)

        *byte++ = TEST_OBJECT_DTVAL;

}

// list_length - count the entries on a circular list, excluding the
// sentinel head element itself.
static size_t list_length(list_entry_t *listelm) {

    size_t count = 0;

    for (list_entry_t *le = list_next(listelm); le != listelm; le = list_next(le))

        count ++;

    return count;

}

// check_kmem - self-test: exercises cache creation, allocation with
// ctor/dtor, zalloc, freeing, shrinking, reaping and the sized caches.
static void check_kmem() {

    // The slab descriptor is overlaid on struct Page, so sizes must match.
    assert(sizeof(struct Page) == sizeof(struct slab_t));

    size_t fp = nr_free_pages();   // baseline free-page count

    // Create a cache
    struct kmem_cache_t *cp0 = kmem_cache_create(test_object_name, sizeof(struct test_object), test_ctor, test_dtor);

    assert(cp0 != NULL);

    assert(kmem_cache_size(cp0) == sizeof(struct test_object));

    assert(strcmp(kmem_cache_name(cp0), test_object_name) == 0);

    // Allocate six objects — with objsize 2046, two objects fit per slab,
    // so six allocations consume three slab pages.
    struct test_object *p0, *p1, *p2, *p3, *p4, *p5;

    char *p;

    assert((p0 = kmem_cache_alloc(cp0)) != NULL);

    assert((p1 = kmem_cache_alloc(cp0)) != NULL);

    assert((p2 = kmem_cache_alloc(cp0)) != NULL);

    assert((p3 = kmem_cache_alloc(cp0)) != NULL);

    assert((p4 = kmem_cache_alloc(cp0)) != NULL);

    // A fresh object must carry the constructor's fill byte
    p = (char *) p4;

    for (int i = 0; i < sizeof(struct test_object); i++)

        assert(p[i] == TEST_OBJECT_CTVAL);

    // zalloc must override the constructor fill with zeroes
    assert((p5 = kmem_cache_zalloc(cp0)) != NULL);

    p = (char *) p5;

    for (int i = 0; i < sizeof(struct test_object); i++)

        assert(p[i] == 0);

    assert(nr_free_pages()+3 == fp);   // three slab pages taken from the free pool

    assert(list_empty(&(cp0->slabs_free)));

    assert(list_empty(&(cp0->slabs_partial)));

    assert(list_length(&(cp0->slabs_full)) == 3);

    // Free three objects — leaves one full, one partial, one free slab
    kmem_cache_free(cp0, p3);

    kmem_cache_free(cp0, p4);

    kmem_cache_free(cp0, p5);

    assert(list_length(&(cp0->slabs_free)) == 1);

    assert(list_length(&(cp0->slabs_partial)) == 1);

    assert(list_length(&(cp0->slabs_full)) == 1);

    // Shrink cache — destroys exactly the one fully-free slab
    assert(kmem_cache_shrink(cp0) == 1);

    assert(nr_free_pages()+2 == fp);

    assert(list_empty(&(cp0->slabs_free)));

    // The destroyed slab's storage must carry the destructor's fill byte
    // (reads the just-freed page deliberately to observe the dtor effect)
    p = (char *) p4;

    for (int i = 0; i < sizeof(struct test_object); i++)

        assert(p[i] == TEST_OBJECT_DTVAL);

    // Reap cache — freeing the rest leaves two free slabs to reclaim
    kmem_cache_free(cp0, p0);

    kmem_cache_free(cp0, p1);

    kmem_cache_free(cp0, p2);

    assert(kmem_cache_reap() == 2);

    assert(nr_free_pages() == fp);

    // Destory a cache
    kmem_cache_destroy(cp0);

    // Sized alloc — 2048 bytes must come from the largest sized cache
    assert((p0 = kmalloc(2048)) != NULL);

    assert(nr_free_pages()+1 == fp);

    kfree(p0);

    assert(kmem_cache_reap() == 1);

    assert(nr_free_pages() == fp);

    cprintf("check_kmem() succeeded!\n");

}

// ! End of test code

// kmem_cache_create - create a kmem_cache.
// name: descriptive name, truncated to CACHE_NAMELEN-1 characters
// size: object size in bytes (one object + its int16_t bufctl entry must fit a page)
// ctor/dtor: optional per-object constructor/destructor, may be NULL
// Returns the new cache, or NULL if the descriptor allocation failed.
struct kmem_cache_t * kmem_cache_create(const char *name, size_t size,

                       void (*ctor)(void*, struct kmem_cache_t *, size_t),

                       void (*dtor)(void*, struct kmem_cache_t *, size_t)) {

    assert(size <= (PGSIZE - 2));

    struct kmem_cache_t *cachep = kmem_cache_alloc(&(cache_cache));

    if (cachep != NULL) {

        cachep->objsize = size;

        // Objects per slab: each costs objsize bytes plus one bufctl entry
        cachep->num = PGSIZE / (sizeof(int16_t) + size);

        cachep->ctor = ctor;

        cachep->dtor = dtor;

        // BUG FIX: the old memcpy of CACHE_NAMELEN bytes read past the end
        // of short name strings. Copy at most CACHE_NAMELEN-1 characters and
        // guarantee NUL termination.
        strncpy(cachep->name, name, CACHE_NAMELEN - 1);

        cachep->name[CACHE_NAMELEN - 1] = '\0';

        list_init(&(cachep->slabs_full));

        list_init(&(cachep->slabs_partial));

        list_init(&(cachep->slabs_free));

        list_add(&(cache_chain), &(cachep->cache_link));

    }

    return cachep;

}

// kmem_slab_destroy_all - destroy every slab linked on the given list.
// Safe-iteration helper: advances past each node before destroying it.
static void kmem_slab_destroy_all(struct kmem_cache_t *cachep, list_entry_t *head) {

    list_entry_t *le = list_next(head);

    while (le != head) {

        list_entry_t *temp = le;

        le = list_next(le);

        kmem_slab_destroy(cachep, le2slab(temp, page_link));

    }

}

// kmem_cache_destroy - destroy a kmem_cache: tear down every slab on all
// three lists, then return the descriptor itself to the cache of caches.
// (The three identical loops of the old version are factored into one helper.)
void kmem_cache_destroy(struct kmem_cache_t *cachep) {

    kmem_slab_destroy_all(cachep, &(cachep->slabs_full));

    kmem_slab_destroy_all(cachep, &(cachep->slabs_partial));

    kmem_slab_destroy_all(cachep, &(cachep->slabs_free));

    // Free kmem_cache
    kmem_cache_free(&(cache_cache), cachep);

}

// kmem_cache_alloc - allocate one object from cachep.
// Prefers a partially-used slab; otherwise takes a free slab, growing the
// cache by one page if necessary. Returns NULL only if growing fails.
void * kmem_cache_alloc(struct kmem_cache_t *cachep) {

    list_entry_t *le = NULL;

    // Find in partial list
    if (!list_empty(&(cachep->slabs_partial)))

        le = list_next(&(cachep->slabs_partial));

    // Find in empty list (grow adds the new slab to slabs_free)
    else {

        if (list_empty(&(cachep->slabs_free)) && kmem_cache_grow(cachep) == NULL)

            return NULL;

        le = list_next(&(cachep->slabs_free));

    }

    // Alloc: pop the head of the slab's bufctl free chain
    list_del(le);

    struct slab_t *slab = le2slab(le, page_link);

    void *kva = slab2kva(slab);

    int16_t *bufctl = kva;              // free-chain array at the start of the page

    void *buf = bufctl + cachep->num;   // objects start right after the bufctl array

    void *objp = buf + slab->free * cachep->objsize;

    // Update slab: advance the chain head, then refile the slab on the
    // full or partial list depending on how many objects remain free.
    slab->inuse ++;

    slab->free = bufctl[slab->free];

    if (slab->inuse == cachep->num)

        list_add(&(cachep->slabs_full), le);

    else

        list_add(&(cachep->slabs_partial), le);

    return objp;

}

// kmem_cache_zalloc - allocate an object and fill it with zero.
// Returns NULL if the underlying allocation fails (the old code passed
// NULL straight to memset in that case).
void * kmem_cache_zalloc(struct kmem_cache_t *cachep) {

    void *objp = kmem_cache_alloc(cachep);

    if (objp != NULL)

        memset(objp, 0, cachep->objsize);

    return objp;

}

// kmem_cache_free - return objp to its slab in cachep.
// The slab is located from the object's address: pages and their struct
// Page entries (which double as slab descriptors) correspond 1:1 in the
// global `pages` array.
void kmem_cache_free(struct kmem_cache_t *cachep, void *objp) {

    // Get slab of object
    void *base = page2kva(pages);          // kva of the first managed page frame

    void *kva = ROUNDDOWN(objp, PGSIZE);   // start of the page holding objp

    struct slab_t *slab = (struct slab_t *) &pages[(kva-base)/PGSIZE];

    // Get offset in slab (objects follow the bufctl array)
    int16_t *bufctl = kva;

    void *buf = bufctl + cachep->num;

    int offset = (objp - buf) / cachep->objsize;

    // Update slab: push the object onto the bufctl free chain, then refile
    // the slab on the free or partial list depending on remaining usage.
    list_del(&(slab->slab_link));

    bufctl[offset] = slab->free;

    slab->inuse --;

    slab->free = offset;

    if (slab->inuse == 0)

        list_add(&(cachep->slabs_free), &(slab->slab_link));

    else

        list_add(&(cachep->slabs_partial), &(slab->slab_link));

}

// kmem_cache_size - report the object size this cache was created with.
size_t kmem_cache_size(struct kmem_cache_t *cachep) {

    size_t objsize = cachep->objsize;

    return objsize;

}

// kmem_cache_name - report the (possibly truncated) name of the cache.
const char * kmem_cache_name(struct kmem_cache_t *cachep) {

    return &(cachep->name[0]);

}

// kmem_cache_shrink - destroy all slabs in free list

int kmem_cache_shrink(struct kmem_cache_t *cachep) {

    int count = 0;

    list_entry_t *le = list_next(&(cachep->slabs_free));

    while (le != &(cachep->slabs_free)) {

        list_entry_t *temp = le;

        le = list_next(le);

        kmem_slab_destroy(cachep, le2slab(temp, page_link));

        count ++;

    }

    return count;

}

// kmem_cache_reap - reap all free slabs

int kmem_cache_reap() {

    int count = 0;

    list_entry_t *le = &(cache_chain);

    while ((le = list_next(le)) != &(cache_chain))

        count += kmem_cache_shrink(to_struct(le, struct kmem_cache_t, cache_link));

    return count;

}

// kmalloc - allocate size bytes from the smallest sized cache that fits.
// Requests above SIZED_CACHE_MAX are a caller bug (asserted).
void * kmalloc(size_t size) {

    assert(size <= SIZED_CACHE_MAX);

    struct kmem_cache_t *cachep = sized_caches[kmem_sized_index(size)];

    return kmem_cache_alloc(cachep);

}

void kfree(void *objp) {

    void *base = slab2kva(pages);

    void *kva = ROUNDDOWN(objp, PGSIZE);

    struct slab_t *slab = (struct slab_t *) &pages[(kva-base)/PGSIZE];

    kmem_cache_free(slab->cachep, objp);

}

void kmem_int() {

    // Init cache for kmem_cache

    cache_cache.objsize = sizeof(struct kmem_cache_t);

    cache_cache.num = PGSIZE / (sizeof(int16_t) + sizeof(struct kmem_cache_t));

    cache_cache.ctor = NULL;

    cache_cache.dtor = NULL;

    memcpy(cache_cache.name, cache_cache_name, CACHE_NAMELEN);

    list_init(&(cache_cache.slabs_full));

    list_init(&(cache_cache.slabs_partial));

    list_init(&(cache_cache.slabs_free));

    list_init(&(cache_chain));

    list_add(&(cache_chain), &(cache_cache.cache_link));

    // Init sized cache

    for (int i = 0, size = 16; i < SIZED_CACHE_NUM; i++, size *= 2)

        sized_caches[i] = kmem_cache_create(sized_cache_name, size, NULL, NULL);

    check_kmem();

}
