/*
 * implementation of slab allocator
 */

#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>

#include "mm.h"

/* when true, mm_malloc() triggers mem_cache_recycle() once
 * page_occupied exceeds RECYCLE_THRESHOLD */
bool mm_enable_rec = false;

/* statically allocated "cache of caches": every mem_cache_t descriptor
 * handed out by mem_cache_create() is carved from cache_cache */
mem_cache_t cache_cache;
/* round-robin cursor used by mem_cache_recycle(); walks the cache
 * chain backwards via next.prev */
mem_cache_t* next_rec_cachep = &cache_cache;
/* cache-list holder: .next is the head cache, .prev the tail;
 * both NULL means the list is empty */
struct list_head cache_list_head = {0,0};
/* number of caches currently linked on the list */
int cur_cache_count = 0;

/* fixed-size cache table (32 B .. 16 KB, powers of two);
 * cs_cachep entries are filled in by mm_init() */
cache_sizes_t cache_sizes[] =
{
    {    32, 0},
    {    64, 0},
    {   128, 0},
    {   256, 0},
    {   512, 0},
    {  1024, 0},
    {  2048, 0},
    {  4096, 0},
    { 16384/2, 0},
    { 16384, 0},
};

/* pages currently held via mmap; compared against RECYCLE_THRESHOLD */
int page_occupied = 0;

// ----------------------------------------------------------------------
// key routines
// ----------------------------------------------------------------------

/* double word (8) alignment; must be a power of two */
#define ALIGNMENT 8

/* rounds up to the nearest multiple of ALIGNMENT; the mask is derived
 * from ALIGNMENT itself (previously a hardcoded ~0x7, which would
 * silently desync if ALIGNMENT were ever changed) */
#define ALIGN(size) (((size) + (ALIGNMENT-1)) & ~(ALIGNMENT-1))

bool mm_init(bool enable_rec)
{
    /*
     * Bring the allocator up: initialise the statically allocated
     * cache_cache (from which all mem_cache_t descriptors are later
     * allocated), register it as the sole entry of the global cache
     * list, and pre-create the fixed-size caches listed in
     * cache_sizes[].  Always returns true.
     */
    mm_enable_rec = enable_rec;
    next_rec_cachep = &cache_cache;

    // init the cache_cache struct

    // self-link the chain node; the cast relies on `next` being the
    // first member of mem_cache_t
    cache_cache.next.prev = (struct list_head*)&cache_cache;
    cache_cache.next.next = (struct list_head*)&cache_cache;

    // empty slab lists are encoded as NULL next/prev (not self-loops)
    cache_cache.slabs_full.prev = 0;
    cache_cache.slabs_full.next = 0;

    cache_cache.slabs_partial.prev = 0;
    cache_cache.slabs_partial.next = 0;

    cache_cache.slabs_free.prev = 0;
    cache_cache.slabs_free.next = 0;

    // each object is prefixed by a pointer-sized bookkeeping header
    // NOTE(review): mm_malloc/mm_free use a fixed 4-byte header offset,
    // which matches sizeof(void*) only on a 32-bit build -- confirm
    cache_cache.objsize = ALIGN(sizeof(mem_cache_t)+sizeof(void*));

    // count how many objects (each followed by one mem_bufctl_t
    // free-list entry) fit in one page after the on-slab slab_t header
    int objnumcount = 0;
    int slab_ctl_size = sizeof(slab_t);
    int page_remain_size = PAGE_SIZE - slab_ctl_size;
    for (objnumcount = 0; page_remain_size >= cache_cache.objsize + sizeof(mem_bufctl_t); ++objnumcount)
    {
        page_remain_size -= (cache_cache.objsize + sizeof(mem_bufctl_t));
    }
    cache_cache.objnum = objnumcount;

    cache_cache.pagenum = 1;

    // leftover bytes become the colouring range used by slab_create()
    cache_cache.colour = page_remain_size;

    cache_cache.colour_off = L1_CACHE_LINE_BYTES_X86;

    cache_cache.colour_next = 0;

    cache_cache.ctor = 0;
    cache_cache.dtor = 0;

    // add this cache to cache list

    cache_list_head.prev = (struct list_head*)(&cache_cache);
    cache_list_head.next = (struct list_head*)(&cache_cache);

    // increase current cache count
    cur_cache_count++;

    // create fixed sized caches
    int i = 0;
    for (i=0; i<sizeof(cache_sizes)/sizeof(cache_sizes_t); ++i)
    {
        mem_cache_t* newcache = 0;

        // check if proper sized cache already exists
        // attention, if such cache do exists, it can only be cache_cache now!

        // NOTE(review): the literal +4 here vs sizeof(void*) used for
        // cache_cache.objsize above agree only on 32-bit targets -- confirm
        if (ALIGN(sizeof(mem_cache_t)+4) == ALIGN(cache_sizes[i].cs_size+4))
        {
            newcache = &cache_cache;
        }
        else
        {
            newcache = mem_cache_create(ALIGN(cache_sizes[i].cs_size+4), 0, 0, 0);
        }

        cache_sizes[i].cs_cachep = newcache;
    }

    return true;
}

bool mm_deinit()
{
    /*
     * Tear the allocator down by destroying every cache on the global
     * list, cache_cache included.
     * NOTE(review): mem_cache_destory() returns 0 without unlinking the
     * cache or decrementing cur_cache_count when the cache still has
     * full/partial slabs, so this loop can spin forever if any object
     * is still allocated -- confirm callers free everything first.
     */
    // loop until all caches are destroied

    mem_cache_t* cur_cache = (mem_cache_t*)(cache_list_head.next);

    while (cur_cache_count > 1)
    {
        // grab the successor before the node is freed
        mem_cache_t* next_cache = (mem_cache_t*)(cur_cache->next.next);
        mem_cache_destory(cur_cache);
        cur_cache = next_cache;
    }

    // destroy the final remaining cache (normally cache_cache itself)
    mem_cache_destory(cur_cache);

    return true;
}

void* mm_malloc(int size)
{
    /*
     * General-purpose allocation: find (or create) a cache whose
     * object size matches the rounded-up request, then take one object
     * from it.  mem_cache_alloc() stores the owning slab_t pointer in
     * the first 4 bytes of the object, so the user gets objp+4.
     * NOTE(review): the size is rounded with sizeof(void*) of header
     * space but the returned pointer skips a fixed 4 bytes, and the
     * slab pointer is stored as a 32-bit value -- this only lines up on
     * a 32-bit build; confirm the target.  `objp+4` relies on the GCC
     * void-pointer-arithmetic extension.
     */
    void* objp = 0;

    // round up the size
    int newsize = ALIGN(size+sizeof(void*));

    // check if currently a proper sized cache exist

    mem_cache_t* head = (mem_cache_t*)cache_list_head.next;
    mem_cache_t* propercache = head;

    // walk the circular cache chain once, stopping on an exact size match
    for (; propercache && propercache->next.next && (propercache->next.next != (struct list_head*)head); propercache = (mem_cache_t*)(propercache->next.next))
    {
        if (propercache && propercache->objsize == newsize)
        {
            break;
        }
    }

    if (propercache && propercache->objsize == newsize)
    {
        objp = mem_cache_alloc(propercache);
    }
    else
    {
        // no matching cache: create a dedicated one for this size
        // (it stays on the chain for future requests of the same size)
        mem_cache_t* newcache = mem_cache_create(newsize, 0, 0, 0);
        objp = mem_cache_alloc(newcache);
    }

    // do memory recycle if required
    if (mm_enable_rec && page_occupied > RECYCLE_THRESHOLD)
    {
        mem_cache_recycle();
    }

    return (objp+4);
}

void mm_free(void* objp)
{
    /*
     * Free an object previously returned by mm_malloc().  objp is the
     * user pointer (4 bytes past the object base); the 4 bytes just
     * before it hold the owning slab_t pointer written by
     * mem_cache_alloc().
     * NOTE(review): the pointer is round-tripped through unsigned int,
     * which truncates on LP64 targets -- confirm 32-bit-only intent.
     */
    slab_t* slabp = 0;

    // find the slab_t pointer and the cache pointer
    slabp = (slab_t*)(*(unsigned int*)(objp-4));
    //memcpy(&slabp, objp - 4, 4);
    mem_cache_t* cachep = slabp->cache_masterp;

    // free the object
    mem_cache_free(cachep, objp);
}

// ----------------------------------------------------------------------
// cache level
// ----------------------------------------------------------------------

mem_cache_t* mem_cache_create (int size, unsigned int flags,
        void (*ctor)(void*, mem_cache_t*, unsigned int), void (*dtor)(void*, mem_cache_t*, unsigned int))
{
    /*
     * Create a new cache for objects of `size` bytes (size already
     * includes the bookkeeping header and alignment -- callers pass an
     * ALIGN()ed value).  Computes the slab geometry, links the new
     * cache at the head of the global cache chain and returns it.
     * `flags` is currently unused.  The descriptor itself comes from
     * mm_malloc(), which is served by cache_cache since their object
     * sizes match.
     */
    // create the cache structure
    mem_cache_t* newcache = mm_malloc(sizeof(mem_cache_t));

    // init the new cache structure

    newcache->next.prev = 0;
    newcache->next.next = 0;

    // empty slab lists are encoded as NULL next/prev
    newcache->slabs_full.prev = 0;
    newcache->slabs_full.next = 0;

    newcache->slabs_partial.prev = 0;
    newcache->slabs_partial.next = 0;

    newcache->slabs_free.prev = 0;
    newcache->slabs_free.next = 0;

    newcache->objsize = size;

    if (newcache->objsize <= PAGE_SIZE)
    {
        // single-page slab: objects >= 512 bytes keep their slab_t
        // management off-slab (see threshold in slab_create), so the
        // whole page is object space and no bufctl entries are needed
        // on the page either
        int objnumcount = 0;
        int slab_ctl_size = sizeof(slab_t);
        int page_remain_size = (newcache->objsize>=512)?PAGE_SIZE:(PAGE_SIZE - slab_ctl_size);
        int per_obj_space = (newcache->objsize>=512)?newcache->objsize:(newcache->objsize + sizeof(mem_bufctl_t));
        for (objnumcount = 0; page_remain_size >= per_obj_space; ++objnumcount)
        {
            page_remain_size -= per_obj_space;
        }

        newcache->objnum = objnumcount;

        newcache->pagenum = 1;

        // leftover bytes form the colouring range
        newcache->colour = page_remain_size;
    }
    else
    {
        // multi-page slab holding exactly one object
        newcache->objnum = 1;

        newcache->pagenum = (newcache->objsize)/PAGE_SIZE + 1;

        // correct the +1 when objsize is an exact page multiple
        if ((newcache->pagenum)*PAGE_SIZE - newcache->objsize >= PAGE_SIZE)
        {
            newcache->pagenum -= 1;
        }

        newcache->colour = (newcache->pagenum)*PAGE_SIZE - newcache->objsize;
    }

    newcache->colour_off = L1_CACHE_LINE_BYTES_X86;

    newcache->colour_next = 0;

    newcache->ctor = ctor;
    newcache->dtor = dtor;

    // add this cache to cache list

    // splice the new cache in as the head of the circular chain,
    // then update the head/tail holder
    newcache->next.next = cache_list_head.next;
    newcache->next.prev = cache_list_head.prev;
    cache_list_head.prev->next = (struct list_head*)newcache;
    cache_list_head.next->prev = (struct list_head*)newcache;

    cache_list_head.next = (struct list_head*)newcache;
    cache_list_head.prev = ((struct list_head*)newcache)->prev;

    // increase current cache count
    cur_cache_count++;

    return newcache;
}

int mem_cache_recycle()
{
    int freed_pages = 0;
    int i = 0;

    for (i = RECYCLE_CACHE_CHAIN_LEN; i>0; i--)
    {
        mem_cache_t* cur_rec_cache = next_rec_cachep;
        next_rec_cachep = (mem_cache_t*)(cur_rec_cache->next.prev);

        freed_pages += mem_cache_destory(cur_rec_cache);
    }

    return freed_pages;
}

int mem_shrink(mem_cache_t* cachep) // we only free slabs in free list
{
    /*
     * Release every slab on the cache's free list back to the OS and
     * return the number of pages freed.  Full and partial slabs are
     * left untouched.
     */
    if (cachep->slabs_free.next == 0)
    {
        return 0;
    }

    // clear the free list

    int freed_pages = 0;

    slab_t* head = (slab_t*)(cachep->slabs_free.next);
    slab_t* tail = (slab_t*)(cachep->slabs_free.prev);

    // cut the chain after the old tail so the walk below terminates
    tail->list.next = 0;

    slab_t* cur_slab = head;
    // stop on NULL (walked past the cut tail) or on a self-looped node
    // (a slab fresh from slab_create points list.next at itself)
    for (; cur_slab && cur_slab->list.next != ((struct list_head*)cur_slab);)
    {
        slab_t* next_slab = (slab_t*)(cur_slab->list.next);
        freed_pages += slab_destroy(cachep, cur_slab);
        cur_slab = next_slab;
    }
    // handle the self-looped case the loop skipped
    if (cur_slab)
    {
        freed_pages += slab_destroy(cachep, cur_slab);
    }

    return freed_pages;
}

int mem_cache_destory(mem_cache_t* cachep)
{
    /*
     * Destroy an empty cache: unlink it from the global cache chain,
     * free all slabs on its free list and release its descriptor back
     * to cache_cache.  Returns the number of pages freed, or 0 (with no
     * side effects) if the cache still holds live objects.
     * (The misspelling "destory" is the established public name.)
     */
    // firstly, check if this cache is empty, if it's not empty, we won't destory it

    if (cachep->slabs_full.next || cachep->slabs_partial.next)
    {
        return 0;
    }

    // remove the cache from the cache chain and update cache_list_head

    if (cur_cache_count == 1)
    {
        // last cache on the chain: list becomes empty
        cache_list_head.prev = 0;
        cache_list_head.next = 0;
    }
    else
    {
        // preserve current cache list's head and tail
        struct list_head* head = cache_list_head.next;
        struct list_head* tail = cache_list_head.prev;

        // remove the cache from the current cache chain
        cachep->next.next->prev = cachep->next.prev;
        cachep->next.prev->next = cachep->next.next;

        // check if currently removed ccache is at head or tail
        if ((struct list_head*)cachep == head)
        {
            cache_list_head.next = cachep->next.next;
        }
        else if ((struct list_head*)cachep == tail)
        {
            cache_list_head.prev = cachep->next.prev;
        }
    }

    // decrease current cache count
    cur_cache_count--;

    // shrink the cache to delete all available slabs
    int freecount = mem_shrink(cachep);

    // delete the cache descripter from the cache_cache
    // (cache_cache itself is static and must never be freed)
    if (cachep != &cache_cache)
    {
        mem_cache_free(&cache_cache,cachep);
    }

    return freecount;
}

// ----------------------------------------------------------------------
// slab level
// ----------------------------------------------------------------------

/* objects of at least this many bytes keep their slab_t descriptor
 * off-slab (allocated separately via mm_malloc in slab_create) */
#define threshold 512
/* the bufctl free-list index array sits immediately after the slab_t
 * header */
#define slab_bufctl(slabp) ((mem_bufctl_t *)(((slab_t*)slabp)+1))

slab_t * slab_create(mem_cache_t *cachep)
{
    /*
     * Map cachep->pagenum fresh zeroed pages and build one slab on
     * them.  Small objects (objsize < threshold) keep the slab_t header
     * and bufctl array at the start of the mapping ("on-slab"); larger
     * objects get a separately mm_malloc'ed header ("off-slab") so the
     * whole mapping is object space.  Exits the process if mmap fails.
     */
    void *ptr = mmap(NULL,
            PAGE_SIZE*cachep->pagenum,
            PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE,
            -1,
            0);
    if(MAP_FAILED == ptr)
    {
        // errno triage kept as breakpoints; every path falls through
        // to exit(1)
        switch(errno)
        {
            case   EACCES:
                   break;

            case   EINVAL:
                   break;

            case   ENOMEM:
                   break;

            default:
                   break;
        }
        exit(1);
    }
    page_occupied += cachep->pagenum;
    memset(ptr, 0x00, PAGE_SIZE*cachep->pagenum);

    slab_t *slabp;

    /*if the object is small,then store slab manager on-slab*/
    if(cachep->objsize < threshold)
    {
        slabp = (slab_t *)ptr;
        // a fresh slab is self-looped until a list add relinks it
        slabp->list.next = (struct list_head*)slabp;
        slabp->list.prev = (struct list_head*)slabp;
        slabp->colouroff = cachep->colour_next;
        slabp->inuse = 0;
        // objects start after the header, the bufctl array and the
        // colour offset
        slabp->s_mem = (void *)(slabp + 1) + cachep->objnum * sizeof(mem_bufctl_t) + slabp->colouroff;
        slabp->free = 0;
        slabp->cache_masterp = cachep;

        slab_init(cachep, slabp);

    }
    else
    {
        // off-slab: allocate the header (plus bufctl array) separately
        unsigned int size;
        size = sizeof(slab_t) + cachep->objnum * sizeof(mem_bufctl_t);
        slabp = mm_malloc(size);
        slabp->list.next = (struct list_head*)slabp;
        slabp->list.prev = (struct list_head*)slabp;
        slabp->colouroff = cachep->colour_next;
        slabp->inuse = 0;
        slabp->s_mem = ptr + slabp->colouroff;
        slabp->free = 0;
        slabp->cache_masterp = cachep;

        slab_init(cachep, slabp);
    }

    // update colour_next
    // advance the cache-line colouring cursor, wrapping inside the
    // leftover space recorded in cachep->colour
    if (cachep->colour >= cachep->colour_off)
    {
        cachep->colour_next = cachep->colour_next+cachep->colour_off;

        if (cachep->colour_next >= cachep->colour)
        {
            cachep->colour_next = 0;
        }
    }

    return slabp;

}

void slab_init(mem_cache_t *cachep, slab_t *slabp)
{
    /*
     * Build the slab's embedded free list (bufctl[i] = i+1, terminated
     * by BUFCTL_END) and run the cache constructor, if any, on every
     * object.  The ctor receives the user pointer (object base + 4,
     * past the slab-pointer header).
     */
    unsigned int i;

    /* guard: with zero objects per slab there is no bufctl array, and
     * the terminator store below would index bufctl[UINT_MAX] (i is
     * unsigned) -- an out-of-bounds write corrupting the slab header */
    if (cachep->objnum == 0)
        return;

    for(i = 0; i < cachep->objnum; i++)
    {
        void *objp = slabp->s_mem + i * cachep->objsize;
        if(cachep->ctor)
            cachep->ctor(objp + 4, cachep, 0);
        slab_bufctl(slabp)[i] = i + 1;
    }
    slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}

void* mem_cache_alloc(mem_cache_t *cachep)
{
    /*
     * Hand out one object from cachep.  Preference order: an existing
     * partial slab, then a free slab, then a freshly created slab.
     * Returns the object base pointer; its first 4 bytes hold the
     * owning slab_t, so callers (mm_malloc) expose base+4 to the user.
     */
    slab_t *slabp  = NULL;
    void *objp = NULL;
    /*if the partial list is empty*/
    if(NULL == cachep->slabs_partial.next && NULL == cachep->slabs_partial.prev)
    {
        /*if the free list is empty, too*/
        if(NULL == cachep->slabs_free.next && NULL == cachep->slabs_free.prev)
        {
            /* slab_create() already calls slab_init(); the redundant
             * second slab_init() call that used to follow here re-ran
             * every constructor and rebuilt the bufctl chain, so it has
             * been removed */
            slabp = slab_create(cachep);
        }
        else
        {
            slabp = (slab_t*)cachep->slabs_free.next;
            /*remember to delete from the slabs_free list!!!*/
            delfrom_list(&(cachep->slabs_free),slabp);
        }
        addto_listhead(&(cachep->slabs_partial),slabp);
    }
    else
    {
        slabp = (slab_t*)cachep->slabs_partial.next;
    }

    // pop the first free object off the slab's bufctl chain
    objp = slabp->s_mem + slabp->free * cachep->objsize;
    slabp->free = slab_bufctl(slabp)[slabp->free];
    slabp->inuse++;

    /*if the slab get full, put it to the slab_full list*/
    if(slabp->inuse == cachep->objnum)
    {
        delfrom_list(&(cachep->slabs_partial),slabp);
        addto_listtail(&(cachep->slabs_full),slabp);
    }

    /*trick: use the first 4 bytes to store the pointer of the owner's slab*/
    *((unsigned int*)objp) = (unsigned int)slabp;
    return objp;
}

void addto_listhead(struct list_head *list, slab_t *slabp)
{
    /*
     * Insert slabp and make it the head of a slab list.  Reuses the
     * tail insertion (which fully links the node into the chain), then
     * repoints the holder: ->next tracks the head slab and ->prev the
     * tail slab in this list representation.
     */
    addto_listtail(list, slabp);
    /*just update slab_list pointer in the cache*/
    list->next = (struct list_head*)slabp;
    // slabp->prev was set by addto_listtail to the previous tail
    list->prev = list->next->prev;
}

void addto_listtail(struct list_head *list, slab_t *slabp)
{
    /*
     * Append slabp as the tail of a slab list.  `list` is the
     * cache-side holder: ->next is the head slab, ->prev the tail; an
     * empty list is encoded as next == prev == NULL.  The cast treats
     * slabp as its embedded list node (slab_t starts with `list`).
     */

    /*if the list is NULL*/
    if(NULL == list->next && NULL == list->prev)
    {
        list->prev = (struct list_head*)slabp;
        list->next = (struct list_head*)slabp;
    }
    else
    {
        // splice slabp between the current tail and the head, keeping
        // the chain circular
        list->prev->next = (struct list_head*)slabp;
        ((struct list_head*)slabp)->prev = list->prev;
        list->next->prev = (struct list_head*)slabp;
        ((struct list_head*)slabp)->next = list->next;

        /*update the pointer of the slabp_list in the cache*/
        list->prev = (struct list_head*)slabp;
    }
}

void delfrom_list(struct list_head *list, slab_t *slabp)
{
    /*
     * Unlink slabp from a slab list.  `list` is the cache-side holder
     * (->next = head slab, ->prev = tail slab); an empty list is
     * encoded as next == prev == NULL.
     */
    /*if the list has only one slab*/
    if(list->next == list->prev)
    {
        list->next = NULL;
        list->prev = NULL;
    }
    else
    {
        // splice the slab's neighbours together
        ((struct list_head*)slabp)->prev->next = ((struct list_head*)slabp)->next;
        ((struct list_head*)slabp)->next->prev = ((struct list_head*)slabp)->prev;

        /*if the slab is the head*/
        if(list->next == (struct list_head*)slabp)
        {
            list->next = ((struct list_head*)slabp)->next;
        }
        /*if the slab is the tail*/
        if(list->prev == (struct list_head*)slabp)
        {
            list->prev = ((struct list_head*)slabp)->prev;
        }
        /*update the pointer in the slab*/
        // leave the removed node self-looped, matching a fresh slab
        ((struct list_head*)slabp)->next = ((struct list_head*)slabp);
        ((struct list_head*)slabp)->prev = ((struct list_head*)slabp);
    }
}

void mem_cache_free(mem_cache_t *cachep, void *objp)
{
    /*
     * Return one object to its slab.  objp is the user pointer (object
     * base + 4); the 4 bytes just before it hold the owning slab_t
     * pointer written by mem_cache_alloc().  Migrates the slab between
     * the full/partial/free lists as its usage count changes.
     * NOTE(review): slab pointer read through unsigned int -- 32-bit
     * assumption, same as mm_free.
     */
    slab_t *slabp = 0;
    unsigned int objnr;
    bool isfull,isfree;
    slabp = (slab_t*)(*(unsigned int*)(objp-4));
    //memcpy(&slabp, objp - 4, 4);
    isfree = false;
    isfull = false;

    // record which list the slab is on before inuse changes
    if(slabp->inuse == cachep->objnum)
        isfull = true;

    // object index within the slab, measured from the object base
    objnr = (objp - 4 -slabp->s_mem)/cachep->objsize;
    // push the object onto the slab's bufctl free chain
    slab_bufctl(slabp)[objnr] = slabp->free;
    slabp->free = objnr;
    slabp->inuse--;

    if(0 == slabp->inuse)
        isfree = true;

    // full -> partial; partial -> free (a full single-object slab does
    // both moves in sequence)
    if(isfull)
    {
        delfrom_list(&(cachep->slabs_full),slabp);
        addto_listtail(&(cachep->slabs_partial),slabp);
    }
    if(isfree)
    {
        delfrom_list(&(cachep->slabs_partial),slabp);
        addto_listtail(&(cachep->slabs_free),slabp);
    }
}

unsigned int slab_destroy(mem_cache_t *cachep, slab_t *slabp)
{
    /*
     * Destroy one slab: unlink it from whichever list it sits on, run
     * the destructor (if any) over every object slot (used or not),
     * unmap the slab's pages and, for off-slab management
     * (objsize >= threshold), free the separately allocated slab_t.
     * Returns the number of pages unmapped.
     */
    unsigned int i;

    /*update slab_list in the cache*/
    if(cachep->objnum == slabp->inuse)
    {
    	delfrom_list(&(cachep->slabs_full),slabp);
    }
    else if(0 == slabp->inuse)
    {
    	delfrom_list(&(cachep->slabs_free),slabp);
    }
    else
    {
    	delfrom_list(&(cachep->slabs_partial),slabp);
    }

    // dtor gets the user pointer (base + 4), matching the ctor call in
    // slab_init
    for(i = 0; i < cachep->objnum; i++)
    {
    	void *objp = slabp->s_mem + i * cachep->objsize;

    	if(cachep->dtor)
    	    cachep->dtor(objp + 4, cachep, 0);

    }
    if(cachep->objsize >= threshold)
    {
        // s_mem was offset by colouroff from the mmap'ed base
        munmap(slabp->s_mem - slabp->colouroff, PAGE_SIZE * cachep->pagenum);

        slab_t* slabp_ctl = 0;
        // the off-slab slab_t itself came from mm_malloc, so the 4
        // bytes before it hold the slab that owns the descriptor
        slabp_ctl = (slab_t*)(*(unsigned int*)((void*)slabp - 4));
        // memcpy(&slabp_ctl, (void*)slabp - 4, 4);
        // NOTE: this inner `cachep` shadows the parameter, but only
        // inside this branch
        mem_cache_t* cachep = slabp_ctl->cache_masterp;
        mem_cache_free(cachep, slabp);
    }
    else
    {
        // on-slab: the slab_t header is the start of the mapping
        munmap(slabp, PAGE_SIZE * cachep->pagenum);
    }
    page_occupied -= cachep->pagenum;

    return cachep->pagenum;
}
