#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#include "slab-allocator.h"

/*
 * One slab: a small header followed by a flexible array that holds the
 * object payloads.  Free objects are chained through their first word,
 * starting at ready_list; slabs_list links the slab into the
 * allocator's circular list of slabs that still have free objects.
 */
struct object_slab {
	struct linked_list slabs_list;	/* node in allocator->slabs_list */
	void * ready_list;		/* singly-linked list of free objects */
	unsigned int used_objects;	/* objects currently handed out */
	unsigned long object_memory[];	/* object storage, pointer-aligned */
};

/*
 * Fall back to a local offsetof() only when <stddef.h> has not already
 * provided one: the homegrown null-pointer form relies on undefined
 * behavior, so the compiler's definition is preferred when available.
 */
#ifndef offsetof
#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
#endif

/*
 * Report a trashed canary word behind the given object, then abort.
 * The diagnostic goes to stderr (unbuffered on most systems) so it is
 * not lost in stdout buffering when abort() terminates the process.
 */
static void corrupted_canary(const void * object) __attribute__((noreturn));
static void corrupted_canary(const void * object)
{
	fprintf(stderr, "memory overrun on object %p\n", object);
	abort();
}

/*
 * Initialize a slab allocator that hands out fixed-size objects.
 * The object size is rounded up to pointer alignment and the canary
 * flag is normalized to 0/1 before being recorded.
 */
void slab_create(struct slab_allocator * allocator,
		 unsigned int objcount,
		 unsigned int objsize,
		 int canary)
{
	const unsigned int align_mask = sizeof(void *) - 1;

	pthread_mutex_init(&allocator->global_lock, NULL);

	/* Start with an empty circular list: the head points at itself. */
	allocator->slabs_list.next = &allocator->slabs_list;
	allocator->slabs_list.prev = &allocator->slabs_list;

	allocator->object_count = objcount;
	allocator->object_size  = (objsize + align_mask) & ~align_mask;
	allocator->canary_check = (canary != 0);
}

/*
 * Allocate one object.
 *
 * Uses the front slab of allocator->slabs_list; the list holds only
 * slabs that still have free objects, kept in descending used-count
 * order (see slab_free()).  When the list is empty a new slab is
 * malloc'd and all of its objects are threaded onto its free list.
 * Returns NULL only if that malloc fails.
 */
void * slab_alloc(struct slab_allocator * allocator)
{
	struct linked_list * list;
	struct object_slab * slab;
	void * before;
	void * object;

	pthread_mutex_lock(&allocator->global_lock);

	list = allocator->slabs_list.next;
	if (list == &allocator->slabs_list) {
		unsigned int i;

		/*
		 * Empty list: build a fresh slab.  Each object occupies its
		 * payload plus a trailing back-pointer to the owning slab,
		 * plus one extra canary word when canary_check is set
		 * (canary_check is 0 or 1, so the shift yields one or two
		 * trailing pointers per object).
		 */
		slab = malloc(((sizeof(void *) << allocator->canary_check) +
			       allocator->object_size) *
			      allocator->object_count +
			      sizeof(struct object_slab));
		if (slab == NULL) {
			object = NULL;
			goto unlock_finish;
		}

		/* Insert the slab as the sole element of the circular list. */
		allocator->slabs_list.next = &slab->slabs_list;
		allocator->slabs_list.prev = &slab->slabs_list;
		slab->slabs_list.next = &allocator->slabs_list;
		slab->slabs_list.prev = &allocator->slabs_list;

		/*
		 * Chain every object onto the slab's free list through the
		 * object's first word, stamping the canary (the allocator
		 * pointer) and the owning-slab pointer after each payload.
		 */
		before = &slab->ready_list;
		object = slab->object_memory;
		for (i = 0; i < allocator->object_count; i++) {
			*((void **)before) = object;
			before = object;
			object += allocator->object_size;
			if (allocator->canary_check != 0) {
				*((void **)object) = allocator;
				object += sizeof(void *);
			}
			*((void **)object) = slab;
			object += sizeof(void *);
		}
		*((void **)before) = NULL;	/* terminate the free list */

		slab->used_objects = 0;
	} else {
		/* Recover the slab from its embedded list node. */
		slab = (void *)list - offsetof(struct object_slab, slabs_list);
	}

	slab->used_objects++;
	object = slab->ready_list;
	slab->ready_list = *((void **)object);	/* pop the first free object */

	if (slab->ready_list == NULL) {
		/*
		 * Slab is now fully used: unlink it from the list until
		 * slab_free() returns an object to it.
		 */
		slab->slabs_list.next->prev = slab->slabs_list.prev;
		slab->slabs_list.prev->next = slab->slabs_list.next;
	}

unlock_finish:

	pthread_mutex_unlock(&allocator->global_lock);

	return object;
}

/*
 * Return an object to its slab.
 *
 * The owning slab is found via the back-pointer stored just after the
 * (aligned) payload; when canary checking is enabled, the allocator
 * pointer stored before it is verified first and a mismatch aborts.
 * The slab is then repositioned in allocator->slabs_list, which is
 * kept sorted by descending used_objects.  If this slab becomes
 * completely free while another completely free slab is already on the
 * list, this slab is released back to malloc instead of being cached.
 */
void slab_free(struct slab_allocator * allocator, void * object)
{
	void * lookup;
	struct object_slab * slab;
	struct object_slab * comp;
	struct linked_list * next;
	struct linked_list * prev;

	/* The metadata words live right behind the payload. */
	lookup = object;
	lookup += allocator->object_size;
	if (allocator->canary_check != 0) {
		if (*((void **)lookup) != allocator)
			corrupted_canary(object);	/* does not return */
		lookup += sizeof(void *);
	}
	slab = *((void **)lookup);

	pthread_mutex_lock(&allocator->global_lock);

	slab->used_objects--;
	if (slab->ready_list != NULL) {
		/*
		 * Slab is already on the list.  If it is the last element
		 * its (now lower) used count still sorts it last, so skip
		 * the re-sort; otherwise unlink it for re-insertion.
		 */
		next = slab->slabs_list.next;
		prev = slab->slabs_list.prev;
		if (next == &allocator->slabs_list)
			goto insert_obj;
		next->prev = prev;
		prev->next = next;
	} else {
		/*
		 * Slab was fully used and therefore off the list; with one
		 * object freed it has the highest possible used count of
		 * any listed slab, so it belongs at the head.
		 */
		prev = &allocator->slabs_list;
		next = prev->next;
		goto insert_slab;
	}

	/*
	 * Walk forward to the first slab whose used count does not exceed
	 * ours; inserting in front of it preserves descending order.
	 * comp stays NULL when we fall off the end of the list.
	 */
	for (;;) {
		comp = NULL;
		if (next == &allocator->slabs_list)
			break;
		comp = (void *)next - offsetof(struct object_slab, slabs_list);
		if (slab->used_objects >= comp->used_objects)
			break;
		prev = next;
		next = next->next;
	}

	/*
	 * Cache at most one fully-free slab: if both this slab and its
	 * would-be successor are empty, give this one back to the system
	 * (slab stays non-NULL and is freed after unlocking).
	 */
	if (comp != NULL &&
	    comp->used_objects == 0 &&
	    slab->used_objects == 0)
		goto unlock_finish;

insert_slab:

	next->prev = &slab->slabs_list;
	prev->next = &slab->slabs_list;
	slab->slabs_list.next = next;
	slab->slabs_list.prev = prev;

insert_obj:

	/* Push the object back onto the slab's free list. */
	*((void **)object) = slab->ready_list;
	slab->ready_list = object;
	slab = NULL;	/* mark: nothing to free() below */

unlock_finish:

	pthread_mutex_unlock(&allocator->global_lock);

	/* Non-NULL only on the duplicate-empty-slab path above. */
	if (slab != NULL)
		free(slab);
}
