#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#include <pthread.h>
#define _XOPEN_SOURCE_EXTENDED 1
#include <sys/mman.h>
#include <unistd.h>

#include "../../interfaces/memalloc.h"

#define ALLOC_PATH_SUFFIX "/allocTmp/allocBlock"
#define ALLOC_LN_MAX_BLOCKS 6	//how many digits to add in filename.

#define PRI() /*fprintf(stderr,"%s __LINE__ %3d\n",__FUNCTION__,__LINE__)*/

#define ERROR(RETVAL, ERRCODE)	{PRI(); errno = ERRCODE; return RETVAL;}
#define ABORT(RETVAL)			{PRI();return RETVAL;}

/* Protects the swap-file block list (all list_* functions). */
static pthread_mutex_t list_lock;
/* Serialises ma_alloc() against init/deinit. */
static pthread_mutex_t alloc_lock;
/* Count of ma_get/ma_release/ma_free calls in flight; ma_deinit spins on it. */
static size_t operations;
enum Bool {
		FALSE,
		TRUE,
		UNDEFINED
	};

/* Circular doubly linked list describing the swap file as a sequence of
 * blocks.  list_head is a sentinel (is_free == UNDEFINED so it never
 * merges with real nodes); walking in the ->prev direction starting at
 * list_head->prev visits blocks in increasing file-offset order. */
static struct list {
	struct list* next;
	struct list* prev;
	off_t len;		/* block length in bytes */

	enum Bool is_free;	/* TRUE = free, FALSE = allocated, UNDEFINED = sentinel */

} * list_head;

/*
 * Debug helper: dump every node of the block list to stderr, one line
 * per node (is_free flag, prev/self/next pointers, length).
 * Precondition: list_create() succeeded, so list_head is non-NULL.
 */
static void list_pri()
{ 
	struct list* curr = list_head;
	/* off_t has no portable printf conversion: cast to long long
	 * (was passed raw to %lld — UB where off_t is not long long). */
	fprintf(stderr, "%d <%p|%p|%p> %5lld\n", curr->is_free,
		(void*)curr->prev, (void*)curr, (void*)curr->next, (long long)curr->len);
	for (curr = curr->next; curr != list_head; curr = curr->next)
		fprintf(stderr, "%d <%p|%p|%p> %5lld\n", curr->is_free,
			(void*)curr->prev, (void*)curr, (void*)curr->next, (long long)curr->len);
}

static inline int list_create(off_t val)
{
	if((list_head = (struct list*)malloc(sizeof(struct list)))==NULL)
		ERROR(ENOMEM,0);
	list_head->is_free = UNDEFINED;
	struct list*ptr;
	if((ptr = (struct list*)malloc(sizeof(struct list)))==NULL)
	{
		free(list_head);
		list_head = NULL;
		ERROR(ENOMEM,0);
	}
	list_head->prev = list_head->next = ptr;
	ptr->prev = ptr->next = list_head;
	ptr->len = val;
	ptr->is_free = TRUE;
	
	return 1;
}

/*
 * Free every node of the block list — including the sentinel, which the
 * previous version leaked — and reset list_head to NULL.
 */
static inline void list_destroy()
{
	struct list* curr = list_head->next;
	while (curr != list_head) {
		struct list* next = curr->next;
		free(curr);
		curr = next;
	}
	free(list_head);	/* was never freed: one-node leak per init/deinit cycle */
	list_head = NULL;
}

/*
 * Merge ptr with its successor when both carry the same is_free state.
 * The sentinel's UNDEFINED state matches neither TRUE nor FALSE, so it
 * is never absorbed.
 */
static inline void list_try_group_basic(struct list* ptr)
{
	struct list* victim = ptr->next;
	if (victim->is_free != ptr->is_free)
		return;
	ptr->len += victim->len;
	ptr->next = victim->next;
	victim->next->prev = ptr;
	free(victim);
}

/*
 * Try to coalesce ptr with both of its neighbours.  Order matters:
 * merging ptr with its successor first keeps ptr valid; the second call
 * may then absorb ptr into ptr->prev (freeing ptr), so callers must not
 * dereference ptr after this returns.
 */
static void list_try_group(struct list* ptr)
{
	assert(ptr != NULL);
	list_try_group_basic(ptr);
	list_try_group_basic(ptr->prev);
}

static off_t list_add(off_t sz)
{
//	fprintf(stderr, "list<<%lld\n", sz);
	//FIXME: if some ma_free or ma_release is proccessing it should wait for them and do stuff after them?
	// cause now it give errors more freqently
	assert(pthread_mutex_lock(&list_lock) == 0);
	struct list* curr = list_head->prev;
	off_t off = 0;
	while (curr != list_head) {
		if (curr->is_free == TRUE)
		{
			if (curr->len > sz) {
				//FIXME: SEGFAULT
				struct list* ptr;
				if((ptr = (struct list*)malloc(sizeof(struct list)))==NULL)
					ERROR(-1,ENOMEM);
				ptr->prev = curr;
				ptr->next = curr->next;
				curr->next->prev = ptr;
				curr->next = ptr;

				ptr->len = sz;
				ptr->is_free = FALSE;
				curr->len -= sz;

				list_try_group(ptr);
				break;
			} else if (curr->len == sz) {
				curr->is_free = FALSE;
				list_try_group(curr);
				break;
			}
		}
		off += curr->len;
//		fprintf(stderr,"ladd_off = %lld\n",(long long int)off);
		curr = curr->prev;
	}
	assert(pthread_mutex_unlock(&list_lock) == 0);
	if(curr == list_head)
	{
		ERROR(-1,ENOMEM);
	}
//	fprintf(stderr,"list_add returns %lld\n",(long long int)off);
	return off;
}
//FIXME: add some comment about purpose of function list_remove, especialy about args

static int list_remove(off_t addr, off_t len)
{
	assert(pthread_mutex_lock(&list_lock) == 0);
	struct list* curr = list_head->prev;

	do {
		assert(curr != list_head);
		if (addr < curr->len)
			break;
		addr -= curr->len;
		curr = curr->prev;
	} while (1);
	//FIXME: Why? This function needs comments or rewriting
	assert(curr->is_free == FALSE);
	assert(curr->len >= addr + len);
	//FIXME: very strange especialy after assert
	int flag = 0;
	if (addr + len != curr->len) {
		//FIXME: SEGFAULT
		struct list* ptr;
		if((ptr = (struct list*)malloc(sizeof(struct list)))==NULL)
			ERROR(-1,ENOMEM);
		ptr->next = curr;
		ptr->prev = curr->prev;
		curr->prev->next = ptr;
		curr->prev = ptr;

		ptr->len = curr->len - addr - len;
		ptr->is_free = FALSE;
		curr->len = addr + len;
		flag = 1;
	}
	if (addr != 0) {
		struct list* ptr;
		if((ptr = (struct list*)malloc(sizeof(struct list)))==NULL)
		{
			if(flag)
			{
				ptr = curr->prev;
				curr->prev = ptr->prev;
				ptr->prev->next = curr;
				curr->len += ptr->len;
				free(ptr);
			}
			ERROR(-1,ENOMEM);
		}
		ptr->prev = curr;
		ptr->next = curr->next;
		curr->next->prev = ptr;
		curr->next = ptr;

		ptr->len = addr;
		ptr->is_free = FALSE;
		curr->len = len;
	}
	curr->is_free = TRUE;
	list_try_group(curr);

	assert(pthread_mutex_unlock(&list_lock) == 0);
}

/* Global allocator life-cycle state, driven by compare-and-swap in
 * ma_init()/ma_deinit(). */
enum AllocSwitch {
	AS_OFF, // Allocator is not running
	AS_LOCKED, // Allocator's status is being changed
	AS_ON // Allocator is running
};
enum AllocSwitch allocRunning = AS_OFF;
//FIXME !64bit variables
static size_t allocLimitGiven = 0;	// RAM budget: max bytes mmap'ed at once (ma_init `mem`)
static off_t allocLimitTotal = 0;	// swap file size in bytes (ma_init `swap`)
static volatile size_t allocCurrentGiven = 0;	// bytes currently mmap'ed
static off_t allocCurrentTotal = 0;	// bytes currently reserved in the swap file

static size_t page_size = 0;	// sysconf(_SC_PAGESIZE), cached by ma_init

static int fildes = 0;	// fd of the swap file backing all mappings

/* Per-block state machine.  AS_LOCK doubles as a spin-lock: threads
 * acquire a slot by OR-ing AS_LOCK into status (__sync_fetch_and_or)
 * and release it by storing the new plain state. */
enum AllocStatus {
		AS_VOID = 0, // No block with this id
		AS_ACTIVE = 1, // ptr is valid
		AS_PASSIVE = 2, // ptr is invalid
		AS_LOCK = 3 // Locked by a thread
	};
	
struct AllocBlock {

	volatile enum AllocStatus status;
	void *ptr;	// mapping address while AS_ACTIVE, else NULL
	size_t size;	// requested size in bytes (not page-aligned)
	off_t off;	// offset of the block inside the swap file
	unsigned int ref;	// ma_get refcount; block unmapped when it drops to 0
};

static size_t ALLOC_MAX_BLOCKS = 1023;	// capacity of statusTable (doubled on demand)
static size_t allocBlocks = 0;	// number of live blocks
static struct AllocBlock* statusTable = NULL;	// block table, indexed by id-1
static size_t allocIter = 0;	// round-robin cursor for finding a free slot

static inline int ma_free_wrapped(size_t id, int polite);

//FIXME: swap is off_t

static inline int ma_init_wrapped(size_t mem, off_t swap, const char* swap_path)
{
	if((mem != 0 && swap != 0 && swap >= mem)==0)
		ERROR(0,EINVAL);
	if((fildes = open(swap_path, O_CREAT | O_RDWR | O_TRUNC, S_IRWXU)) == -1)
		ABORT(0);
	if((statusTable = (struct AllocBlock*)calloc(ALLOC_MAX_BLOCKS, sizeof(struct AllocBlock))) == NULL)
	{
		close(fildes);
		ABORT(0);
	}

	allocLimitGiven = mem;
	allocLimitTotal = swap;
	//FIXME: swap constant should be smaller.
	//FIXME: Check rlimit for constraints
	if(errno = posix_fallocate(fildes, 0, ((off_t) swap)))
	{
		free(statusTable);
		statusTable = NULL;
		close(fildes);
		ERROR(0,ENOMEM);
	}

	//Advise: use static mutex initializer
	pthread_mutex_init(&list_lock, NULL);
	pthread_mutex_init(&alloc_lock, NULL);

	page_size = sysconf(_SC_PAGESIZE);

	if(list_create((off_t) swap) != 1)
		ABORT(0);

	return 1;
}
int ma_init(size_t mem, off_t swap, const char* swap_path)
{
	if((__sync_bool_compare_and_swap(&allocRunning, AS_OFF, AS_LOCKED)) == 0)
		ERROR(0,EALREADY);
	int r = ma_init_wrapped(mem, swap, swap_path);
	if(r==1)
		assert(__sync_bool_compare_and_swap(&allocRunning, AS_LOCKED, AS_ON));
	else
		assert(__sync_bool_compare_and_swap(&allocRunning, AS_LOCKED, AS_OFF));
	return r;
}

/*
 * Tear the allocator down: wait for in-flight operations to drain,
 * force-free every block, then release the table, the list and the swap
 * file.  Called with allocRunning == AS_LOCKED, so no new operation can
 * start while this runs.
 */
static inline void ma_deinit_wrapped()
{
	assert(pthread_mutex_lock(&alloc_lock) == 0);
	//FIXME: some sleep or pthread_condition maybe?
	/* was `while (operations);` — a plain read of a non-volatile variable
	 * that the compiler may hoist out of the loop, turning the wait into
	 * an infinite spin.  The atomic no-op forces a fresh read each time. */
	while (__sync_fetch_and_add(&operations, 0) != 0);

	size_t i;
	if (statusTable != NULL)
		for (i = 0; i < ALLOC_MAX_BLOCKS; i++)
		{
			ma_free_wrapped(i, 0);	/* impolite: unmaps AS_ACTIVE blocks too */
		}
	/* Restore the initial table capacity.  was `= 1000000`, which made a
	 * later ma_init() calloc a ~1M-entry table instead of 1023. */
	ALLOC_MAX_BLOCKS = 1023;
	allocBlocks = 0;
	allocIter = 0;

	assert(operations == 0);

	free(statusTable);
	statusTable = NULL;

	allocLimitGiven = 0;
	allocLimitTotal = 0;
	allocCurrentGiven = 0;
	allocCurrentTotal = 0;

	list_destroy();
	close(fildes);

	pthread_mutex_unlock(&alloc_lock);

	fildes = 0;
}
/*
 * Public shutdown: AS_ON -> AS_LOCKED -> AS_OFF.  A silent no-op when
 * the allocator is not currently running.
 */
void ma_deinit()
{
	if (!__sync_bool_compare_and_swap(&allocRunning, AS_ON, AS_LOCKED))
		return;
	ma_deinit_wrapped();
	/* was: the CAS lived inside assert() and would vanish under NDEBUG,
	 * leaving the allocator stuck in AS_LOCKED forever. */
	int flipped = __sync_bool_compare_and_swap(&allocRunning, AS_LOCKED, AS_OFF);
	assert(flipped);
}

// TODO: ON->ALLOC_begin->DEINIT->ALLOC_cont->FAILURE;


size_t ma_alloc(size_t sz)
{
	if(sz == 0)
		ERROR(0,EINVAL);
	//FIXME: potentional overflow of off_t
	off_t sz_inc = (((off_t) sz + page_size - 1) / page_size) * page_size;
	//FIXME: i think that it is incorrect behaviour
	//should wait when AS_LOCKED and do work
	if(allocRunning != AS_ON)
		ERROR(0,ECANCELED);
	assert(pthread_mutex_lock(&alloc_lock) == 0);
	if(allocRunning != AS_ON)
	{
		assert(pthread_mutex_unlock(&alloc_lock) == 0);
		ERROR(0,ECANCELED);
	}
	//FIXME: always allocLimitGiven < allocLimitTotal maybe it is bug and should be 
	//sz<=allocLimitTotal-allocCurrentTotal ?
	if(sz > allocLimitTotal)
	{
		assert(pthread_mutex_unlock(&alloc_lock) == 0);
		ERROR(0,ENOMEM);
	}
	if(__sync_fetch_and_add(&allocCurrentTotal, sz) > allocLimitTotal - sz)
	{
		__sync_fetch_and_add(&allocCurrentTotal, -sz);
		assert(pthread_mutex_unlock(&alloc_lock) == 0);
		ERROR(0,ENOMEM);
	}
	while (1) {
		size_t i = __sync_fetch_and_add(&allocIter, 1) % ALLOC_MAX_BLOCKS;
		if (__sync_bool_compare_and_swap(&(statusTable[i].status), AS_VOID, AS_LOCK)) {
			statusTable[i].ptr = NULL;
			statusTable[i].size = sz;
			statusTable[i].ref = 0;
			//FIXME: never happens? error code of list_add() is 0
			if((statusTable[i].off = list_add(sz_inc)) == -1)
			{
				PRI();
				statusTable[i].status = AS_VOID;
				__sync_fetch_and_add(&allocCurrentTotal, -sz);
				assert(pthread_mutex_unlock(&alloc_lock) == 0);
				ERROR(0,ENOMEM);
			}
//			fprintf(stderr,"statusTable[%d].off == %d\n",i,(int)statusTable[i].off);
			// Poisoning
			allocBlocks++;
			if(allocBlocks == ALLOC_MAX_BLOCKS)
			{
				//FIXME: not overflow-safe. 
				assert(ALLOC_MAX_BLOCKS != (size_t) (-1));
				//FIXME: too fast as for me
				ALLOC_MAX_BLOCKS = (ALLOC_MAX_BLOCKS)*2 + 1;
				void * temp = statusTable;
				statusTable = (struct AllocBlock*)realloc(statusTable, sizeof(struct AllocBlock)*ALLOC_MAX_BLOCKS);
				if(statusTable == NULL)
				{
					statusTable = (struct AllocBlock*)temp;
					ALLOC_MAX_BLOCKS = (ALLOC_MAX_BLOCKS)/2;
					allocBlocks--;
					statusTable[i].status = AS_VOID;
					__sync_fetch_and_add(&allocCurrentTotal, -sz);
					assert(pthread_mutex_unlock(&alloc_lock) == 0);
					ERROR(0,ENOMEM);
				}
				memset(&(statusTable[ALLOC_MAX_BLOCKS / 2 + 1]), 0,
					((size_t)sizeof(struct AllocBlock))*(ALLOC_MAX_BLOCKS / 2));
			}
			
			statusTable[i].status = AS_PASSIVE;

			assert(pthread_mutex_unlock(&alloc_lock) == 0);
			return i+1;
		}
	}
}

void* ma_get(size_t id_f)
{
	if(allocRunning != AS_ON)
		ERROR(NULL,ECANCELED);
	assert(__sync_add_and_fetch(&operations, 1) != 0);
	if(allocRunning != AS_ON)
	{
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		PRI();
		ERROR(NULL,ECANCELED);
	}

	size_t id = id_f-1;
	if(id_f == 0)
	{
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		PRI();
		ERROR(NULL,EINVAL);
	}
	if(id >= ALLOC_MAX_BLOCKS)
	{
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		PRI();
		ERROR(NULL,EFAULT);
	}

	enum AllocStatus x;
	while (1) {
		//ADVISE: be very-very careful with values of enum, I suggest that you should make AS_LOCK smth like ~0 in definition of enum
		x = (enum AllocStatus)__sync_fetch_and_or(&(statusTable[id].status), AS_LOCK);
		if (x != AS_LOCK)
			break;
	}

	void *ptr;
	switch (x) {
	case AS_VOID:
		statusTable[id].status = x;
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		PRI();
		ERROR(NULL,EFAULT);
		break;
	case AS_LOCK:
		assert(0);	// Checked for it before;
		break;
	case AS_ACTIVE:
		assert (__sync_add_and_fetch(&(statusTable[id].ref),1) != 0);
		// Race condition below. UndefBeh:
		ptr = statusTable[id].ptr;
		statusTable[id].status = AS_ACTIVE;
		break;
	case AS_PASSIVE:
		if(__sync_fetch_and_add(&allocCurrentGiven, statusTable[id].size) > allocLimitGiven - statusTable[id].size)
		{
			assert(statusTable[id].size == 4096);
			__sync_fetch_and_add(&allocCurrentGiven, -statusTable[id].size);
			statusTable[id].status = x;
			assert(__sync_fetch_and_add(&operations, -1) != 0);
			fprintf(stderr, "--> %d+%d of %d\n",(int)allocCurrentGiven,(int)statusTable[id].size,(int)allocLimitGiven);
			PRI();
			ERROR(NULL,ENOMEM);
		}
		//FIXME: unreadable
		ptr = statusTable[id].ptr = mmap(NULL, (((off_t) (statusTable[id].size) + page_size - 1) / page_size) * page_size,
			PROT_EXEC | PROT_READ | PROT_WRITE, MAP_SHARED, fildes, statusTable[id].off);
		if(ptr == MAP_FAILED)
		{
			__sync_fetch_and_add(&allocCurrentGiven, -statusTable[id].size);
			statusTable[id].ptr = NULL;
			statusTable[id].status = x;
			assert(__sync_fetch_and_add(&operations, -1) != 0);
			PRI();
			ABORT(NULL);
		}
		assert(__sync_add_and_fetch(&(statusTable[id].ref), 1) != 0);
		// Race condition below. UndefBeh:
		statusTable[id].status = AS_ACTIVE;
		break;
	}
	assert(__sync_fetch_and_add(&operations, -1) != 0);
	return ptr;
}

int ma_release(size_t id_f)
{
	if(allocRunning != AS_ON)
		ERROR(0,ECANCELED);
	assert(__sync_add_and_fetch(&operations, 1) != 0);
	if(allocRunning != AS_ON)
	{
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		ERROR(0,ECANCELED);
	}
	
	size_t id = id_f-1;
	if(id_f == 0)
	{
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		ERROR(0,EINVAL);
	}
	if(id_f == 0 || id >= ALLOC_MAX_BLOCKS)
	{
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		ERROR(0,EFAULT);
	}

	enum AllocStatus x;
	while (1) {
		//ADVISE: be very-very careful with values of enum, I suggest that you should make AS_LOCK smth like ~0 in definition of enum
		x = (enum AllocStatus)__sync_fetch_and_or(&(statusTable[id].status), AS_LOCK);
		if (x != AS_LOCK)
			break;
	}

	switch(x){
	case AS_VOID:
		statusTable[id].status = x;
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		ERROR(0,EFAULT);
		break;
	case AS_LOCK:
		assert(0); // Checked for it before;
		break;
	case AS_ACTIVE:
		//deobfuscateME:
		int ref;
		assert((ref=__sync_fetch_and_add(&(statusTable[id].ref),-1)) != 0);
		if(ref-1)
		{
			statusTable[id].status = AS_ACTIVE;
			break;
		}
		assert (__sync_fetch_and_add(&allocCurrentGiven, -statusTable[id].size) >= statusTable[id].size);
		assert (munmap(statusTable[id].ptr, (((off_t)statusTable[id].size+page_size-1)/page_size)*page_size) != -1);
		statusTable[id].ptr = NULL;
		statusTable[id].status = AS_PASSIVE;
		break;
	case AS_PASSIVE:
		assert(statusTable[id].ref == 0);
		statusTable[id].status = x;
		assert(__sync_fetch_and_add(&operations, -1) != 0);
		ERROR(0,EFAULT);
		break;
	}
	assert (__sync_fetch_and_add(&operations, -1) != 0);
	return 1;
}

/*
 * Destroy block `id` (1-based).  Politely fails when the block is still
 * mapped (AS_ACTIVE) or does not exist.  Returns 1 on success, 0 with
 * errno set otherwise.
 */
int ma_free(size_t id)
{
	if (allocRunning != AS_ON)
		ERROR(0, ECANCELED);
	/* Side effects must not live inside assert(): NDEBUG would drop them. */
	size_t ops = __sync_add_and_fetch(&operations, 1);
	assert(ops != 0);
	if (allocRunning != AS_ON)
	{
		__sync_fetch_and_add(&operations, -1);
		ERROR(0, ECANCELED);
	}
	if (id == 0)
	{
		__sync_fetch_and_add(&operations, -1);
		ERROR(0, EINVAL);
	}
	int r = ma_free_wrapped(id - 1, 1);
	__sync_fetch_and_add(&operations, -1);
	return r;
}

static inline int ma_free_wrapped(size_t id, int polite)
{
	if(id >= ALLOC_MAX_BLOCKS)
		ERROR(0,EFAULT);
	enum AllocStatus x;
	while (1) 
	{
		x = __sync_fetch_and_or(&(statusTable[id].status), AS_LOCK);
		if(x != AS_LOCK)
			break;
	}

	switch(x){
	case AS_VOID:
		statusTable[id].status = x;
		//TEMP:
		if(polite)
			ERROR(0,EFAULT);
		return 0;
		break;
	case AS_LOCK:
		assert (0);	// Checked for it before;
		break;
	case AS_ACTIVE:
		if(polite)
		{
			statusTable[id].status = x;
			ERROR(0,EFAULT);
			break;
		}
		assert (__sync_fetch_and_add(&allocCurrentGiven, -statusTable[id].size) >= statusTable[id].size);
		munmap(statusTable[id].ptr, (((off_t)statusTable[id].size+page_size-1)/page_size)*page_size);
		statusTable[id].ref = 0;
	case AS_PASSIVE:
		assert (statusTable[id].ref == 0);
		assert (__sync_fetch_and_add(&allocCurrentTotal, -statusTable[id].size) >= statusTable[id].size);
		// POISONING
		list_remove(statusTable[id].off, (((off_t)statusTable[id].size+page_size-1)/page_size)*page_size);
		assert(__sync_fetch_and_add(&allocBlocks,-1) != 0);
		break;
	}
	statusTable[id].status = AS_VOID;
	return 1;
}
/*
int main()
{
	pthread_mutex_init(&list_lock, NULL);
	pthread_mutex_init(&alloc_lock, NULL);
	pthread_mutex_init(&list_lock, NULL);
	pthread_mutex_init(&alloc_lock, NULL);
	pthread_mutex_init(&list_lock, NULL);
	pthread_mutex_init(&alloc_lock, NULL);
	int a, b, c;
	int *d = NULL;
	ma_init(300000, 500000, "./file");
	while (1) {
		scanf("%d", &a);
		switch (a) {
		case 1: //ma_alloc
			scanf("%d", &b);
			fprintf(stderr, "   ma_alloc: %d<<%d\n", (int)ma_alloc(b), b);
			break;
		case 2: //ma_get
			scanf("%d", &b);
			fprintf(stderr, "     ma_get: %d<<%d\n", (int)ma_get(b), b);
			break;
		case 3: //ma_release
			scanf("%d", &b);
			fprintf(stderr, " ma_release: %d<<%d\n", (int)ma_release(b), b);
			break;
		case 4: //ma_free
			scanf("%d", &b);
			fprintf(stderr, "    ma_free: %d<<%d\n", (int)ma_free(b), b);
			break;
		case 8: //list_add
			list_pri();
			scanf("%d", &b);
			fprintf(stderr, "   list_add: %d<<%d\n", (int)list_add(b), b);
			list_pri();
			break;
		case 9: //list_remove
			list_pri();
			scanf("%d%d", &b, &c);
			fprintf(stderr, "list_remove: %d<<%d,%d\n", (int)list_remove(b, c), b, c);
			list_pri();
			break;
		case 0: //list_remove
			list_pri();
			break;
		default:
			*d = 42;
			fprintf(stderr,"  ma_deinit\n");
			ma_deinit();
			return 0;
		}
	}
	return 0;
}
*/
