#ifndef CUFALLOC_C
#define CUFALLOC_C
#define FALLOC_C
#define __THROW

// This is the smallest amount of memory, per-thread, which is allowed. It is also the largest amount of space a single malloc() can take up
const static int HEAPCHUNK_SIZE = 256;
// Minimum slack (bytes) a node must retain to stay on the available-node list.
const static int FALLOCNODE_SLACK = 0x10;

#include "cuFalloc.h"
#include <malloc.h>
#include <string.h>

// Per-chunk header placed immediately before each chunk's payload in the heap.
typedef struct __align__(8) _cuFallocHeapChunk {
    unsigned short magic;                      // FALLOC_MAGIC when this header is valid
	unsigned short count;                      // number of contiguous chunks owned by this header (1 unless multi-chunk alloc)
    volatile struct _cuFallocHeapChunk* next;  // next entry in the heap free list; nullptr when allocated (or at the list tail)
	void* reserved;                            // opaque per-chunk pointer; non-null enables detail output in fallocWTrace
} fallocHeapChunk;

// Heap control block; the chunk storage area follows immediately after this struct.
typedef struct __align__(8) _cuFallocHeap {
	size_t chunks;                         // total number of chunks in the heap
	volatile fallocHeapChunk* freeChunks;  // head of the singly-linked free-chunk list
	void* reserved;                        // host-supplied opaque pointer (the device trace buffer in TRACE builds)
} fallocHeap;

// Bytes occupied by one chunk (header + payload), and the same rounded up to a 16-byte boundary.
// CHUNKSIZEALIGN is the stride at which chunk headers are laid out in the heap.
#define CHUNKSIZE (sizeof(fallocHeapChunk)+HEAPCHUNK_SIZE)
#define CHUNKSIZEALIGN (CHUNKSIZE%16?CHUNKSIZE+16-(CHUNKSIZE%16):CHUNKSIZE)

#ifdef TRACE
///////////////////////////////////////////////////////////////////////////////
// TRACE
// Size in bytes of the trace buffer shared between host and device.
const static int TRACEHEAP_SIZE = 2048;

// Host-side cursor for streaming heap traces from the device (see fallocWTrace).
typedef struct __align__(8) _cudaFallocTrace {
	volatile __int8* trace;               // current write position in the trace buffer
	fallocHeapChunk* lastChunk;           // resume point: next chunk to trace (nullptr on the first pass)
	int contextIndex;                     // NOTE(review): not referenced in this file — purpose unconfirmed
	bool complete;                        // set by fallocWTrace once every chunk has been written
	struct _cudaFallocTrace* deviceTrace; // device-resident mirror of this struct
} fallocTrace;
#endif


#ifndef HOSTONLY

// Bump-allocation node; each node occupies one heap chunk's payload.
typedef struct _cuFallocNode {
	struct _cuFallocNode* next;           // all nodes belonging to the context
	struct _cuFallocNode* nextAvailable;  // nodes that still have free space
	unsigned short freeOffset;            // byte offset of the next free byte within this node
	unsigned short magic;                 // FALLOCNODE_MAGIC when valid
} fallocNode;

// Allocation context: the root node plus list heads. The context itself lives
// in a heap chunk and doubles as the first node on both lists.
typedef struct _cuFallocContext {
	fallocNode node;             // embedded first node (the context's own chunk)
	fallocNode* nodes;           // head of the all-nodes list
	fallocNode* availableNodes;  // head of the nodes-with-space list
	fallocHeap* heap;            // heap the nodes are drawn from
} fallocContext;

// All our headers are prefixed with a magic number so we know they're ready
#define FALLOC_MAGIC (unsigned short)0x3412        // Not a valid ascii character
#define FALLOCNODE_MAGIC (unsigned short)0x7856    // marks a fallocNode header

// Initializes the heap: stamps every chunk header and threads all chunks into
// one singly-linked free list. Only thread (0,0,0) of the block does the work.
__device__ void fallocInit(fallocHeap* heap) {
	if (threadIdx.x || threadIdx.y || threadIdx.z)
		return;
	size_t remaining = heap->chunks;
	if (!remaining)
		__THROW;
	// The first chunk starts right after the heap header.
	volatile fallocHeapChunk* current = (fallocHeapChunk*)((__int8*)heap + sizeof(fallocHeap));
	heap->freeChunks = current;
	current->magic = FALLOC_MAGIC;
	current->count = 1;
	current->reserved = nullptr;
	// Link each chunk to the next one at CHUNKSIZEALIGN stride.
	for (; remaining > 1; remaining--) {
		current = current->next = (fallocHeapChunk*)((__int8*)current + CHUNKSIZEALIGN);
		current->magic = FALLOC_MAGIC;
		current->count = 1;
		current->reserved = nullptr;
	}
	current->next = nullptr; // terminate the free list
}

// Pops one chunk off the heap free list and returns a pointer to its payload,
// or nullptr when the heap is exhausted. Only thread (0,0,0) may call.
__device__ void* fallocGetChunk(fallocHeap* heap) {
	if (threadIdx.x || threadIdx.y || threadIdx.z)
		__THROW;
	volatile fallocHeapChunk* head = heap->freeChunks;
	if (head == nullptr)
		return nullptr;
	{ // critical
		heap->freeChunks = head->next;
		head->next = nullptr; // mark as allocated
	}
	return (void*)((__int8*)head + sizeof(fallocHeapChunk));
}

// Allocates `length` bytes as a run of contiguous chunks; returns the payload
// pointer or nullptr when no contiguous run is free. If allocLength is non-null
// it receives the usable byte count of the allocation.
__device__ void* fallocGetChunks(fallocHeap* heap, size_t length, size_t* allocLength) {
    // fix up length to be a multiple of chunkSize
    length = (length < CHUNKSIZEALIGN ? CHUNKSIZEALIGN : length);
    if (length % CHUNKSIZEALIGN)
        length += CHUNKSIZEALIGN - (length % CHUNKSIZEALIGN);
	// set length, if requested
	if (allocLength)
		*allocLength = length - sizeof(fallocHeapChunk);
	size_t chunks = (size_t)(length / CHUNKSIZEALIGN);
	if (chunks > heap->chunks)
		__THROW;
	// single, equals: fallocGetChunk
	if (chunks == 1)
		return fallocGetChunk(heap);
    // multiple, find a contiguous run of free chunks
	size_t index = chunks;
	volatile fallocHeapChunk* chunk;
	volatile fallocHeapChunk* endChunk = (fallocHeapChunk*)((__int8*)heap + sizeof(fallocHeap) + (CHUNKSIZEALIGN * heap->chunks));
	{ // critical
		// Walk the heap in address order; `index` counts down while consecutive
		// chunks look free (next != nullptr) and resets when the run is broken.
		// NOTE(review): a free chunk at the tail of the free list also has
		// next == nullptr, so it is misclassified as allocated here — confirm intended.
		for (chunk = (fallocHeapChunk*)((__int8*)heap + sizeof(fallocHeap)); index && chunk < endChunk; chunk = (fallocHeapChunk*)((__int8*)chunk + (CHUNKSIZEALIGN * chunk->count))) {
			if (chunk->magic != FALLOC_MAGIC)
				__THROW;
			index = (chunk->next ? index - 1 : chunks);
		}
		if (index)
			return nullptr;
		// found a run; step back to its first chunk and unlink it from freeChunks
		endChunk = chunk;
		chunk = (fallocHeapChunk*)((__int8*)chunk - (CHUNKSIZEALIGN * chunks));
		// NOTE(review): this unlinks the *successor* of each in-run list entry
		// rather than the entry itself — verify the free-list surgery with a test.
		for (volatile fallocHeapChunk* chunk2 = heap->freeChunks; chunk2; chunk2 = chunk2->next)
			if (chunk2 >= chunk && chunk2 <= endChunk)
				chunk2->next = (chunk2->next ? chunk2->next->next : nullptr);
		chunk->count = chunks;
		chunk->next = nullptr;
	}
	return (void*)((__int8*)chunk + sizeof(fallocHeapChunk));
}

// Returns a single-chunk allocation (from fallocGetChunk) to the heap free list.
// Multi-chunk allocations must go through fallocFreeChunks instead.
// Only thread (0,0,0) may call.
__device__ void fallocFreeChunk(fallocHeap* heap, void* obj) {
	if (threadIdx.x || threadIdx.y || threadIdx.z)
		__THROW;
	// Step back from the payload to the chunk header and validate it.
	volatile fallocHeapChunk* freed = (fallocHeapChunk*)((__int8*)obj - sizeof(fallocHeapChunk));
	if (freed->magic != FALLOC_MAGIC || freed->count > 1)
		__THROW;
	{ // critical
		freed->next = heap->freeChunks;
		heap->freeChunks = freed;
	}
}

// Returns a (possibly multi-chunk) allocation from fallocGetChunks to the heap.
// A multi-chunk run is re-split into single chunks and the whole run is pushed
// onto the free list.
__device__ void fallocFreeChunks(fallocHeap* heap, void* obj) {
	volatile fallocHeapChunk* chunk = (fallocHeapChunk*)((__int8*)obj - sizeof(fallocHeapChunk));
	if (chunk->magic != FALLOC_MAGIC)
		__THROW;
	size_t chunks = chunk->count;
	// single, equals: fallocFreeChunk
	if (chunks == 1) {
		{ // critical
			chunk->next = heap->freeChunks;
			heap->freeChunks = chunk;
		}
		return;
	}
	// retag chunks
	volatile fallocHeapChunk* first = chunk;
	chunk->count = 1;
	while (chunks-- > 1) {
		// BUGFIX: stride by CHUNKSIZEALIGN — the 16-byte-aligned chunk size used by
		// fallocInit/fallocGetChunks — not the unaligned header+payload size, so the
		// re-created headers land exactly where fallocInit laid them out.
		chunk = chunk->next = (fallocHeapChunk*)((__int8*)chunk + CHUNKSIZEALIGN);
		chunk->magic = FALLOC_MAGIC;
		chunk->count = 1;
		chunk->reserved = nullptr;
	}
	{ // critical
		// BUGFIX: link the last chunk of the run to the old head and make the FIRST
		// chunk the new head; previously only the last chunk was reachable and the
		// rest of the run leaked.
		chunk->next = heap->freeChunks;
		heap->freeChunks = first;
	}
}


//////////////////////
// ALLOC

// Allocates one heap chunk and initializes it as a fallocContext.
// The context chunk itself serves as the first node on both node lists.
__device__ static fallocContext* fallocCreateCtx(fallocHeap* heap) {
	if (sizeof(fallocContext) > HEAPCHUNK_SIZE)
		__THROW;
	fallocContext* ctx = (fallocContext*)fallocGetChunk(heap);
	if (ctx == nullptr)
		__THROW;
	ctx->heap = heap;
	unsigned short used = ctx->node.freeOffset = sizeof(fallocContext);
	ctx->node.magic = FALLOCNODE_MAGIC;
	ctx->node.next = nullptr;
	ctx->nodes = (fallocNode*)ctx;
	ctx->node.nextAvailable = nullptr;
	ctx->availableNodes = (fallocNode*)ctx;
	// close the node if too little slack remains for further allocations
	if ((used + FALLOCNODE_SLACK) > HEAPCHUNK_SIZE)
		ctx->availableNodes = nullptr;
	return ctx;
}

// Releases every node chunk owned by the context (including the context itself).
__device__ static void fallocDisposeCtx(fallocContext* ctx) {
	fallocHeap* heap = ctx->heap;
	// BUGFIX: capture `next` before freeing — the node lives inside the chunk
	// being released, so reading node->next after fallocFreeChunk was a
	// use-after-free (it happened to work only because the memory is not
	// reused within this loop).
	for (fallocNode* node = ctx->nodes; node; ) {
		fallocNode* next = node->next;
		fallocFreeChunk(heap, node);
		node = next;
	}
}

// Bump-allocates `bytes` from the context. With alloc=true this behaves like a
// malloc (maintains the available-node list and closes full nodes); with
// alloc=false it behaves as a stack push paired with fallocRetract.
__device__ static void* falloc(fallocContext* ctx, unsigned short bytes, bool alloc) {
	if (bytes > (HEAPCHUNK_SIZE - sizeof(fallocContext)))
		__THROW;
	// find or add available node
	fallocNode* node;
	unsigned short freeOffset;
	unsigned char hasFreeSpace;
	fallocNode* lastNode;
	// hasFreeSpace is only read when node != nullptr, i.e. after at least one pass.
	for (lastNode = (fallocNode*)ctx, node = ctx->availableNodes; node; lastNode = node, node = (alloc ? node->nextAvailable : node->next))
		 if (hasFreeSpace = ((freeOffset = (node->freeOffset + bytes)) <= HEAPCHUNK_SIZE))
			 break;
	if (!node || !hasFreeSpace) {
		// add node: grab a fresh chunk and push it onto both lists
		node = (fallocNode*)fallocGetChunk(ctx->heap);
		if (!node)
			__THROW;
		freeOffset = node->freeOffset = sizeof(fallocNode); 
		freeOffset += bytes;
		node->magic = FALLOCNODE_MAGIC;
		node->next = ctx->nodes; ctx->nodes = node;
		node->nextAvailable = (alloc ? ctx->availableNodes : nullptr); ctx->availableNodes = node;
	}
	// hand out the space and advance the node's bump pointer
	void* obj = (__int8*)node + node->freeOffset;
	node->freeOffset = freeOffset;
	// close node: unlink it from the available list once slack is too small
	if (alloc && ((freeOffset + FALLOCNODE_SLACK) > HEAPCHUNK_SIZE)) {
		if (lastNode == (fallocNode*)ctx)
			ctx->availableNodes = node->nextAvailable;
		else
			lastNode->nextAvailable = node->nextAvailable;
		node->nextAvailable = nullptr;
	}
	return obj;
}

// Pops `bytes` back off the context (stack discipline — the inverse of
// falloc(ctx, bytes, false)). Returns a pointer to the retracted region.
__device__ static void* fallocRetract(fallocContext* ctx, unsigned short bytes) {
	fallocNode* node = ctx->availableNodes;
	int freeOffset = (int)node->freeOffset - bytes;
	// multi node, retract node
	// BUGFIX: compare against (int)sizeof(...) — the original compared a possibly
	// negative int with size_t, which converted the int to a huge unsigned value
	// so the underflow branch never fired.
	if (node != &ctx->node && freeOffset < (int)sizeof(fallocNode)) {
		node->freeOffset = sizeof(fallocNode);
		// search for the node preceding the current available node
		fallocNode* lastNode;
		for (lastNode = (fallocNode*)ctx, node = ctx->nodes; node; lastNode = node, node = node->next)
			if (node == ctx->availableNodes)
				break;
		node = ctx->availableNodes = lastNode;
		freeOffset = (int)node->freeOffset - bytes;
	}
	// first node && !overflow (same signed-comparison fix as above)
	if (node == &ctx->node && freeOffset < (int)sizeof(fallocContext))
		__THROW;
	node->freeOffset = (unsigned short)freeOffset;
	return (__int8*)node + freeOffset;
}

// Records the current allocation position (node pointer + offset) into mark/mark2.
__device__ static void fallocMark(fallocContext* ctx, void* &mark, unsigned short &mark2) {
	mark = ctx->availableNodes;
	mark2 = ctx->availableNodes->freeOffset;
}
// True when the context's allocation position equals a previously recorded mark.
__device__ static bool fallocAtMark(fallocContext* ctx, void* mark, unsigned short mark2) {
	return (ctx->availableNodes == mark) && (ctx->availableNodes->freeOffset == mark2);
}

#if __CUDA_ARCH__ > 100 // atomics only used with > sm_10 architecture
//////////////////////
// ATOMIC
#include <sm_11_atomic_functions.h>

#define CPUFATOMIC_MAGIC (unsigned short)0xBC9A  // marks a fallocAutomic header

// Ring-buffer bump allocator handing out fixed-pitch slots via atomicAdd.
typedef struct _cpuFallocAutomic {
	fallocHeap* heap;                   // owning heap (for disposal)
	unsigned short magic;               // CPUFATOMIC_MAGIC when valid
	unsigned short pitch;               // per-allocation stride, 16-byte aligned
	size_t bufferLength;                // usable buffer bytes after this header
	unsigned __int32* bufferBase;       // start of the buffer region
	volatile unsigned __int32* buffer;  // current buffer pointer (same as bufferBase at creation)
} fallocAutomic;

// Creates a ring-buffer atomic allocator inside the heap.
// pitch: per-allocation stride (rounded up to 16); length: total buffer bytes
// (rounded up to a multiple of pitch).
// BUGFIX: marked __device__ — it calls the __device__ fallocGetChunks, so it
// cannot be a host function.
__device__ fallocAutomic* fallocCreateAtom(fallocHeap* heap, unsigned short pitch, size_t length) {
	// align pitch
	if (pitch % 16)
		pitch += 16 - (pitch % 16);
	// fix up length to be a multiple of pitch
	length = (length < pitch ? pitch : length);
	if (length % pitch)
		length += pitch - (length % pitch);
	//
	size_t allocLength;
	fallocAutomic* atom = (fallocAutomic*)fallocGetChunks(heap, length + sizeof(fallocAutomic), &allocLength);
	if (!atom)
		__THROW; // was a bare `throw;`: no active exception, and device code cannot throw
	atom->heap = heap;
	atom->magic = CPUFATOMIC_MAGIC;
	atom->pitch = pitch;
	atom->bufferLength = allocLength - sizeof(fallocAutomic);
	// BUGFIX: advance by BYTES. The original added sizeof(fallocAutomic) to an
	// unsigned __int32*, stepping sizeof(fallocAutomic) 4-byte elements (4x too far).
	atom->bufferBase = (unsigned __int32*)((__int8*)atom + sizeof(fallocAutomic));
	atom->buffer = (volatile unsigned __int32*)atom->bufferBase;
	return atom;
}

// Releases the atomic allocator's chunks back to its heap.
// BUGFIX: marked __device__ — it calls the __device__ fallocFreeChunks.
__device__ void fallocDisposeAtom(fallocAutomic* atom) {
	fallocFreeChunks(atom->heap, atom);
}

// Grabs the next pitch-sized slot from the atomic ring buffer.
// `bytes` is currently unused; every slot is atom->pitch bytes.
// BUGFIX: marked __device__ — atomicAdd() is only available in device code.
__device__ void* fallocAtomNext(fallocAutomic* atom, unsigned short bytes) {
	// NOTE(review): this atomically bumps the 32-bit counter stored at *bufferBase,
	// then subtracts the buffer's ADDRESS from the old counter VALUE — mixing a
	// counter with pointer arithmetic looks wrong; verify the intended offset math
	// (and note `buffer + offset` advances in 4-byte elements, not bytes).
	size_t offset = atomicAdd(atom->bufferBase, atom->pitch) - (size_t)atom->buffer;
	offset %= atom->bufferLength;
	return (void*)(atom->buffer + offset);
}
#endif

#ifdef TRACE
///////////////////////////////////////////////////////////////////////////////
// TRACE

// One fixed-size trace record emitted per heap chunk by fallocWTrace.
typedef struct {
	unsigned short magic;  // CUFALLOCTRACE_MAGIC when this record is valid
	unsigned short count;  // number of contiguous chunks the traced header owns
	bool free;             // true when the chunk is on the heap free list
	bool showDetail;       // true when the chunk header carries a reserved pointer
} traceChunk;

// All our headers are prefixed with a magic number so we know they're ready
#define CUFALLOCTRACE_MAGIC (unsigned short)0x0A0A

// Single-thread kernel: walks the heap's chunks and appends one traceChunk
// record per chunk to the trace buffer, resuming from deviceTrace->lastChunk.
// Sets deviceTrace->complete once every chunk fit in the buffer.
__global__ void fallocWTrace(fallocHeap* heap, fallocTrace* deviceTrace) {
	volatile __int8* trace = deviceTrace->trace;
	if (!trace)
		__THROW;
	// Leave room for the end-of-stream marker.
	volatile __int8* endTrace = trace + TRACEHEAP_SIZE - sizeof(CUFALLOCTRACE_MAGIC);
	fallocHeapChunk* chunk = deviceTrace->lastChunk;
	if (!chunk) {
		chunk = (fallocHeapChunk*)((__int8*)heap + sizeof(fallocHeap));
		// first pass: emit the total chunk count as a stream header
		*((int*)trace) = (int)heap->chunks; trace += sizeof(int);
	}
	volatile fallocHeapChunk* endChunk = (fallocHeapChunk*)((__int8*)heap + sizeof(fallocHeap) + (CHUNKSIZEALIGN * heap->chunks));
	for (; trace < endTrace && chunk < endChunk; trace += sizeof(traceChunk), chunk = (fallocHeapChunk*)((__int8*)chunk + (CHUNKSIZEALIGN * chunk->count))) {
		if (chunk->magic != FALLOC_MAGIC)
			__THROW;
		// emit one record
		traceChunk* w = (traceChunk*)trace;
		w->magic = CUFALLOCTRACE_MAGIC;
		w->count = chunk->count;
		if (chunk->next)
			w->free = true;
		else {
			// A free-list tail chunk also has next == nullptr, so search the list.
			// BUGFIX: the original condition (chunk2 == chunk || chunk2 != nullptr)
			// never stopped at a match — it always walked to nullptr, so such
			// chunks were always reported as allocated.
			volatile fallocHeapChunk* chunk2;
			for (chunk2 = heap->freeChunks; chunk2 != chunk && chunk2 != nullptr; chunk2 = chunk2->next) ;
			w->free = (chunk2 == chunk);
		}
		w->showDetail = (bool)chunk->reserved;
		if (!w->free && w->showDetail) {
			/* NEED */
		}
	}
	deviceTrace->lastChunk = chunk;
	deviceTrace->complete = (trace < endTrace);
	if (deviceTrace->complete) {
		// end-of-stream marker
		*((unsigned short*)trace) = (unsigned short)-1; trace += sizeof(CUFALLOCTRACE_MAGIC);
	}
	deviceTrace->trace = trace;
}
#endif

#endif // HOSTONLY

#ifndef CLIENTONLY

///////////////////////////////////////////////////////////////////////////////
// HOST SIDE

//
//  cudaFallocInit
//
//  Takes a buffer length to allocate, creates the memory on the device and
//  returns a pointer to it for when a kernel is called. It's up to the caller
//  to free it.
//
//
//  cudaFallocInit
//
//  Takes a buffer length to allocate, creates the memory on the device and
//  returns a pointer to it for when a kernel is called. It's up to the caller
//  to free it. On failure the returned struct has heap == nullptr and, when
//  `error` is non-null, *error holds the failing cudaError_t.
//
extern "C" cudaFallocHost cudaFallocInit(size_t length, cudaError_t* error, void* reserved) {
	cudaFallocHost host; memset(&host, 0, sizeof(cudaFallocHost));
	// Route status through a local when the caller didn't supply an error slot,
	// collapsing the duplicated (!error ...) || (error ...) branches.
	cudaError_t localError;
	cudaError_t* status = (error ? error : &localError);
	// fix up length to be a whole number of aligned chunks
	length = (length < CHUNKSIZEALIGN ? CHUNKSIZEALIGN : length);
	if (length % CHUNKSIZEALIGN)
		length += CHUNKSIZEALIGN - (length % CHUNKSIZEALIGN);
	size_t chunks = (size_t)(length / CHUNKSIZEALIGN);
	if (!chunks)
		return host;
	// Fix up length to include fallocHeap, 16-byte aligned
	length += sizeof(fallocHeap);
	if ((length % 16) > 0)
		length += 16 - (length % 16);
	// Allocate the heap on the device and zero it
	fallocHeap* heap;
	if ((*status = cudaMalloc((void**)&heap, length)) != cudaSuccess)
		return host;
	cudaMemset(heap, 0, length);
	// transfer the heap header
	fallocHeap hostHeap;
	hostHeap.chunks = chunks;
	hostHeap.freeChunks = nullptr;
	hostHeap.reserved = reserved;
	if ((*status = cudaMemcpy(heap, &hostHeap, sizeof(fallocHeap), cudaMemcpyHostToDevice)) != cudaSuccess) {
		// BUGFIX: free the device allocation on failure — it previously leaked.
		cudaFree(heap);
		return host;
	}
	// return the heap
	*status = cudaSuccess;
	host.heap = heap;
	host.length = (int)length;
	host.reserved = reserved;
	return host;
}

//
//  cudaFallocEnd
//
//  Frees up the memory which we allocated
//
// Frees the device heap created by cudaFallocInit; safe to call twice.
extern "C" void cudaFallocEnd(cudaFallocHost &host) {
	if (host.heap) {
		cudaFree(host.heap);
		host.heap = nullptr;
	}
}

#ifdef TRACE
///////////////////////////////////////////////////////////////////////////////
// TRACE: HOST SIDE

//
//  cudaFallocWTraceInit
//
//  Takes a buffer length to allocate, creates the memory on the device and
//  returns a pointer to it for when a kernel is called. It's up to the caller
//  to free it.
//
//
//  cudaFallocWTraceInit
//
//  Takes a buffer length to allocate, creates the memory on the device and
//  returns a pointer to it for when a kernel is called. It's up to the caller
//  to free it. Also allocates the device trace buffer (stored in host.reserved).
//
extern "C" cudaFallocHost cudaFallocWTraceInit(size_t length, cudaError_t* error) {
	cudaFallocHost host; memset(&host, 0, sizeof(cudaFallocHost));
	cudaError_t localError;
	cudaError_t* status = (error ? error : &localError);
	// Allocate the trace buffer on the device
	void* deviceTrace;
	if ((*status = cudaMalloc((void**)&deviceTrace, TRACEHEAP_SIZE)) != cudaSuccess)
		return host;
	host = cudaFallocInit(length, error, deviceTrace);
	// BUGFIX: don't leak the trace buffer when heap creation fails.
	if (!host.heap)
		cudaFree(deviceTrace);
	return host;
}

//
//  cudaFallocWTraceEnd
//
//  Frees up the memory which we allocated
//
// Frees the trace buffer and then the heap created by cudaFallocWTraceInit.
extern "C" void cudaFallocWTraceEnd(cudaFallocHost &host) {
	if (!host.heap)
		return;
	// Release the device trace buffer first, then the heap itself.
	cudaFree(host.reserved);
	host.reserved = nullptr;
	cudaFallocEnd(host);
}

//
//  cuFallocSetTraceInfo
//
//	Sets a trace Info.
//
extern "C" void cudaFallocSetTraceInfo(size_t id, bool showDetail) {
	// TODO: not implemented — presumably intended to toggle per-chunk detail for
	// fallocWTrace (see the chunk->reserved / showDetail handshake); currently a no-op.
}

//
//  cudaFallocTraceInit
//
//	Creates a trace Stream.
//
//
//  cudaFallocTraceInit
//
//	Creates a trace Stream. Returns nullptr when host or device allocation fails.
//
extern "C" fallocTrace* cudaFallocTraceInit() {
	fallocTrace* trace = (fallocTrace*)malloc(sizeof(fallocTrace));
	// BUGFIX: check both allocations — malloc and cudaMalloc were unchecked.
	if (!trace)
		return nullptr;
	memset(trace, 0, sizeof(fallocTrace));
	if (cudaMalloc((void**)&trace->deviceTrace, sizeof(fallocTrace)) != cudaSuccess) {
		free(trace);
		return nullptr;
	}
	return trace;
}

//
//  cudaFallocTraceStream
//
//	Streams till empty.
//
//
//  cudaFallocTraceStream
//
//	Streams till empty: copies the trace cursor to the device, runs one tracing
//	pass, and returns the host trace buffer with `length` set to the bytes
//	written. Returns nullptr (length 0) when the stream is complete or a copy fails.
//
extern "C" void* cudaFallocTraceStream(cudaFallocHost &host, fallocTrace* trace, size_t &length) {
	if (trace->complete) {
		length = 0;
		return nullptr;
	}
	// Point the device cursor at the start of the shared trace buffer.
	trace->trace = (volatile __int8*)host.reserved;
	// BUGFIX: the cudaMemcpy result was stored into an unused size_t; check it.
	if (cudaMemcpy(trace->deviceTrace, trace, sizeof(fallocTrace), cudaMemcpyHostToDevice) != cudaSuccess) {
		length = 0;
		return nullptr;
	}
#ifndef HOSTONLY
	fallocWTrace<<<1, 1>>>(host.heap, trace->deviceTrace);
#endif
	cudaMemcpy(trace, trace->deviceTrace, sizeof(fallocTrace), cudaMemcpyDeviceToHost);
	length = (__int8*)trace->trace - (__int8*)host.reserved;
	return host.reserved;
}

//
//  cudaFallocTraceEnd
//
//	Frees a trace Stream.
//
//
//  cudaFallocTraceEnd
//
//	Frees a trace Stream created by cudaFallocTraceInit. Safe on nullptr.
//
extern "C" void cudaFallocTraceEnd(fallocTrace* trace) {
	// BUGFIX: guard against a nullptr (e.g. a failed cudaFallocTraceInit).
	if (!trace)
		return;
	cudaFree(trace->deviceTrace);
	free(trace);
}
#endif

#endif // CLIENTONLY

#endif // CUFALLOC_C