#include "Cache.h"

void Cache_C::init() {
	// Start from an empty cache with all access-statistics counters zeroed.
	occupancy = hits = 0;
	compulsoryMisses = capacityMisses = 0;
}

void Cache_C::printStatistics() {
	// An access is either a hit or one of the two miss classes.
	const int totalAccesses = hits + (compulsoryMisses + capacityMisses);
	printf(">> C: %d accesses, %d hits, %d compulsory misses, %d capacity misses\n", totalAccesses, hits, compulsoryMisses, capacityMisses);
}

void Cache_C::evaluateCacheSize(int nPoints) {
	// Size the device-side row cache from the currently free GPU memory.
	// Side effects: allocates `dictionary` (host), `pool`/`lruHead` (host)
	// and `devCache` (device); sets cacheSizeInRow and cachePitchInFloat.

	// Probe the pitch the device allocator uses for one nPoints-wide row of
	// floats, via a throw-away 2-row pitched allocation.
	size_t estimatePitch;
	{
		void* estimate;
		CUDA_SAFE_CALL( cudaMallocPitch(&estimate, &estimatePitch, nPoints*sizeof(float), 2));
		cudaFree(estimate);
	}
	// NOTE(review): cuMemGetInfo taking unsigned int* matches the legacy CUDA
	// driver API; newer toolkits declare it with size_t* — confirm against the
	// project's CUDA version.
	unsigned int remainingMemory, totalMemory;

	printf("\n>> C: Allocate dictionary for cache status.\n");
	// Host-side dictionary: one entry per data point, zero-filled so every
	// point starts with a cleared status (never cached).
	dictionary = (dicEntry*)malloc(nPoints*sizeof(dicEntry));
	memset(dictionary,0,nPoints*sizeof(dicEntry));

	cuMemGetInfo(&remainingMemory, &totalMemory);

	// Rows that fit in free device memory, each row costing one pitched row
	// plus one lruEntry; scale by the configured DRAM ratio, then clamp to
	// nPoints (no point caching more rows than exist).
	cacheSizeInRow = remainingMemory/((int)(estimatePitch + sizeof(lruEntry)));
	cacheSizeInRow = (int)((float)cacheSizeInRow*ratioOfDramForCache);
	if (nPoints < cacheSizeInRow) cacheSizeInRow = nPoints;

	// Host-side LRU pool: slot 0 becomes the circular-list sentinel head and
	// `pool` is advanced past it, so pool[0..cacheSizeInRow-1] are cache slots.
	pool = (lruEntry*)malloc((cacheSizeInRow+1)*sizeof(lruEntry));
	lruHead = pool++;
	lruHead->next = lruHead->prev = lruHead;
	printf(">> C: allocate memory pool for lruList (~= %d rows).\n", cacheSizeInRow);
	// FIX: remainingMemory/totalMemory are unsigned int — print with %u, not %d.
	printf(">> C: %u/%u bytes memory currently free.\n", remainingMemory, totalMemory);
	// FIX: nPoints*sizeof(float) and estimatePitch are size_t — %d is undefined
	// behavior for them; use %zu.
	printf(">> C: %d rows of kernel matrix could be cached.\n\t(%zu bytes per row, %zu estimated)\n", cacheSizeInRow, nPoints*sizeof(float), estimatePitch);

	// Device-side cache backing store: cacheSizeInRow pitched rows of floats.
	size_t cachePitch;
	CUDA_SAFE_CALL(cudaMallocPitch((void**)&devCache, &cachePitch, nPoints*sizeof(float), cacheSizeInRow));
	cachePitchInFloat = cachePitch/sizeof(float);
	// FIX: cachePitch is size_t — use %zu.
	printf(">> C: Cache allocate done (%zu bytes per row).\n", cachePitch);
}

void Cache_C::cleanCache() {
	// Release the host-side dictionary and the device-side cache storage.
	free(dictionary);
	CUDA_SAFE_CALL( cudaFree(devCache) );
	// Step back over the LRU sentinel slot (the allocation started one entry
	// before the current `pool` pointer) before freeing the whole pool.
	--pool;
	free(pool);
}

// Look up the cache slot for point `index`.
// Out-params: `offset` = row slot in the device cache; `compute` = true when
// the caller must (re)compute that row, false when the cached row is valid.
// Maintains a circular doubly-linked LRU list rooted at the sentinel
// `lruHead`: most-recently-used entry sits at lruHead->next, the eviction
// victim at lruHead->prev.
void Cache_C::findData(int index, int& offset, bool& compute) {
	dicEntry &entry = dictionary[index];
	if(entry.status == INCACHE) {
		// Cache hit: reuse the existing slot, no recompute needed.
		hits++;
		//*(offset) = entry.offset;
		offset = entry.offset;
		//*(compute) = false;
		compute = false;
		// Unlink this entry from its current position in the LRU list...
		entry.lruEntryPtr->next->prev = entry.lruEntryPtr->prev;
		entry.lruEntryPtr->prev->next = entry.lruEntryPtr->next;

		// ...and re-insert it right after the head (most-recently-used).
		entry.lruEntryPtr->prev = lruHead;
		entry.lruEntryPtr->next = lruHead->next;

		lruHead->next->prev = entry.lruEntryPtr;
		lruHead->next = entry.lruEntryPtr;
	} else if(occupancy < cacheSizeInRow) { //status == NEVER
		// First-ever access and the cache still has free slots: claim the
		// next unused slot (slot number == current occupancy).
		compulsoryMisses++;
		entry.offset = occupancy;
		entry.lruEntryPtr = pool+occupancy;
		occupancy++;
		entry.status = INCACHE;

		entry.lruEntryPtr->index = index;

		// Insert the fresh entry right after the head (most-recently-used).
		entry.lruEntryPtr->prev = lruHead;
		entry.lruEntryPtr->next = lruHead->next;

		lruHead->next->prev = entry.lruEntryPtr;
		lruHead->next = entry.lruEntryPtr;

		//*(offset) = entry.offset;
		//*(compute) = true;
		offset = entry.offset;
		compute = true;
	} else {
		// Cache full: evict the least-recently-used row and reuse its slot.
		// A previously-evicted point counts as a capacity miss; a point never
		// seen before still counts as compulsory.
		if(entry.status == EVICTED)
			capacityMisses++;
		else
			compulsoryMisses++;

		// Victim is the tail of the LRU list; unlink it...
		lruEntry* expire = lruHead->prev;
		expire->next->prev = expire->prev;
		expire->prev->next = expire->next;

		// ...and move it to the head, since it will now hold the new row.
		expire->prev = lruHead;
		expire->next = lruHead->next;

		lruHead->next->prev = expire;
		lruHead->next = expire;

		// Transfer the slot: mark the victim's point as evicted, copy its
		// slot offset into the new entry (must read dictionary[expire->index]
		// BEFORE expire->index is overwritten below), then rebind the LRU
		// node to the new point.
		dictionary[expire->index].status = EVICTED;
		entry.offset = dictionary[expire->index].offset;
		entry.status = INCACHE;
		expire->index = index;
		entry.lruEntryPtr = expire;

		//*(offset) = entry.offset;
		//*(compute) = true;
		offset = entry.offset;
		compute = true;
	}
}