/*
 * block_cache.c
 *
 *  Created on: Nov 22, 2011
 *      Author: bendischinger
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <aio.h>
#include <time.h>
#include <pthread.h>

#include "block_cache.h"
#include "test_stat.h"

static struct timespec aio_sus_tmout = {0,5000000}; /* 5 ms (5,000,000 ns) */

/*
 * Map a file offset to a cache slot index using Bob Jenkins'
 * 32-bit integer mix hash.
 *
 * Fix: the original performed the mixing steps in signed int, where the
 * additions and left shifts overflow — undefined behavior in C — and the
 * final (a > 0 ? a : -a) is itself UB when a == INT_MIN.  Doing the
 * arithmetic in uint32_t gives well-defined wraparound and a result that
 * is always in [0, num_entries).
 */
static int get_block_slot(blk_cache_t *cache, off_t offset) {
	/* NOTE(review): only the low 32 bits of a (possibly 64-bit) off_t
	 * participate in the hash — same truncation the original had. */
	uint32_t a = (uint32_t) offset;
	a = (a+0x7ed55d16u) + (a<<12);
	a = (a^0xc761c23cu) ^ (a>>19);
	a = (a+0x165667b1u) + (a<<5);
	a = (a+0xd3a2646cu) ^ (a<<9);
	a = (a+0xfd7046c5u) + (a<<3);
	a = (a^0xb55a4f09u) ^ (a>>16);
	return (int) (a % (uint32_t) cache->num_entries);
}

/*
 * Poll the entry's outstanding AIO request, if any.
 *
 * Returns 1 when the entry has no request in flight (either none was
 * pending, or the pending one just completed and was reaped here);
 * returns 0 while the request is still EINPROGRESS.  On completion the
 * aiocb is reaped with aio_return() and zeroed; on error the entry's
 * offset is additionally invalidated.  Caller must hold entry->lock.
 */
static int check_aio(blk_cache_t *cache, blk_cache_ent_t *entry) {
	int is_ready = 1;
	if (entry->aio_arg.aio_fildes) {
		int error = aio_error(&entry->aio_arg);
		if (error != EINPROGRESS) {
			if (error) {
				global_stat.aio_err++;
				/* Fix: report the code returned by aio_error(); errno is
				 * unrelated to the asynchronous operation's status. */
				printf("AIO Error %d %s\n", error, strerror(error));
				entry->offset = 0; /* block contents are not valid */
			}
			/* reap the request so the aiocb can be reused */
			aio_return(&entry->aio_arg);
			bzero(&entry->aio_arg, sizeof (struct aiocb));
		} else {
			is_ready = 0;
		}
	}
	return is_ready;
}

/*
 * Sweep every cache slot and reap any AIO request that has finished.
 * Each slot is inspected under its own lock, so the scan never blocks
 * behind an in-flight read for longer than one aio_error() poll.
 */
static void check_all_aio(blk_cache_t *cache) {
	int slot;
	for (slot = 0; slot < cache->num_entries; slot++) {
		blk_cache_ent_t *ent = &cache->entries[slot];
		pthread_mutex_lock(&ent->lock);
		if (ent->aio_arg.aio_fildes != 0)
			check_aio(cache, ent);
		pthread_mutex_unlock(&ent->lock);
	}
}

/*
 * Reset a cache slot to its empty state: no pending aiocb, no owning
 * offset, no readers.  Caller must hold entry->lock.
 */
static void clear_block_entry(blk_cache_ent_t *entry) {
	entry->in_use = 0;
	entry->offset = 0;
	memset(&entry->aio_arg, 0, sizeof entry->aio_arg);
}

/*
 * Initialize a block cache of num_entries slots, each holding one
 * blocksize-byte block read from file descriptor fd.
 *
 * Fix: the original never checked calloc/malloc results and would crash
 * on the first cache access after an allocation failure.  On failure we
 * now roll back everything allocated so far and leave cache->fd == 0,
 * which the public entry points already treat as "cache disabled".
 */
void init_block_cache(blk_cache_t *cache, int fd, int num_entries, int blocksize) {
	int i;
	bzero(cache, sizeof(blk_cache_t));
	cache->fd = fd;
	cache->num_entries = num_entries;
	cache->blk_size = blocksize;
	cache->entries = calloc(num_entries, sizeof (blk_cache_ent_t));
	if (cache->entries == NULL) {
		cache->fd = 0;          /* disable: entry points check !cache->fd */
		cache->num_entries = 0;
		return;
	}
	for (i=0;i<num_entries;i++) {
		cache->entries[i].block = malloc(blocksize);
		if (cache->entries[i].block == NULL) {
			/* roll back the slots initialized so far */
			while (--i >= 0) {
				free(cache->entries[i].block);
				pthread_mutex_destroy(&cache->entries[i].lock);
			}
			free(cache->entries);
			cache->entries = NULL;
			cache->fd = 0;
			cache->num_entries = 0;
			return;
		}
		pthread_mutex_init(&cache->entries[i].lock, NULL);
	}
}

/*
 * Release every block buffer, destroy the per-slot mutexes, and free the
 * slot array.
 *
 * Fixes: tolerate a cache whose init failed (entries == NULL) or a NULL
 * cache pointer, and null out the entries pointer afterwards so a second
 * call cannot double-free.
 */
void free_block_cache(blk_cache_t *cache) {
	int i;
	if (!cache || !cache->entries) {
		return;
	}
	for (i=0;i<cache->num_entries;i++) {
		free(cache->entries[i].block);
		pthread_mutex_destroy(&cache->entries[i].lock);
	}
	free(cache->entries);
	cache->entries = NULL;
	cache->num_entries = 0;
}

/*
 * Report whether the block at the given offset currently owns its hash
 * slot.  Returns nonzero on a hit, 0 on a miss or a disabled cache.
 *
 * NOTE: reads entry->offset without taking the slot lock, exactly as the
 * original did — the answer is advisory only.
 */
int is_cached_block(blk_cache_t *cache, off_t offset) {
	if (cache == NULL || cache->fd == 0)
		return 0;

	blk_cache_ent_t *ent = &cache->entries[get_block_slot(cache, offset)];
	return ent->offset == offset;
}

/*
 * Look up the block at offset.  If the slot has an AIO read in flight,
 * wait (in 5 ms aio_suspend slices) until it completes, then check the
 * slot.  On a hit, bumps entry->in_use (caller must pair with
 * return_cached_block) and returns the block buffer; on a miss or a
 * disabled cache, returns NULL.
 *
 * Fix: aio_suspend() was called with &list — a pointer to the array,
 * type struct aiocb *(*)[1] — instead of list itself, which decays to
 * the const struct aiocb *const [] the API requires.  It happened to
 * carry the same address but is a constraint violation.
 */
int *get_cached_block(blk_cache_t *cache, off_t offset) {
	int slot;
	blk_cache_ent_t *entry;

	if (!cache || !cache->fd) {
		return NULL;
	}

	slot = get_block_slot(cache, offset);
	entry = &cache->entries[slot];

	struct aiocb *list[1];
	int incr_aio = 1;
	pthread_mutex_lock(&entry->lock);
	while (!check_aio(cache, entry)) {
		if (entry->aio_arg.aio_fildes) {
			/* hold the slot while we sleep outside the lock so another
			 * thread cannot recycle it under us */
			entry->in_use++;
			list[0] = &entry->aio_arg;
			pthread_mutex_unlock(&entry->lock);
			aio_suspend((const struct aiocb *const *) list, 1, &aio_sus_tmout);
			pthread_mutex_lock(&entry->lock);
			entry->in_use--;
			if (incr_aio) {
				/* count each lookup that had to wait only once */
				global_stat.aio_wait++;
				incr_aio = 0;
			}
		}
	}

	if (entry->offset == offset) {
		entry->in_use++;
		global_stat.cache_hit++;
		pthread_mutex_unlock(&entry->lock);
	} else {
		pthread_mutex_unlock(&entry->lock);
		entry = NULL;
		global_stat.cache_miss++;
	}

	return (entry == NULL ? NULL : entry->block);
}

/*
 * Release a reference taken by get_cached_block.  The reference is only
 * dropped if the slot still belongs to this offset — if the block was
 * recycled in the meantime the count was already reset elsewhere.
 */
void return_cached_block(blk_cache_t *cache, off_t offset) {
	blk_cache_ent_t *ent = &cache->entries[get_block_slot(cache, offset)];

	pthread_mutex_lock(&ent->lock);
	if (ent->offset == offset)
		ent->in_use--;
	pthread_mutex_unlock(&ent->lock);
}

/*
 * Drop the cached copy of the block at offset, if it is present.
 *
 * Fix: the original cleared the hash slot unconditionally, so
 * invalidating an offset that was NOT cached destroyed whatever other
 * block happened to share its slot (hash collision), silently evicting
 * valid data.  Now the slot is only cleared when it actually holds this
 * offset.
 */
void invalidate_cached_block(blk_cache_t *cache, off_t offset) {
	int slot = get_block_slot(cache, offset);
	blk_cache_ent_t *entry = &cache->entries[slot];
	pthread_mutex_lock(&entry->lock);
	if (entry->offset == offset) {
		/* NOTE(review): if an aio_read is still in flight the kernel may
		 * keep writing into entry->block after the aiocb is zeroed here —
		 * pre-existing behavior; aio_cancel would be the thorough fix. */
		clear_block_entry(entry);
	}
	pthread_mutex_unlock(&entry->lock);
}

/*
 * Start an asynchronous read of the block at offset into its cache slot.
 * If the slot is busy (referenced or already reading) the request is
 * dropped and counted as a collision.  On EAGAIN from aio_read, reap all
 * finished requests once and retry.  Returns 0 on success or when the
 * request was skipped; an errno value on failure.
 *
 * Fix: on a non-EAGAIN aio_read failure the original left the entry
 * populated (offset set, aio_fildes set) with no request actually in
 * flight — a later check_aio would call aio_error() on a never-submitted
 * aiocb and the slot would falsely claim to hold the offset.  The entry
 * is now cleared on that path.
 */
int put_aio_cached_block(blk_cache_t *cache, off_t offset) {
	int error = 0;
	int retries = 1;
	int slot;
	blk_cache_ent_t *entry;

	if (!cache || !cache->fd) return EINVAL;

	slot = get_block_slot(cache, offset);
	entry = &cache->entries[slot];
	pthread_mutex_lock(&entry->lock);
	check_aio(cache, entry);
	if (entry->offset != offset) {
		if (entry->in_use || entry->aio_arg.aio_fildes) {
			/* slot occupied by a referenced or still-loading block */
			global_stat.cache_col++;
		} else {
			clear_block_entry(entry);
retry:
			entry->offset = offset;
			entry->aio_arg.aio_buf = entry->block;
			entry->aio_arg.aio_fildes = cache->fd;
			entry->aio_arg.aio_nbytes = cache->blk_size;
			entry->aio_arg.aio_offset = offset;

			if (aio_read(&entry->aio_arg)) {
				error = errno;
				if (error == EAGAIN) {
					/* queue full: clean up outstanding requests, retry once */
					clear_block_entry(entry);
					pthread_mutex_unlock(&entry->lock);
					check_all_aio(cache);
					pthread_mutex_lock(&entry->lock);
					if (retries-- > 0) {
						error = 0;
						goto retry;
					}
				} else {
					/* hard failure: do not leave a half-configured slot behind */
					clear_block_entry(entry);
					printf("aio_read error %d %s\n", error, strerror(error));
				}
			}
		}
	}
	pthread_mutex_unlock(&entry->lock);
	return (error);
}

/*
 * Synchronously install a copy of an already-read block into the cache.
 * Silently skipped when the cache is disabled, the slot already holds
 * this offset, or the resident block is still referenced.  A displaced
 * resident block is counted as a collision.
 */
void put_cached_block(blk_cache_t *cache, off_t offset, int *block) {
	if (cache == NULL || cache->fd == 0)
		return;

	blk_cache_ent_t *ent = &cache->entries[get_block_slot(cache, offset)];

	pthread_mutex_lock(&ent->lock);
	if (ent->offset != offset) {
		if (ent->offset != 0)
			global_stat.cache_col++;    /* evicting a different block */
		if (ent->in_use == 0) {
			ent->offset = offset;
			memcpy(ent->block, block, cache->blk_size);
			global_stat.cache_pop++;
		}
	}
	pthread_mutex_unlock(&ent->lock);
}
