/*
 * fingerprint_cache.c
 *
 *  Created on: Mar 24, 2014
 *      Author: fumin
 */
#include "../destor.h"
#include "index.h"
#include "../storage/containerstore.h"
#include "../recipe/recipestore.h"
#include "../utils/lru_cache.h"
#include "../jcr.h"
#include <assert.h>

/*
 * Per-chunk entry inside a container's metadata map.
 *
 * NOTE: the original declaration carried a useless `extern` storage-class
 * specifier on a struct definition with no declarator (a no-op that most
 * compilers warn about); it has been removed.
 */
struct metaEntry
{
	int32_t off;  /* offset of the chunk inside its container */
	int32_t len;  /* stored length of the chunk */
	fingerprint fp;
	/*
	 * the flag indicates whether it is a delta
	 * 0 -> base chunk
	 * 1 -> delta chunk
	 * */
	char flag;
	/* three super-features used for similarity (resemblance) detection */
	uint64_t sf1;
	uint64_t sf2;
	uint64_t sf3;
	/* doubly-linked neighbors in container write order; walked by
	 * match_adjacent()/match_last_adjacent() below */
	struct metaEntry *pre;
	struct metaEntry *next;
	// containerid baseid;
	// int32_t delta_size;
	// int32_t base_size;
};

/*
 * A (fingerprint, container id) pair returned by the similarity-match
 * lookup (see match_cache_lookup()).  Heap-allocated; the caller owns
 * and frees the returned item.
 *
 * NOTE: removed the useless `extern` storage-class specifier that was
 * attached to this struct definition (it had no declarator, so the
 * specifier was a compiler-warning no-op).
 */
struct index_item
{
	fingerprint fp;
	containerid id;
};

/*
 * Associates one super-feature value with the index entry it was found in.
 * Used by simiulated_fingerprint_cache_lookup() to pick among up to three
 * super-feature candidates.
 *
 * NOTE: removed the useless `extern` storage-class specifier (struct
 * definition with no declarator; the specifier had no effect).
 */
struct super_feature_item
{
	uint64_t sf;
	struct index_item *p_item;
};

/* Main fingerprint cache: holds container metadata (physical locality)
 * or segment recipes (logical locality), per destor.index_category[1]. */
static struct lruCache *lru_queue;
/* Separate cache of segment recipes used only for adjacent-chunk matching
 * (see init_chunkpointer_cache / chunkpointer_cache_lookup). */
static struct lruCache *cp_lru_queue;

/* Serializes access to lru_queue for the similarity/adjacency paths
 * (match_cache_lookup, match_adjacent, ...) and SIDC prefetch inserts. */
static pthread_mutex_t mutex;
/*
 * Allocate the global fingerprint cache according to the configured
 * index category and initialize the mutex that guards it.
 * Exits the process on an unrecognized category.
 */
void init_fingerprint_cache()
{
	int category = destor.index_category[1];

	if (category == INDEX_CATEGORY_PHYSICAL_LOCALITY)
	{
		/* SIDC needs a cache with two hit functions: one keyed by
		 * fingerprint, one keyed by super-feature. */
		if (destor.enable_sidc)
			lru_queue = new_lru_cache_two_hitfun(destor.index_cache_size,
												 free_container_meta, lookup_fingerprint_in_container_meta,
												 lookup_sf_in_container_meta);
		else
			lru_queue = new_lru_cache(destor.index_cache_size,
									  free_container_meta, lookup_fingerprint_in_container_meta);
	}
	else if (category == INDEX_CATEGORY_LOGICAL_LOCALITY)
	{
		lru_queue = new_lru_cache(destor.index_cache_size,
								  free_segment_recipe, lookup_fingerprint_in_segment_recipe);
	}
	else
	{
		WARNING("Invalid index category!");
		exit(1);
	}

	pthread_mutex_init(&mutex, NULL);
}

/*
 * Allocate the secondary recipe cache used when matching adjacent chunks.
 */
void init_chunkpointer_cache()
{
	cp_lru_queue = new_lru_cache(destor.index_cache_size,
								 free_segment_recipe, lookup_fingerprint_in_segment_recipe);
}

/*
 * Look up a fingerprint in the cache.
 *
 * Returns the id of the container (physical locality) or the container id
 * recorded in the segment recipe (logical locality) that holds the chunk,
 * or TEMPORARY_ID on a cache miss.
 */
int64_t fingerprint_cache_lookup(fingerprint *fp)
{
	switch (destor.index_category[1])
	{
	case INDEX_CATEGORY_PHYSICAL_LOCALITY:
	{
		struct containerMeta *cm = lru_cache_lookup(lru_queue, fp);
		if (cm)
		{
			return cm->id;
		}
		break;
	}
	case INDEX_CATEGORY_LOGICAL_LOCALITY:
	{
		struct segmentRecipe *sr = lru_cache_lookup(lru_queue, fp);
		if (sr)
		{
			struct chunkPointer *cp = g_hash_table_lookup(sr->kvpairs, fp);
			/* The hit function found fp in this recipe, so the kvpair must
			 * exist; the original code dereferenced cp without checking. */
			assert(cp);
			if (cp->id <= TEMPORARY_ID)
			{
				WARNING("expect > TEMPORARY_ID, but being %lld", cp->id);
				assert(cp->id > TEMPORARY_ID);
			}
			return cp->id;
		}
		break;
	}
	}

	return TEMPORARY_ID;
}

/*
 * Like fingerprint_cache_lookup(), but for delta-aware dedup: on a hit it
 * additionally copies the stored base/delta flag into c->type.
 *
 * Returns the container id holding the chunk, or TEMPORARY_ID on a miss.
 */
int64_t delta_fingerprint_cache_lookup(struct chunk *c)
{
	switch (destor.index_category[1])
	{
	case INDEX_CATEGORY_PHYSICAL_LOCALITY:
	{
		/* hit-function 0: plain fingerprint lookup */
		struct containerMeta *cm = lru_cache_lookup_two_hitfun(lru_queue, &c->fp, 0);
		// struct containerMeta *cm = lru_cache_lookup(lru_queue, &c->fp);
		if (cm)
		{
			struct metaEntry *me = g_hash_table_lookup(cm->map, &c->fp);
			assert(me);
			// if (me->flag)
			// {
			// 	c->baseid = me->baseid;
			// 	c->base_size = me->base_size;
			// 	c->delta_size = me->delta_size;
			// }
			c->type = me->flag;
			return cm->id;
		}

		break;
	}
	case INDEX_CATEGORY_LOGICAL_LOCALITY:
	{
		struct segmentRecipe *sr = lru_cache_lookup(lru_queue, &c->fp);
		if (sr)
		{
			struct chunkPointer *cp = g_hash_table_lookup(sr->kvpairs, &c->fp);
			/* Guard the dereference: the original read cp->id without
			 * checking the hash-table result for NULL. */
			assert(cp);
			if (cp->id <= TEMPORARY_ID)
			{
				WARNING("expect > TEMPORARY_ID, but being %lld", cp->id);
				assert(cp->id > TEMPORARY_ID);
			}
			c->type = cp->flag;

			return cp->id;
		}
		break;
	}
	}

	return TEMPORARY_ID;
}

/*
 * Super-feature similarity lookup: find a cached container whose delta
 * index contains one of c's three super-features.
 *
 * On a hit, returns a freshly malloc'd index_item carrying the container
 * id and the fingerprint of the matched base chunk; the CALLER owns and
 * must free it.  Returns NULL on a miss (or on allocation failure, which
 * callers already treat as "no match"; the original dereferenced an
 * unchecked malloc result).
 */
struct index_item *match_cache_lookup(struct chunk *c)
{
	struct index_item *item = NULL;

	pthread_mutex_lock(&mutex);
	/* hit-function 1: lookup keyed by super-feature */
	struct containerMeta *cm = lru_cache_lookup_two_hitfun(lru_queue, c, 1);
	if (cm)
	{
		/* Probe the three per-super-feature tables; take the first hit.
		 * The cache hit guarantees at least one of them matches. */
		struct metaEntry *me_sf = NULL;
		struct metaEntry *me_sf1 = g_hash_table_lookup(cm->delta_sf1, &c->superfeature->sf1);
		struct metaEntry *me_sf2 = g_hash_table_lookup(cm->delta_sf2, &c->superfeature->sf2);
		struct metaEntry *me_sf3 = g_hash_table_lookup(cm->delta_sf3, &c->superfeature->sf3);
		if (me_sf1)
			me_sf = me_sf1;
		else if (me_sf2)
			me_sf = me_sf2;
		else if (me_sf3)
			me_sf = me_sf3;
		assert(me_sf);
		item = malloc(sizeof *item);
		if (item)
		{
			item->id = cm->id;
			memcpy(item->fp, me_sf->fp, sizeof(fingerprint));
		}
	}
	pthread_mutex_unlock(&mutex);
	return item;
}

/*
 * After finding two duplicate chunks (lastChunk at last_index, nowChunk at
 * now_index) in segment s, try to delta-encode the unique chunks lying
 * between them by pairing each with its positional neighbor in the cached
 * container metadata: walk backwards from nowChunk (me->pre) and forwards
 * from lastChunk (me->next) in lockstep with the segment iterators.
 *
 * lastChunk/last_iter may be NULL (no previous duplicate); last_index may
 * be negative in that case.  Returns the number of chunks marked as
 * adjacency-based delta candidates (0 if neither anchor resolves).
 *
 * NOTE(review): assumes last_index < now_index and that the iterators
 * correspond to the given indices — not checkable from here.
 */
int match_adjacent(struct segment *s, struct chunk *lastChunk, struct chunk *nowChunk,
				   GSequenceIter *last_iter, GSequenceIter *now_iter, int last_index, int now_index)
{
	int preMatchLen = 0, nextMatchLen = 0;
	pthread_mutex_lock(&mutex);
	struct containerMeta *last_cm = NULL;
	if (lastChunk)
		last_cm = lru_cache_lookup_two_hitfun(lru_queue, &lastChunk->fp, 3);
	struct containerMeta *now_cm = lru_cache_lookup_two_hitfun(lru_queue, &nowChunk->fp, 3);
	struct metaEntry *last_me = NULL;
	struct metaEntry *now_me = NULL;

	/* Only trust containers that have actually been stored already. */
	if (last_cm && last_cm->id <= jcr.container_num_now_stored)
	{
		last_me = g_hash_table_lookup(last_cm->map, &lastChunk->fp);
	}
	if (now_cm && now_cm->id <= jcr.container_num_now_stored)
	{
		now_me = g_hash_table_lookup(now_cm->map, &nowChunk->fp);
	}

	if (now_me == NULL && last_me == NULL)
	{
		pthread_mutex_unlock(&mutex);
		return 0;
	}

	/* Split the gap (now_index - last_index - 1 chunks) between a backward
	 * walk from nowChunk (preMatchLen) and a forward walk from lastChunk
	 * (nextMatchLen), each capped at destor.adjchunk_match_length. */
	if (now_me == NULL)
	{
		preMatchLen = 0;
		nextMatchLen = min(destor.adjchunk_match_length, now_index - last_index - 1);
	}
	else if (last_me == NULL) // First similar chunk or last chunk can't be found
	{
		nextMatchLen = 0;
		if (last_index < 0)
			preMatchLen = min(destor.adjchunk_match_length, now_index);
		else
			preMatchLen = min(destor.adjchunk_match_length, now_index - last_index - 1);
	}
	else
	{
		preMatchLen = min(destor.adjchunk_match_length, (now_index - last_index) / 2);
		nextMatchLen = min(destor.adjchunk_match_length, now_index - last_index - 1); // Greedy match
	}

	int lv_adjacent = 1, i, match_num = 0;
	GSequenceIter *cur_iter = now_iter;

	/* Backward pass: step the segment iterator and the container's meta
	 * list (me->pre) together, pairing position i before nowChunk with the
	 * i-th predecessor of now_me inside the container. */
	struct metaEntry *lookup_me = now_me;
	for (i = 1; i <= preMatchLen; ++i)
	{
		struct chunk *preChunk = NULL;

		assert(now_index - i >= 0);
		cur_iter = g_sequence_iter_prev(cur_iter);
		preChunk = g_sequence_get(cur_iter);

		/* Stop at file boundaries or chunks already claimed by an
		 * earlier adjacency match. */
		if (CHECK_CHUNK(preChunk, CHUNK_FILE_START) ||
			CHECK_CHUNK(preChunk, CHUNK_FILE_END) ||
			CHECK_CHUNK(preChunk, CHUNK_ADJ_UNIQUE))
			break;

		lookup_me = lookup_me->pre;
		if (!lookup_me)
			break;

		/* Only delta-encode against base chunks (flag == 0). */
		if (preChunk->type == CHUNK_TYPE_UNIQUE && lookup_me->flag == 0)
		{
			memcpy(preChunk->basefp, lookup_me->fp, sizeof(fingerprint));
			preChunk->baseid = now_cm->id;
			SET_CHUNK(preChunk, CHUNK_ADJ_UNIQUE_LEFT);
			SET_CHUNK(preChunk, CHUNK_ADJ_UNIQUE);
			preChunk->type = CHUNK_TYPE_DELTA;
			preChunk->level = lv_adjacent++;
			match_num++;
		}
	}

	lv_adjacent = 1;
	cur_iter = last_iter;
	int len = g_sequence_get_length(s->chunks);

	/* Forward pass: mirror of the backward pass, walking me->next from
	 * lastChunk toward nowChunk. */
	lookup_me = last_me;
	for (i = 1; i <= nextMatchLen && last_index + i < len; ++i)
	{
		struct chunk *nextChunk = NULL;
		cur_iter = g_sequence_iter_next(cur_iter);
		nextChunk = g_sequence_get(cur_iter);

		if (CHECK_CHUNK(nextChunk, CHUNK_FILE_START) ||
			CHECK_CHUNK(nextChunk, CHUNK_FILE_END) ||
			CHECK_CHUNK(nextChunk, CHUNK_ADJ_UNIQUE))
			break;

		lookup_me = lookup_me->next;
		if (!lookup_me)
			break;

		if (nextChunk->type == CHUNK_TYPE_UNIQUE && lookup_me->flag == 0)
		{
			memcpy(nextChunk->basefp, lookup_me->fp, sizeof(fingerprint));
			nextChunk->baseid = last_cm->id;
			SET_CHUNK(nextChunk, CHUNK_ADJ_UNIQUE_RIGHT);
			SET_CHUNK(nextChunk, CHUNK_ADJ_UNIQUE);
			nextChunk->type = CHUNK_TYPE_DELTA;
			nextChunk->level = lv_adjacent++;
			match_num++;
		}
	}
	pthread_mutex_unlock(&mutex);
	return match_num;
}

/*
 * Forward-only variant of match_adjacent(): given the final duplicate
 * chunk of a segment (lastChunk at position index), walk forward through
 * the remaining chunks, pairing each with the corresponding successor
 * (me->next) of lastChunk inside its cached container, and mark unique
 * chunks as adjacency-based delta candidates.
 *
 * Returns the number of chunks marked (0 if lastChunk's container is not
 * cached or not yet stored).
 */
int match_last_adjacent(struct segment *s, struct chunk *lastChunk,
						GSequenceIter *last_iter, int index)
{
	int nextMatchLen = 0, lv_adjacent = 1, match_num = 0, i;
	int len = g_sequence_get_length(s->chunks);
	GSequenceIter *cur_iter = last_iter;

	pthread_mutex_lock(&mutex);

	struct containerMeta *last_cm = lru_cache_lookup_two_hitfun(lru_queue, &lastChunk->fp, 3);
	struct metaEntry *last_me = NULL;

	/* Only trust containers that have actually been stored already. */
	if (last_cm && last_cm->id <= jcr.container_num_now_stored)
	{
		last_me = g_hash_table_lookup(last_cm->map, &lastChunk->fp);
		assert(last_me);
		/* Match at most up to the end of the segment. */
		nextMatchLen = min(destor.adjchunk_match_length, len - index - 1);
	}
	else
	{
		pthread_mutex_unlock(&mutex);
		return 0;
	}

	struct metaEntry *lookup_me = last_me;

	for (i = 1; i <= nextMatchLen; ++i)
	{
		struct chunk *nextChunk = NULL;

		cur_iter = g_sequence_iter_next(cur_iter);
		nextChunk = g_sequence_get(cur_iter);

		assert(index + i < len);

		/* Stop at file boundaries or chunks already claimed by an
		 * earlier adjacency match. */
		if (CHECK_CHUNK(nextChunk, CHUNK_FILE_START) ||
			CHECK_CHUNK(nextChunk, CHUNK_FILE_END) ||
			CHECK_CHUNK(nextChunk, CHUNK_ADJ_UNIQUE))
			break;

		/* Advance in the container's meta list in lockstep. */
		lookup_me = lookup_me->next;
		if (!lookup_me)
			break;

		/* Only delta-encode against base chunks (flag == 0). */
		if (nextChunk->type == CHUNK_TYPE_UNIQUE && lookup_me->flag == 0)
		{
			memcpy(nextChunk->basefp, lookup_me->fp, sizeof(fingerprint));
			nextChunk->baseid = last_cm->id;
			SET_CHUNK(nextChunk, CHUNK_ADJ_UNIQUE_RIGHT);
			SET_CHUNK(nextChunk, CHUNK_ADJ_UNIQUE);
			nextChunk->type = CHUNK_TYPE_DELTA;
			nextChunk->level = lv_adjacent++;
			match_num++;
		}
	}

	pthread_mutex_unlock(&mutex);
	return match_num;
}

/*
 * Look up a chunk's fingerprint in the adjacent-match recipe cache.
 * Returns the cached chunkPointer (owned by the cached recipe — do not
 * free) or NULL when no cached recipe contains the fingerprint.
 */
struct chunkPointer *chunkpointer_cache_lookup(struct chunk *c)
{
	assert(destor.enable_match_adjacent);

	struct segmentRecipe *recipe = lru_cache_lookup(cp_lru_queue, &c->fp);
	if (!recipe)
		return NULL;

	return g_hash_table_lookup(recipe->kvpairs, &c->fp);
}

/*
 * Decide which of up to three super-feature candidates to use, preferring
 * one whose container is already in the cache (an LCS-style lookup over
 * two or three fingerprints).
 *
 * Returns 1, 2, or 3, selecting c1, c2, or c3 respectively.
 *
 * NOTE(review): assumes at least two of c1/c2/c3 are non-NULL — when two
 * are NULL the two-fingerprint lookup would dereference a NULL item;
 * confirm against callers.  Also note that on a cache hit whose id
 * matches neither c1 nor c2 the function returns 3 without checking that
 * c3 is non-NULL or that cm->id equals c3's id.
 */
int simiulated_fingerprint_cache_lookup(struct super_feature_item *c1, struct super_feature_item *c2, struct super_feature_item *c3)
{
	/* Probe the cache with the fingerprints of the available candidates. */
	struct containerMeta *cm = NULL;
	if (!c1)
		cm = lru_cache_lookup_lcs_two(lru_queue, &c2->p_item->fp, &c3->p_item->fp);
	else if (!c2)
		cm = lru_cache_lookup_lcs_two(lru_queue, &c1->p_item->fp, &c3->p_item->fp);
	else if (!c3)
		cm = lru_cache_lookup_lcs_two(lru_queue, &c1->p_item->fp, &c2->p_item->fp);
	else
		cm = lru_cache_lookup_lcs_three(lru_queue, &c1->p_item->fp, &c2->p_item->fp, &c3->p_item->fp);
	if (cm)
	{
		/* Cache hit: pick the candidate whose container is cached. */
		if (c1 && cm->id == c1->p_item->id)
			return 1;
		else if (c2 && cm->id == c2->p_item->id)
			return 2;
		else
			return 3;
	}
	/* Cache miss: default to the first available candidate. */
	int res = 0;
	if (c1)
		res = 1;
	else if (c2)
		res = 2;
	else
		res = 3;
	if (destor.enable_bestmatch)
	{
		if (destor.enable_sort_features)
		{
			/* Best-match tie-break: if two candidates share the same
			 * fingerprint, prefer the earlier of the pair. */
			if (c1 && c2 && !memcmp(c1->p_item->fp, c2->p_item->fp, destor.index_key_size))
				res = 1;
			else if (c1 && c3 && !memcmp(c1->p_item->fp, c3->p_item->fp, destor.index_key_size))
				res = 1;
			else if (c2 && c3 && !memcmp(c2->p_item->fp, c3->p_item->fp, destor.index_key_size))
				res = 2;
		}
	}
	return res;
}

/*
 * Prefetch the metadata for container/segment `id` into the fingerprint
 * cache.  Physical locality loads one container's metadata (exits if the
 * container was never written); logical locality loads a whole batch of
 * segment recipes, skipping those already cached.
 */
void fingerprint_cache_prefetch(int64_t id)
{
	switch (destor.index_category[1])
	{
	case INDEX_CATEGORY_PHYSICAL_LOCALITY:
	{
		struct containerMeta *cm = retrieve_container_meta_by_id(id);
		if (!cm)
		{
			WARNING("Error! The container %lld has not been written!", id);
			exit(1);
		}

		/* With SIDC the cache is shared across the similarity path,
		 * so the insert must hold the mutex. */
		int need_lock = destor.enable_sidc;
		if (need_lock)
			pthread_mutex_lock(&mutex);
		lru_cache_insert(lru_queue, cm, NULL, NULL);
		if (need_lock)
			pthread_mutex_unlock(&mutex);
		break;
	}
	case INDEX_CATEGORY_LOGICAL_LOCALITY:
	{
		GQueue *segments = prefetch_one_segment(id);
		VERBOSE("Dedup phase: prefetch %d segments into %d cache",
				g_queue_get_length(segments),
				destor.index_cache_size);

		/* Insert from tail to head; recipes already cached are freed. */
		struct segmentRecipe *recipe;
		while ((recipe = g_queue_pop_tail(segments)))
		{
			if (lru_cache_hits(lru_queue, &recipe->id,
							   segment_recipe_check_id))
				free_segment_recipe(recipe);
			else
				lru_cache_insert(lru_queue, recipe, NULL, NULL);
		}
		g_queue_free(segments);
		break;
	}
	}
}

/*
 * Prefetch the segment recipes for segment `id` into the adjacent-match
 * cache (cp_lru_queue), inserting from tail to head and freeing any
 * recipe that is already cached.
 */
void chunkpointer_cache_prefetch(int64_t id)
{
	GQueue *segments = prefetch_one_segment(id);

	VERBOSE("Dedup phase: prefetch %d segments into %d cache",
			g_queue_get_length(segments),
			destor.index_cache_size);

	struct segmentRecipe *recipe;
	while ((recipe = g_queue_pop_tail(segments)))
	{
		if (lru_cache_hits(cp_lru_queue, &recipe->id,
						   segment_recipe_check_id))
			free_segment_recipe(recipe);
		else
			lru_cache_insert(cp_lru_queue, recipe, NULL, NULL);
	}
	g_queue_free(segments);
}

/*
 * Release both caches and destroy the mutex.
 *
 * cp_lru_queue is only allocated when init_chunkpointer_cache() was
 * called (adjacent matching enabled); the original unconditionally freed
 * it, passing NULL to free_lru_cache in configurations that never
 * initialized it.  Guard both frees and null the pointers to make the
 * teardown idempotent.
 */
void free_fingerprint_cache()
{
	if (lru_queue)
	{
		free_lru_cache(lru_queue);
		lru_queue = NULL;
	}
	if (cp_lru_queue)
	{
		free_lru_cache(cp_lru_queue);
		cp_lru_queue = NULL;
	}
	pthread_mutex_destroy(&mutex);
}