/*
 * chunkserver-journal-entry.c
 *
 *  Created on: Jun 17, 2016
 *      Author: zhangzm
 */
#define USE_LOCAL_LOG_LEVEL
#include "st.h"
#include "../utcache.h"
#include "../module-client/disk-cache/cache-entry.h"
#include "chunkserver-journal-entry.h"

/*
 * Per-chunk bookkeeping record, stored in the journal entry's hashtable
 * keyed by entry_key(volumeid, index). Tracks the journal version range
 * and the wall-clock window over which records for this chunk were seen,
 * plus an opaque per-chunk segment cache.
 */
typedef struct ChunkEntry
{
	uint32_t volumeid;       // volume this chunk belongs to
	uint32_t index;          // chunk index within the volume
	uint64_t start_version;  // first journal version recorded for this chunk
	uint64_t end_version;    // most recent journal version recorded
	time_t start_time;       // time the first record was inserted
	time_t end_time;         // time of the most recent update
	void* cache_entry; // cache-entry.cpp — opaque CacheRegistry* holding segment ranges
	st_mutex_t entry_lock;   // per-chunk mutex (State Threads mutex)
} ChunkEntry;

/* Pack (index, volumeid) into a single 64-bit hashtable key:
 * chunk index in the high 32 bits, volume id in the low 32 bits. */
static inline uint64_t entry_key(uint32_t volumeid, uint32_t index)
{
	uint64_t key = (uint64_t)index;
	key <<= 32;
	key |= (uint64_t)volumeid;
	return key;
}

/* Allocate and initialize a ChunkEntry for (volumeid, index).
 * zalloc presumably zero-fills, so versions/times start at 0 until the
 * first insert — TODO confirm zalloc semantics against utcache.h. */
static ChunkEntry *new_chunk_entry(uint32_t volumeid, uint32_t index)
{
	ChunkEntry *chunk = (ChunkEntry *)zalloc(sizeof(*chunk));

	chunk->volumeid = volumeid;
	chunk->index = index;
	// The segment cache keeps a back-pointer to its owning ChunkEntry.
	chunk->cache_entry = cache_create(NULL, chunk);
	chunk->entry_lock = st_mutex_new();
	return chunk;
}

/* Destructor for a ChunkEntry (also registered with the hashtable).
 * Resources are released in reverse order of creation. */
static void delete_chunk_entry(ChunkEntry *chunk)
{
	st_mutex_destroy(chunk->entry_lock);
	cache_destroy((struct CacheRegistry*)chunk->cache_entry);
	free(chunk);
}

/*
 * Top-level journal index: one hashtable mapping
 * entry_key(volumeid, index) -> ChunkEntry*.
 */
struct ChunkServerJournalEntry
{
	struct foo_hashtable* chunk_table; // owns its ChunkEntry values (destructor installed at creation)
};

/*
 * Create an empty journal index. The hashtable is created with
 * delete_chunk_entry installed as its value destructor, so dropping the
 * table frees every ChunkEntry it owns.
 *
 * Fix: declare the no-argument function as (void) — an empty () parameter
 * list is a K&R-style unprototyped declaration (removed in C23) and does
 * not reject callers that pass arguments.
 */
struct ChunkServerJournalEntry *new_chunk_journal_entry(void)
{
	struct ChunkServerJournalEntry *e = (struct ChunkServerJournalEntry *)zalloc(sizeof(*e));
	e->chunk_table = foo_hashtable_create((foo_destructor)&delete_chunk_entry);
	return e;
}

/* Destroy a journal index. Deleting the hashtable runs the installed
 * destructor (delete_chunk_entry) on every stored ChunkEntry. The second
 * argument's meaning is defined by foo_hashtable_delete — presumably a
 * flags/callback slot; verify against utcache.h. */
void del_chunk_journal_entry(struct ChunkServerJournalEntry *entry)
{
	struct foo_hashtable *table = entry->chunk_table;

	foo_hashtable_delete(table, 0);
	free(entry);
}

/*
 * Look up the ChunkEntry for (volumeid, index), creating it on a miss, and
 * update its version/time bookkeeping. Always returns a valid entry via
 * *ce_out.
 *
 * range_version == INVALID_VERSION means "no version to record" (used by the
 * lock/unlock paths); otherwise end_version is advanced to range_version and,
 * on first insert, start_version is set too. Versions are expected to arrive
 * contiguously; a gap (range_version != end_version + 1) is logged as a
 * possible bug but still applied.
 *
 * Fix: renamed the `time` parameter to `now` — it shadowed libc time(2)
 * inside this function. The helper is static and all callers pass
 * positionally, so this changes no caller.
 */
static void search_or_insert_chunk_entry(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index, uint64_t range_version, time_t now, ChunkEntry** ce_out)
{
	ChunkEntry* ce = NULL;
	uint64_t key = entry_key(volumeid, index);
	int ret = foo_hashtable_get(entry->chunk_table, key, &ce);
	if(ret < 0){
		// Miss: create and register a fresh entry.
		ce = new_chunk_entry(volumeid, index);
		foo_hashtable_put(entry->chunk_table, key, ce);
		if(range_version != INVALID_VERSION){
			ce->start_version = range_version;
			ce->end_version = range_version;
		}
		ce->start_time = now;
		ce->end_time = now;
	} else {
		if(range_version != INVALID_VERSION){
			// Contiguity check only — the new version is recorded either way.
			if(unlikely(range_version != (ce->end_version+1))){
				LOG_INFO("maybe is bug, chunk %08x.%d end version is %ld range_version is %ld", volumeid, index, ce->end_version, range_version);
			}
			ce->end_version = range_version;
		}
		ce->end_time = now;
	}
	*ce_out = ce;
}

/* Take the per-chunk mutex, creating the ChunkEntry on first use.
 * INVALID_VERSION means this call records no version, only the access time. */
void lock_chunk_entry(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index)
{
	ChunkEntry* chunk = NULL;

	search_or_insert_chunk_entry(entry, volumeid, index, INVALID_VERSION, time(0), &chunk);
	st_mutex_lock(chunk->entry_lock);
}

/* Release the per-chunk mutex acquired by lock_chunk_entry. The lookup
 * mirrors the lock path, so the entry is guaranteed to exist. */
void unlock_chunk_entry(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index)
{
	ChunkEntry* chunk = NULL;

	search_or_insert_chunk_entry(entry, volumeid, index, INVALID_VERSION, time(0), &chunk);
	st_mutex_unlock(chunk->entry_lock);
}

int insert_range(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index, uint32_t begin, uint32_t len, uint64_t pos, uint64_t range_version, time_t time)
{
	LOG_DEBUG("insert_range chunk %08x.%d begin %d len %d pos %ld, range version %ld", volumeid, index, begin, len, pos, range_version);
	ChunkEntry* ce = NULL;
	search_or_insert_chunk_entry(entry, volumeid, index, range_version, time, &ce);

	if(!len){
		return 0;
	}

	struct Segment s;
	s.begin = begin;
	s.end=begin+len;
	s.logic_pos = pos;
	return cache_insert((struct CacheRegistry*)ce->cache_entry, &s);
}

/* Drop cached segments overlapping [begin, begin+len) for the chunk, still
 * advancing the chunk's version/time bookkeeping first. A zero-length call
 * is bookkeeping-only and succeeds.
 * NOTE(review): `begin` is uint64_t here but uint32_t in insert_range —
 * looks inconsistent; confirm which width the cache layer expects. */
int clear_range(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index, uint64_t begin, uint32_t len, uint64_t range_version, time_t time)
{
	ChunkEntry* chunk = NULL;

	search_or_insert_chunk_entry(entry, volumeid, index, range_version, time, &chunk);

	if(len == 0){
		return 0; // bookkeeping-only call
	}
	return cache_clear_range((struct CacheRegistry*)chunk->cache_entry, begin, len);
}

int lookup_range(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index, uint32_t begin, uint32_t len, uint32_t *rbegin, uint32_t *rend, uint64_t *rpos)
{
	ChunkEntry* ce = NULL;
	uint64_t key = entry_key(volumeid, index);
	int ret = foo_hashtable_get(entry->chunk_table, key, &ce);
	if(ret < 0){
		return -1;
	}
	uint32_t end = begin + len;
	struct Segment os, is;
	is.begin = begin;
	is.end = end;
	void* it = NULL;
	ret = cache_lookup((struct CacheRegistry*)ce->cache_entry, &is, &os, &it);
	if(ret < 0){
		return ret;
	}
	*rbegin = os.begin;
	*rend = os.end;
	*rpos = os.logic_pos;
	LOG_DEBUG("lookup_range chunk %08x.%d begin %d len %d rbegin %d rend %d rpos %ld", volumeid, index, begin, len, *rbegin, *rend, *rpos);
	return ret;
}

int lookup_multy(struct ChunkServerJournalEntry *entry, uint32_t volumeid, uint32_t index, uint32_t begin, uint32_t len, struct Segment **os)
{
	ChunkEntry* ce = NULL;
	uint64_t key = entry_key(volumeid, index);
	int ret = foo_hashtable_get(entry->chunk_table, key, &ce);
	if(ret < 0){
		return -1;
	}
	uint32_t end = begin + len;
	struct Segment is;
	is.begin = begin;
	is.end = end;
	ret = cache_lookup_multy((struct CacheRegistry*)ce->cache_entry, &is, os);
	if(ret < 0){
		return ret;
	}
	return ret;
}

/* Report the [start_version, end_version] journal range recorded for the
 * chunk. Returns -1 if the chunk is unknown, 0 on success. */
int get_chunk_version_range(struct ChunkServerJournalEntry* entry, uint32_t volumeid, uint32_t index, uint64_t *start_version, uint64_t *end_version)
{
	ChunkEntry* chunk = NULL;

	if(foo_hashtable_get(entry->chunk_table, entry_key(volumeid, index), &chunk) < 0){
		return -1; // chunk never seen
	}
	LOG_INFO("chunk %08x.%d start version %ld end version %ld", volumeid, index, chunk->start_version, chunk->end_version);
	*start_version = chunk->start_version;
	*end_version = chunk->end_version;
	return 0;
}

/*
 * Runs automatically before main() (GCC/Clang constructor attribute).
 * Per-file log-level registration is currently disabled; re-enable by
 * uncommenting the call below.
 *
 * Fix: declare the no-argument function as (void) — an empty () parameter
 * list is a K&R-style unprototyped declaration (removed in C23).
 */
__attribute__ ((__constructor__))
static void log_init(void)
{
//	register_local_log_level("s.chunkserverjindex");
}
