#include "data_flood.h"
#include <unistd.h>
#include <time.h>
#include <stdlib.h>

/* Tuples that fit in one page of the seeded table; computed in expend_table(). */
int tuples_per_page = 0;
/* Per-column metadata of the target table; filled by get_table_attmap() in table_watering(). */
AttributeMap *att_map = NULL;

/* Forward declarations for the expansion phase (defined below). */
static void expend_table(char* relname, int size_gb);
static void expend_files(char* filepath, int file_num);

/*
 * Map a column type OID to the SQL value-producing expression used by the
 * fast (INSERT ... SELECT ... generate_series) path.  Returns a string
 * literal — the caller must not free it.  Unsupported types yield "NULL".
 */
static char* type_data_procuder_f(uint32 typid)
{
	switch(typid) {
		case DF_TEXTOID:
		case DF_VARCHAROID:
		case DF_BPCHAROID:
			return "md5(random()::text)";
		case DF_INT8OID:
		case DF_INT4OID:
			return "generate_number";
		default:
			return "NULL";
	}
}

/*
 * Produce one column value for the slow (row-by-row prepared insert) path.
 * Returns a heap-allocated C string the caller must free (see
 * free_single_tuple), or NULL for unsupported types — bound as SQL NULL.
 *
 * NOTE(review): num_buff / NUMBER_BUFF_LEN appear to be a shared scratch
 * buffer declared elsewhere; this is only safe single-threaded — confirm.
 */
static char* type_data_procuder_s(AttributeMap *att_map)
{
	char* result = NULL;
	uint32 typid = att_map->typid;
	bool unique = att_map->isunique;
	/* NOTE(review): the counter advances on every call, even for non-unique
	 * columns; harmless since unique_value is only used when 'unique'. */
	int64 unique_value = att_map->unique_num++;

	if(typid == DF_INT2OID) {
		int16 pro_num = 0;

		if(!unique)
			pro_num = random_int16_maker();
		else
			pro_num = (int16)unique_value;	/* unique counter truncated to 16 bits */

		memset(num_buff, 0, NUMBER_BUFF_LEN);
		int_to_string(pro_num, num_buff);
		result = strdup(num_buff);

	} else if(typid == DF_INT4OID) {
		int pro_num = 0;

		if(!unique)
			pro_num = random_int_maker();
		else
			pro_num = (int)unique_value;

		memset(num_buff, 0, NUMBER_BUFF_LEN);
		int_to_string(pro_num, num_buff);
		result = strdup(num_buff);
	} else if(typid == DF_INT8OID) {
		int64 pro_num = 0;

		if(!unique)
			pro_num = random_int64_maker();
		else
			pro_num = (int64)unique_value;

		memset(num_buff, 0, NUMBER_BUFF_LEN);
		int64_to_string(pro_num, num_buff);
		result = strdup(num_buff);
	/* Text types are only randomized when unbounded (atttypmod == -1);
	 * length-limited text columns fall through to the NULL default. */
	} else if((typid == DF_TEXTOID || typid == DF_VARCHAROID || typid == DF_BPCHAROID) && 
			att_map->atttypmod == -1) {
		result = random_string_maker();
	} else if(typid == DF_FLOAT4OID || typid == DF_FLOAT8OID) {
		result = random_float_maker();
	} else if(typid == DF_TIMESTAMPOID || typid == DF_TIMESTAMPTZOID) {
		result = random_timestamp_maker();
	} else if(typid == DF_DATEOID) {
		result = random_date_maker();
	} else if(typid == DF_TIMEOID) {
		result = random_time_maker();
	} else if(typid == DF_BOOLOID) {
		result = random_bool_maker();	
	} else {
		/* Unsupported type: insert SQL NULL. */
		result = NULL;
	}

	return result;

}

/*
 * Fill paramValues[0..atts-1] with freshly generated column values for one
 * row.  Each slot is either a heap string (caller frees) or NULL.
 */
static void prepare_single_tuple(AttributeMap *att_map, char** paramValues)
{
	int natts = att_map[0].atts;
	int col;

	for(col = 0; col < natts; col++)
		paramValues[col] = type_data_procuder_s(att_map + col);
}

static void free_single_tuple(char** paramValues, int atts)
{
	for(int loop = 0; loop < atts; loop++) {
		if(!paramValues[loop])
			continue;
		free(paramValues[loop]);
		paramValues[loop] = NULL;
	}
}

/*
 * Slow seeding path: insert DF_GENERATE_NUM rows into relname one prepared
 * statement execution at a time.  Used when user_arg.fastmode is off.
 */
static void slow_insert_data(AttributeMap *att_map, char* relname)
{
	int generate_num = DF_GENERATE_NUM;
	char sql[8196] = {0};	/* NOTE(review): likely meant 8192; ample either way */
	char* ptr = NULL;
	char** paramValues = NULL;
	char* stmt_name= "data_flood_slow_insert_stmt";
	int atts = att_map[0].atts;

	/* The placeholder list below is bounded; cap the column count. */
	if(atts > 50)
		df_error("Can not support with columns large then 50 current %d", atts);

	/* Build "INSERT INTO <rel> VALUES($1,$2,...,$n)". */
	ptr = sql;
	sprintf(ptr, "INSERT INTO %s VALUES(", relname);
	for(int loop = 1; loop <= atts; loop++) {
		ptr = sql + strlen(sql);
		if(loop != atts)
			sprintf(ptr, "$%d,", loop);
		else
			sprintf(ptr, "$%d)", loop);
	}
	df_debug("preparesql=%s", sql);
	doPrepare(sql, atts, stmt_name);

	paramValues = (char**)df_malloc(sizeof(char*) * atts);
	for(int loop = 0; loop < generate_num; loop++) {
		prepare_single_tuple(att_map, paramValues);
		executePrepared(stmt_name, paramValues, atts);
		free_single_tuple(paramValues, atts);
	}
	free(paramValues);	/* was leaked in the original */
	exec_checkpoint();
}

/*
 * Compose the fast-path bulk statement into sql_buff:
 *   INSERT INTO rel(c1,...,cn) SELECT expr1,...,exprn
 *   FROM generate_series(1,DF_GENERATE_NUM) AS generate_number
 * The caller supplies a buffer large enough for the statement.
 */
static void caculate_base_data_sql(char* sql_buff, AttributeMap *att_map, char* relname)
{
	char* cursor = sql_buff;
	int natts = att_map[0].atts;
	int rows = DF_GENERATE_NUM;
	int col;

	cursor += sprintf(cursor, "INSERT INTO %s(", relname);

	/* Column list, closed with ") SELECT " after the last name. */
	for(col = 0; col < natts; col++) {
		const char* tail = (col == natts - 1) ? ") SELECT " : ",";
		cursor += sprintf(cursor, "%s%s", att_map[col].attname, tail);
	}

	/* One value-producing expression per column. */
	for(col = 0; col < natts; col++) {
		const char* tail = (col == natts - 1) ? " " : ",";
		cursor += sprintf(cursor, "%s%s", type_data_procuder_f(att_map[col].typid), tail);
	}

	cursor += sprintf(cursor, "FROM generate_series(1,%d) AS generate_number", rows);

	df_debug("%s", sql_buff);
}

/*
 * Validate the user-requested unique column (user_arg.unique_column):
 * it must exist in the table, be an int-family type (IS_UNIQUE_TYPID), and
 * be the first column.  On success, marks that attribute unique and resets
 * its counter; every violation is reported via df_error.
 * NOTE(review): the code after df_error calls is written as unreachable —
 * confirm df_error terminates the process.
 */
static void unique_column_valid(AttributeMap *att_map)
{
	char* att_name = user_arg.unique_column;

	/* No unique column requested: nothing to validate. */
	if(!att_name)
		return;

	for(int loop = 0; loop < att_map[0].atts; loop++) {
		if(strcmp(att_name, att_map[loop].attname) == 0) {
			if(IS_UNIQUE_TYPID(att_map[loop].typid)) {
				user_arg.unique_no = loop;
				att_map[user_arg.unique_no].isunique = true;
				att_map[user_arg.unique_no].unique_num = 0;
				/* Only column position 0 is supported. */
				if(user_arg.unique_no != 0)
					df_error("Support unique first column only.");
				return;
			}
			else
				df_error("Unsupport %s be unique column because not int type", att_name);
		}
		/* Any non-int column before the match means the requested column
		 * cannot be the leading int column. */
		if(!IS_UNIQUE_TYPID(att_map[loop].typid)) {
			df_error("Unsupport %s be unique column, because not first int type", att_name);;
		}
	}
	df_error("No %s colum in table %s", att_name, user_arg.rel_name);
}

/*
 * Seed relname with the initial DF_GENERATE_NUM rows, via either the fast
 * bulk-SQL path or the slow prepared-insert path depending on user_arg.
 */
static void water_base_data(char* relname)
{
	char sql_buf[1024] = {0};

	df_debug("Water base data to %s", relname);
	unique_column_valid(att_map);

	srand((unsigned int)time(NULL));	/* seed RNG for the random row data */
	if(user_arg.fastmode) {
		caculate_base_data_sql(sql_buf, att_map, relname);
		base_data_import(sql_buf);
	} else {
		slow_insert_data(att_map, relname);
	}
}

/*
 * Read the first read_pages pages of the relation file at filepath into a
 * freshly allocated buffer.  The caller owns (and must free) the result.
 */
char* read_base_data(int read_pages, char* filepath)
{
	char *result = NULL;
	FILE *fp = NULL;
	int nread = 0;

	result = df_malloc((size_t)read_pages * PAGE_SIZE);
	if(!result)
		df_error("can not malloc for size %lu", (unsigned long)read_pages * PAGE_SIZE);
	fp = fopen(filepath, "rb");	/* binary mode: raw page images */
	if(!fp)
		df_error("Can not open file %s:%m", filepath);

	nread = fread(result, PAGE_SIZE, read_pages, fp);
	/* Original format string was broken ("%lu" with PAGE_SIZE, nread dangling). */
	if(nread != read_pages) {
		df_error("Read expect %d pages but got %d", read_pages, nread);
	}

	fclose(fp);
	return result;
}

/*
 * Append write_pages pages from base_buff to the relation file, re-stamping
 * page numbers (and the unique-column values) via page_hack so the appended
 * copies look like fresh pages.  read_pages is kept for interface stability.
 */
void write_base_data(int read_pages, char* filepath, char* base_buff, int write_pages)
{
	FILE *fp = NULL;
	uint32 base_pageno = 0;
	/* Next unique value to stamp; persists across calls within the run. */
	static uint64 first_unique_limit_page = DF_GENERATE_NUM + 1;

	(void)read_pages;	/* unused; kept so existing callers keep working */

	fp = fopen(filepath, "ab");
	if(!fp)
		df_error("Can not open file %s to append:%m", filepath);
	/* In append mode the initial position is implementation-defined until the
	 * first write; seek explicitly so ftell reports the real file size. */
	fseek(fp, 0, SEEK_END);
	base_pageno = ftell(fp) / PAGE_SIZE;
	page_hack(base_buff, write_pages, base_pageno, &first_unique_limit_page, att_map[user_arg.unique_no].typid);
	/* size/nmemb were swapped in the original (same byte count, but matches
	 * the fread(PAGE_SIZE, pages) convention used in read_base_data now). */
	fwrite(base_buff, PAGE_SIZE, write_pages, fp);
	fclose(fp);
}

#define ONE_G_SIZE (1024 * 1024 * 1024)       /* one relation segment: 1GiB in bytes */
#define COPY_PAGE_NUMBER 1024                 /* pages copied/hacked/written per batch */
#define PAGENUM_PER_SEG (ONE_G_SIZE / PAGE_SIZE) /* pages in a 1GiB segment */

/*
 * Materialize segment file "<filepath>.<file_no>" (1GiB) by repeatedly
 * copying the thread's in-memory base buffer, re-stamping page numbers and
 * unique values via page_hack, and writing to the target.  Per-phase timings
 * accumulate into cur_thread->fc_sta.
 */
void copy_1G_file(char* filepath, int file_no, MyThreadHandle *cur_thread)
{
	int write_num = 0;
	int write_total_num = 0;	/* bytes written so far (1GiB fits in int) */
	char target_file_path[DF_MAX_PATH_LEN] = {0};
	char *hack_buff = NULL;
	int cur_page_no = 0;
	FILE *fp_source = NULL;
	FILE *fp_target = NULL;
	uint64 unique_num = 0;
	uint64 start_time = 0;
	uint64 stop_time = 0;
	uint64 copy_ptr = 0;	/* read offset into cur_thread->base_buff */

	/* Lazily allocate the per-thread scratch buffer; reused across segments. */
	if(!cur_thread->hack_buff)
		cur_thread->hack_buff = df_malloc(PAGE_SIZE * COPY_PAGE_NUMBER);
	hack_buff = cur_thread->hack_buff;
	sprintf(target_file_path, "%s.%d", filepath, file_no);

	/* NOTE(review): fp_source is opened but never read — the data comes from
	 * cur_thread->base_buff.  Possibly kept to pin the source file; confirm. */
	fp_source = fopen(filepath, "r");
	if(!fp_source)
		df_error("Can not open file %s to read:%m", filepath);
	
	fp_target = fopen(target_file_path, "w");
	if(!fp_target)
		df_error("Can not open file %s to write:%m", target_file_path);	/* was: filepath */

	/* First unique value for this segment: tuples in all preceding segments. */
	unique_num = (ONE_G_SIZE / PAGE_SIZE) * tuples_per_page * file_no;
	while(true) {
		cur_page_no = PAGENUM_PER_SEG * file_no +  write_total_num / PAGE_SIZE;

		//READ
		start_time = get_current_time();
		memcpy(hack_buff, cur_thread->base_buff + copy_ptr, PAGE_SIZE * COPY_PAGE_NUMBER);
		copy_ptr += PAGE_SIZE * COPY_PAGE_NUMBER;
		stop_time = get_current_time();
		cur_thread->fc_sta.copy_buff_time += time_diff_ns(start_time, stop_time);
		
		//HACK
		start_time = get_current_time();
		page_hack(hack_buff, COPY_PAGE_NUMBER, cur_page_no, &unique_num, att_map[user_arg.unique_no].typid);
		stop_time = get_current_time();
		cur_thread->fc_sta.hack_buff_time += time_diff_ns(start_time, stop_time);

		//WRITE
		start_time = get_current_time();
		write_num = fwrite(hack_buff, 1, PAGE_SIZE * COPY_PAGE_NUMBER, fp_target);
		stop_time = get_current_time();
		cur_thread->fc_sta.write_buff_time += time_diff_ns(start_time, stop_time);
		if(write_num != PAGE_SIZE * COPY_PAGE_NUMBER)
			df_error("fwrite expect %d but %d", PAGE_SIZE * COPY_PAGE_NUMBER, write_num);

		write_total_num += write_num;
		if(write_total_num >= ONE_G_SIZE)	/* was a magic 1024*1024*1024 */
			break;
	}
	start_time = get_current_time();
	fclose(fp_target);
	fclose(fp_source);
	stop_time = get_current_time();
	cur_thread->fc_sta.close_file_time += time_diff_ns(start_time, stop_time);
}

/*
 * Clone the filled 1GiB base segment into file_num segment files using the
 * configured number of worker threads.
 */
static void expend_files(char* filepath, int file_num)
{
	int worker_threads = user_arg.thread_num;

	run_copyfile_thread(worker_threads, file_num, filepath);
}


/*
 * Grow relname from its freshly seeded size up to size_gb gigabytes:
 * replicate the seeded pages until the first 1GiB segment is full, then
 * (for size_gb > 1) clone that segment file in parallel via expend_files.
 */
static void expend_table(char* relname, int size_gb)
{
	uint32 rel_size_after = 0; //by pages
	int valid_pages = 0;
	int copy_times = 0;
	int remain_pages = 0;
	int loop = 0;
	char* base_data_buff = NULL;
	char target_file_path[1024] = {0};
	char *internal_path = NULL;
	char *pgdata_dir = NULL;


	rel_size_after = get_table_pages(relname);
	if(rel_size_after < 2)
		df_error("rel pages=%d after water", rel_size_after);
	//df_debug("pages after water=%d", rel_size_after);
	/* Seeding more than half a segment would complicate the copy layout,
	 * so it is not supported. */
	if(rel_size_after > PAGES_1GB / 2)
		df_error("rel pages=%d after water, not support this table layout", rel_size_after);

	/* Excludes one page — presumably the partially filled last page; confirm. */
	valid_pages = rel_size_after - 1;
	tuples_per_page = DF_GENERATE_NUM / valid_pages;
	copy_times = (PAGES_1GB - rel_size_after) / valid_pages;
	remain_pages = (PAGES_1GB - rel_size_after) % valid_pages;

	internal_path = get_table_relpath(relname);
	pgdata_dir = get_data_dir();
	sprintf(target_file_path, "%s/%s", pgdata_dir, internal_path);
	base_data_buff = read_base_data(valid_pages, target_file_path);

	/* Fill the current segment up to 1GiB. */
	for(loop = 0; loop < copy_times; loop++) {
		write_base_data(valid_pages, target_file_path, base_data_buff, valid_pages);
	}
	write_base_data(valid_pages, target_file_path, base_data_buff, remain_pages);
	free(base_data_buff);	/* was leaked in the original */

	if(size_gb > 1)
		expend_files(target_file_path, size_gb);
}

/*
 * Entry point: seed relname with base data, then expand it to size_gb GB.
 * Holds a table lock for the whole run so the relation files cannot change
 * size underneath the page-level copying.
 */
void table_watering(char* relname, int size_gb)
{
	uint32 rel_filenode = 0;
	uint32 rel_size_before = 0; //by pages
	uint64 start_time = 0;
    uint64 end_time = 0;

	/* Lock the table so its file size cannot change during the run. */
	lock_table(relname);
	att_map = get_table_attmap(relname);

	/* NOTE(review): rel_filenode is fetched but never used afterward. */
	rel_filenode = get_table_relfilenode(relname);
	rel_size_before = get_table_pages(relname);
	
	/* A non-empty table is only flooded when --force truncates it first.
	 * NOTE(review): the return after df_error suggests df_error may not
	 * always terminate — confirm its semantics. */
	if(rel_size_before > 0) {
		if(user_arg.force) {
			truncate_table(relname);
		} else {
			release_table();
			df_error("Can not do init to %s beacause it has data yet", relname);
			return;
		}
	}

	start_time = get_current_time();
	water_base_data(relname);
	end_time = get_current_time();
	df_log_0("Flood base data %ds",  time_diff_s(start_time, end_time));

	start_time = get_current_time();
	expend_table(relname, size_gb);
	end_time = get_current_time();
	df_log_0("Flood fill data %ds",  time_diff_s(start_time, end_time));

	release_table();
}

/*
 * Log the table's current size, rounded down to whole gigabytes.
 * The *8/1024/1024 arithmetic converts a page count to GB assuming 8KB pages.
 */
void show_table_size(char* relname)
{
	int page_count = get_table_pages(relname);
	int size_gb = page_count * 8 / 1024 / 1024;

	df_info("Flood %dGB data to %s", size_gb, relname);
}