# -*- coding: utf-8 -*-
"""
Created on Mon Oct  8 14:32:52 2018

@author: luolei

构建训练和测试样本
"""
import torch
import torch.utils.data as Data
from torch.utils.data import DataLoader
from scipy.ndimage.interpolation import shift
import pandas as pd
import numpy as np
from lake.decorator import time_cost
import copy
import sys

sys.path.append('../..')

from mod.modeling import *
from mod.tool.multicollinearity import multicollinearity_filtering


# %% 函数
def check_time_field(data):
	"""
	Ensure *data* carries a 'time' column.
	If the column is missing, the index is reset in place and used as the
	time field; otherwise the frame is returned untouched.
	:param data: pd.DataFrame, data table (mutated in place when 'time' is absent)
	:return: pd.DataFrame, the same frame, guaranteed to contain 'time'
	"""
	if 'time' in data.columns:
		return data
	data.reset_index(drop = True, inplace = True)
	data['time'] = data.index
	return data


def build_single_dim_manifold(time_series, embed_dim, lag, direc = 1):
	"""
	Build the delay-embedding manifold of a one-dimensional time series.
	Column i of the result is the input shifted by direc * i * lag positions
	(vacated positions are filled with 0 by scipy's shift default cval).
	:param time_series: np.ndarray or pd.Series, one-dimensional series, shape = (-1,)
	:param embed_dim: int, embedding dimension (number of lagged copies)
	:param lag: int, embedding delay between consecutive copies
	:param direc: int, shift direction — 1 shifts toward higher indices (down),
		-1 toward lower indices (up)
	:return: manifold: np.ndarray, embedded manifold, shape = (-1, embed_dim)
	"""
	# scipy.ndimage's shift() always returns a new array and never mutates
	# its input, so the defensive deepcopy the original made is unnecessary.
	columns = [shift(time_series, direc * dim * lag) for dim in range(embed_dim)]
	return np.array(columns).T


@time_cost
def build_samples_dataframe(data):
	"""
	Build the sample (feature) table from the input data.
	For every selected column a delay-embedding manifold is built, warm-up
	rows without a full history are dropped, and multicollinear feature
	columns are filtered out.
	:param data: pd.DataFrame, data table
	:return: (total_samples, total_cols_n, cols, embed_dims) — the sample
		table, total feature-column count, column names, and the per-variable
		embedding dimensions remaining after collinearity filtering
	"""
	data = data.copy()
	
	# Make sure a 'time' column is present.
	data = check_time_field(data)
	
	# Embedding dimension per variable; forecast-bearing columns get
	# extra depth to cover their look-ahead lag.
	embed_dims = dict()
	for col in selected_cols:
		extra = 0
		if (forecast_lags is not None) and (col in forecast_lags.keys()):
			extra = forecast_lags[col]
		embed_dims[col] = (acf_lags[col] + extra) // embed_lags[col]
	
	# Shift forecast-bearing columns upward so future values align with now.
	if forecast_lags is not None:
		for col in forecast_lags.keys():
			data[col] = shift(data[col], -1 * forecast_lags[col])
	
	# Build the embedded manifold for each continuous variable
	# (columns are in reverse time order).
	total_samples = data[['time']]
	for col in selected_cols:
		manifold = build_single_dim_manifold(data.loc[:, col], embed_dims[col], embed_lags[col])
		col_names = ['{}_{}'.format(col, i) for i in range(manifold.shape[1])]
		frame = pd.DataFrame(manifold, columns = col_names)
		total_samples = pd.concat([total_samples, frame], axis = 1, sort = True)
	
	# Drop the leading warm-up rows that lack a full embedding history.
	total_samples = total_samples.iloc[max(embed_dims.values()) - 1:, :]
	
	# Remove multicollinear feature columns.
	total_samples = multicollinearity_filtering(total_samples)
	
	# Recount dimensions per variable after the filtering step.
	embed_dims = {col: len([p for p in total_samples.columns if col in p]) for col in selected_cols}

	total_cols_n = sum(embed_dims.values())
	cols = list(total_samples.columns)
	
	return total_samples, total_cols_n, cols, embed_dims


def build_targets_dataframe(data):
	"""
	Build the target table: for each target column, the next `pred_dim`
	values arranged in forward time order.
	:param data: pd.DataFrame, data table
	:return: total_targets: pd.DataFrame, a 'time' column plus `pred_dim`
		columns per target variable
	"""
	data = data.copy()
	embed_lag = 1
	
	# Make sure a 'time' column is present.
	data = check_time_field(data)
	
	total_targets = data[['time']]
	for col in target_cols:
		targets = build_single_dim_manifold(data.loc[:, col], pred_dim, embed_lag, direc = -1)  # forward time order
		columns = [col + '_{}'.format(i) for i in range(targets.shape[1])]
		targets = pd.DataFrame(targets, columns = columns)
		total_targets = pd.concat([total_targets, targets], axis = 1, sort = True)
	
	# Trim the trailing rows whose look-ahead window runs past the series end.
	# BUG FIX: the original used iloc[:(-pred_dim + 1)], which for
	# pred_dim == 1 evaluates to iloc[:0] and silently emptied the table.
	if pred_dim > 1:
		total_targets = total_targets.iloc[:-(pred_dim - 1), :]
	
	return total_targets


def build_total_samples_and_targets():
	"""
	Assemble the full sample and target tables, aligned on timestamps
	present in both.
	:return: (total_samples, total_targets, total_cols_n)
	"""
	data = pd.read_csv('../../data/runtime/data_nmlzd.csv')
	
	total_samples, total_cols_n, cols, embed_dims = build_samples_dataframe(data)
	total_targets = build_targets_dataframe(data)
	print('embed_dims: {}'.format(embed_dims))
	print('total_cols_n: {}'.format(total_cols_n))
	
	# Keep only the timestamps shared by the sample and target tables.
	sample_tsps = {int(p) for p in total_samples['time']}
	target_tsps = {int(p) for p in total_targets['time']}
	mutual_tsps = sorted(sample_tsps & target_tsps)
	
	total_samples = total_samples[total_samples.time.isin(mutual_tsps)].reset_index(drop = True)
	total_targets = total_targets[total_targets.time.isin(mutual_tsps)].reset_index(drop = True)
	
	return total_samples, total_targets, total_cols_n


def build_train_samples_and_targets():
	"""
	Build the training feature and target matrices: the first 70% of the
	time span, with targets leading samples by one hour (`hr`).
	:return: (X_train, y_train, total_cols_n)
	"""
	total_samples, total_targets, total_cols_n = build_total_samples_and_targets()
	
	# Split timestamp: 70% of the span, snapped down to a whole hour.
	train_ratio = 0.7
	times = list(total_samples.time)
	first_tsp, last_tsp = min(times), max(times)
	tsp_split = (train_ratio * (last_tsp - first_tsp) + first_tsp) // hr * hr
	
	# Samples before the split; targets shifted forward by one hour.
	sample_mask = (total_samples.time >= first_tsp) & (total_samples.time < tsp_split)
	target_mask = (total_targets.time >= first_tsp + hr) & (total_targets.time < tsp_split + hr)
	
	X_train = total_samples[sample_mask].iloc[:, 1:].to_numpy()
	y_train = total_targets[target_mask].iloc[:, 1:].to_numpy()
	
	return X_train, y_train, total_cols_n


def build_test_samples_and_targets():
	"""
	Build the test feature and target matrices: the last 30% of the time
	span, with targets leading samples by one hour (`hr`).
	:return: (X_test, y_test, total_cols_n)
	"""
	total_samples, total_targets, total_cols_n = build_total_samples_and_targets()
	
	# Split timestamp: same 70% cut as the training split, hour-aligned.
	train_ratio = 0.7
	times = list(total_samples.time)
	first_tsp, last_tsp = min(times), max(times)
	tsp_split = (train_ratio * (last_tsp - first_tsp) + first_tsp) // hr * hr
	
	# Samples from the split onward; targets shifted forward by one hour.
	sample_mask = (total_samples.time >= tsp_split) & (total_samples.time < last_tsp)
	target_mask = (total_targets.time >= tsp_split + hr) & (total_targets.time < last_tsp + hr)
	
	X_test = total_samples[sample_mask].iloc[:, 1:].to_numpy()
	y_test = total_targets[target_mask].iloc[:, 1:].to_numpy()
	
	return X_test, y_test, total_cols_n


def build_train_and_verify_datasets():
	"""
	Build shuffled train/verify DataLoaders from the training split
	(80% train / 20% verify).
	:return: (trainloader, verifyloader, X_train, y_train, X_verify, y_verify, total_cols_n)
	"""
	X, y, total_cols_n = build_train_samples_and_targets()
	
	# Shuffle X and y rows with one shared permutation.
	perm = np.random.permutation(X.shape[0])
	X, y = X[perm, :], y[perm, :]
	
	# 80/20 train/verify split.
	n_train = int(0.8 * X.shape[0])
	X_train, y_train = X[:n_train, :], y[:n_train, :]
	X_verify, y_verify = X[n_train:, :], y[n_train:, :]
	
	def _to_tensor(arr):
		# Models train in float32.
		return torch.from_numpy(arr.astype(np.float32))
	
	train_dataset = Data.TensorDataset(_to_tensor(X_train), _to_tensor(y_train))
	trainloader = DataLoader(train_dataset, batch_size = batch_size, shuffle = True)
	verify_dataset = Data.TensorDataset(_to_tensor(X_verify), _to_tensor(y_verify))
	# One batch containing the whole verification set.
	verifyloader = DataLoader(verify_dataset, batch_size = X_verify.shape[0])
	
	return trainloader, verifyloader, X_train, y_train, X_verify, y_verify, total_cols_n


if __name__ == '__main__':
	# Smoke run: load the normalized runtime data and build the sample table.
	data = pd.read_csv('../../data/runtime/data_nmlzd.csv')
	total_samples, total_cols_n, cols, embed_dims = build_samples_dataframe(data)


