import numpy as np
from data_process import EUAData
from data_process import TimeSeriesDataset
from lstm import LSTMModel
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from sklearn.preprocessing import MinMaxScaler
import xlwt
from args import read_args
import math
import os

# Pin all CUDA work to GPU 0 (must be set before any CUDA context is created).
os.environ['CUDA_VISIBLE_DEVICES']='0'

def inverse_transf(data):
	"""Map an (N, 4) array of transformed OHLC features back to price space.

	Column layout of ``data``: col 0 -> log(low), col 1 -> log(high - low),
	cols 2/3 -> logits placing open/last between low and high.
	# assumes this column order matches the forward transform in
	# data_process — TODO confirm.

	Returns an (N, 4) array with columns [open, high, low, last].
	"""
	y1 = data[:, 0]
	y2 = data[:, 1]
	y3 = data[:, 2]
	y4 = data[:, 3]
	# low = exp(y1); high = low + exp(y2), which guarantees high >= low.
	low_o = (np.exp(y1))[:, np.newaxis]
	high_o = (np.exp(y1) + np.exp(y2))[:, np.newaxis]
	# Sigmoids of y3/y4 give interpolation weights in (0, 1), so open and
	# last always land between low and high.
	flambda1 = (np.exp(y3) / (np.exp(y3) + 1))[:, np.newaxis]
	flambda2 = (np.exp(y4) / (np.exp(y4) + 1))[:, np.newaxis]
	open_o = flambda1 * high_o + (1 - flambda1) * low_o
	last_o = flambda2 * high_o + (1 - flambda2) * low_o
	# Single concatenate instead of three chained ones; locals renamed so
	# the builtin `open` is no longer shadowed.
	return np.concatenate([open_o, high_o, low_o, last_o], axis=1)

# def recover(z):
# 	for y in z:
# 		y1_pre, y2_pre, y3_pre, y4_pre = y[0], y[1], y[2], y[3]
# 		flow = torch.exp(y1_pre)
# 		fhigh = torch.exp(y1_pre) + torch.exp(y2_pre)
# 		flambda1 = torch.exp(y3_pre) / (torch.exp(y3_pre) + 1)
# 		flambda2 = torch.exp(y4_pre) / (torch.exp(y4_pre) + 1)
# 		fopen = flambda1 * fhigh + (1 - flambda1) * flow
# 		fclose = flambda2 * fhigh + (1 - flambda2) * flow
# 		y[0], y[1], y[2], y[3] = fopen, fhigh, flow, fclose
# 	return z

args = read_args()

# load data
data_p = EUAData()
data_f, data_img_f, data_l = data_p.download_data()
data_x = np.array(data_f)
data_img_x = np.array(data_img_f)
data_y = np.array(data_l)

# split dataset
# Chronological split (no shuffling before the cut): the first
# train_split_size fraction of samples trains, the remainder validates.
split_index = int(data_y.shape[0] * args.train_split_size)
data_x_train = data_x[:split_index]
data_x_val = data_x[split_index:]
data_img_x_train = data_img_x[:split_index]
data_img_x_val = data_img_x[split_index:]
data_y_train = data_y[:split_index]
data_y_val = data_y[split_index:]

# data normalization
# def normalization2(input):
# 	input_shape = input.shape
# 	input = input.reshape([input_shape[0], input_shape[1] * input_shape[2]])
# 	minmax = MinMaxScaler()  # instantiate the scaler
# 	output = minmax.fit_transform(input)
# 	output = output.reshape([input_shape[0], input_shape[1], input_shape[2]])
# 	return output
#
# data_x_train = normalization2(data_x_train)
# data_x_val = normalization2(data_x_val)

# data loader
dataset_train = TimeSeriesDataset(data_x_train, data_img_x_train, data_y_train)
dataset_val = TimeSeriesDataset(data_x_val, data_img_x_val, data_y_val)
print("Train data shape", dataset_train.x.shape, dataset_train.y.shape)
print("Validation data shape", dataset_val.x.shape, dataset_val.y.shape)

# NOTE(review): shuffle=True on the VALIDATION loader is unusual; harmless
# for an averaged loss but confirm it is intentional.
train_dataloader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=True)


def run_epoch(dataloader, is_training=False):
	"""Run one pass over `dataloader`.

	Training: optimizes `model` with `criterion`; returns the sum over
	batches of (batch loss / batch size), as before.
	Evaluation: returns the `Loss1` value averaged over all batches as a
	float.  (BUGFIX: the original overwrote `epoch_loss` each batch, so it
	returned only the LAST batch's loss — and as a tensor, not a number.)

	Uses the module-level `model`, `criterion`, `Loss1`, `optimizer`,
	`scheduler` and `args`.  Returns (epoch_loss, current_lr).
	"""
	epoch_loss = 0
	num_batches = 0

	if is_training:
		model.train()
	else:
		model.eval()

	for idx, (x, img_x, y) in enumerate(dataloader):
		num_batches += 1
		x = x.to(args.device)
		img_x = img_x.to(args.device)
		y = y.to(args.device)

		if is_training:
			optimizer.zero_grad()
			out = model(x, img_x)
			loss = criterion(out.contiguous(), y.contiguous())
			loss.backward()
			optimizer.step()
			# Keep the original scaling: per-batch loss divided by batch size.
			epoch_loss += (loss.detach().item() / x.shape[0])
		else:
			# No autograd bookkeeping needed for validation.
			with torch.no_grad():
				out = model(x, img_x)
				# Accumulate instead of overwriting so the result reflects
				# the whole validation set.
				epoch_loss += Loss1(out.contiguous(), y.contiguous()).item()

	if not is_training and num_batches > 0:
		epoch_loss /= num_batches

	lr = scheduler.get_last_lr()[0]

	return epoch_loss, lr


class Loss(nn.Module):
	"""Training loss: MSE, optionally plus squared-hinge penalties that push
	predictions toward a valid OHLC ordering (low <= open/last <= high,
	low >= 0).

	# assumes yhat columns are [open, high, low, last] — TODO confirm
	# against the model's output layout.
	Reads `args.loss` and `args.penalty` from the module-level args.
	"""

	def __init__(self):
		super().__init__()
		self.mse = nn.MSELoss()

	def forward(self, yhat, y):
		if args.loss == 'penalty':
			penalty_factor = args.penalty
			yhat_o = yhat[:, 0]
			yhat_h = yhat[:, 1]
			yhat_lo = yhat[:, 2]
			yhat_la = yhat[:, 3]
			# Each penalty is (worst constraint violation in the batch)^2,
			# zero when the constraint holds.  torch.clamp replaces the
			# original Python max(max(...), 0) — same value, but idiomatic
			# and computed on-device.
			penalty1 = torch.clamp((yhat_lo - yhat_h).max(), min=0) ** 2 * penalty_factor
			penalty2 = torch.clamp((yhat_o - yhat_h).max(), min=0) ** 2 * penalty_factor
			penalty3 = torch.clamp((yhat_lo - yhat_o).max(), min=0) ** 2 * penalty_factor
			penalty4 = torch.clamp((yhat_la - yhat_h).max(), min=0) ** 2 * penalty_factor
			penalty5 = torch.clamp((yhat_lo - yhat_la).max(), min=0) ** 2 * penalty_factor
			# Heavy extra penalty on any negative predicted low.
			penalty6 = torch.clamp(yhat_lo.min(), max=0) ** 2 * 10000
			return self.mse(yhat, y) + penalty1 + penalty2 + penalty3 + penalty4 + penalty5 + penalty6
		return self.mse(yhat, y)


class Loss_val(nn.Module):
	"""Validation loss: mean absolute percentage error as a fraction
	(NOT multiplied by 100 — see the commented alternative in history).

	For the 'unconstrained' variant, predictions and labels are first
	mapped back to price space via `inverse_transf` (a NumPy function),
	so the tensors take a round trip through the CPU.
	"""

	def __init__(self):
		super().__init__()
		self.mse = nn.MSELoss()  # kept for interface compatibility; unused here

	def forward(self, yhat, y):
		if args.if_constrained == 'unconstrained':
			# BUGFIX: restore tensors to their ORIGINAL device instead of
			# the hard-coded .cuda(), so CPU-only runs also work.
			device = y.device
			yhat_np = inverse_transf(yhat.cpu().detach().numpy())
			y_np = inverse_transf(y.cpu().detach().numpy())
			yhat = torch.tensor(yhat_np).to(device)
			y = torch.tensor(y_np).to(device)
		loss = torch.mean(torch.abs(torch.div(y - yhat, y)))
		return loss


# model
# output_size=4 -> one prediction per transformed OHLC column.
model = LSTMModel(input_size=args.input_size, hidden_layer_size=args.lstm_size,
				  num_layers=args.num_lstm_layers, output_size=4, dropout=args.dropout)
model = model.to(args.device)
# NOTE(review): the two .cuda() calls assume a GPU is present even though
# the model itself uses args.device — these lines fail on CPU-only machines.
criterion = Loss().cuda()
Loss1 = Loss_val().cuda()
# Adam with transformer-style betas/eps; StepLR decays lr by 10x every
# scheduler_step_size epochs.
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, betas=(0.9, 0.98), eps=1e-9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.scheduler_step_size, gamma=0.1)

# train
best = 66666666  # sentinel "infinity" for the best validation loss so far
best_epoch = 0
for epoch in range(args.num_epoch):
	loss_train, lr_train = run_epoch(train_dataloader, is_training=True)
	loss_val, lr_val = run_epoch(val_dataloader, is_training=False)
	scheduler.step()
	# Checkpoint whenever validation loss improves.
	if best > loss_val:
		best = loss_val
		best_epoch = epoch
		# NOTE(review): hard-coded absolute path, hyperparameters jammed into
		# the filename, and torch.save of the whole module (not state_dict) —
		# fragile across machines and torch versions; confirm intentional.
		torch.save(model, '/home/hlf/code/code_LSTM+CNN/model/' + str(best) + str(args.input_size)
				   + str(args.num_lstm_layers) + str(args.lstm_size) + str(args.learning_rate) + '.pt')
	print('Epoch[{}/{}] | loss train:{:.6f}, test:{:.6f} | lr:{:.6f} | best:{:.6f} | best_epoch:{}'
		  .format(epoch + 1, args.num_epoch, loss_train, loss_val, lr_train, best, best_epoch))
	# print('Epoch[{}/{}] | loss train:{:.6f}, | lr:{:.6f}'
	# 	  .format(epoch + 1, args.num_epoch, loss_train, lr_train))


# here we re-initialize dataloader so the data doesn't shuffled, so we can plot the values by date
# (shuffle=False keeps chronological order for the prediction/export passes below)
train_dataloader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=False)
val_dataloader = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=False)


# class AverageMeter(object):
# 	"""Computes and stores the average and current value"""
#
# 	def __init__(self):
# 		self.reset()
#
# 	def reset(self):
# 		self.val = 0
# 		self.avg = 0
# 		self.sum = 0
# 		self.count = 0
#
# 	def update(self, val, n=1):
# 		self.val = val
# 		self.sum += val * n
# 		self.count += n
# 		self.avg = self.sum / self.count


model.eval()

# predict on the training data, to see how well the model managed to learn and memorize

predicted_train = np.array([])

# NOTE(review): the concatenation below is commented out, so predicted_train
# stays empty and `out` holds only the LAST training batch's predictions after
# this loop; the loop also runs without torch.no_grad() — confirm intentional.
for idx, (x, img_x, y) in enumerate(train_dataloader):
	x = x.to(args.device)
	img_x = img_x.to(args.device)
	out = model(x, img_x)
	out = out.cpu().detach().numpy()
# predicted_train = np.concatenate((predicted_train, out))

# predict on the validation data, to see how the model does

predicted_val = np.array([])
loss_val = Loss_val()
# losses_1 = AverageMeter()

# NOTE(review): as above — after this loop, `out` and `loss` hold values from
# the last validation batch only, and predicted_val stays empty; these
# last-batch values are what the export/MAPE code below consumes.
for idx, (x, img_x, y) in enumerate(val_dataloader):
	x = x.to(args.device)
	img_x = img_x.to(args.device)
	out = model(x, img_x)
	loss = criterion(out.contiguous(), y.cuda().contiguous())
	out = out.cpu().detach().numpy()
# y_val_tensor = torch.from_numpy(data_y_val.astype(np.float))
# y_pre_tensor = torch.from_numpy(out)
# loss_val_1 = loss_val(y_pre_tensor, y_val_tensor)
# loss_val_1 = loss_val_1.float()
# losses_1.update(loss_val_1.item(), x.size(0))


# predicted_val = np.concatenate((predicted_val, out))

# save excel
def saveMatrix2Excel(data, path):
	"""Save a 2-D numpy matrix to an .xls file with a fixed OHLC header row.

	Row 0 holds the column titles; data row i lands on sheet row i + 1.
	Assumes `data` has at most 4 columns (one per title).
	"""
	f = xlwt.Workbook()  # create the workbook
	sheet1 = f.add_sheet(u'sheet1', cell_overwrite_ok=True)  # create the sheet
	title = ['Open(QCFI2Z2)', 'High(QCFI2Z2)', 'Low(QCFI2Z2)', 'Last(QCFI2Z2)']
	h, l = data.shape  # h = number of rows, l = number of columns
	# Write the header once, up front (the original re-tested i == 0 inside
	# the data loop on every row).
	for j in range(l):
		sheet1.write(0, j, title[j])
	for i in range(h):
		for j in range(l):
			sheet1.write(i + 1, j, float(data[i, j]))
	f.save(path)

# Dump the last validation batch's predictions and the full validation labels.
output_path = 'output_' + args.if_constrained + '.xls'
label_path = 'label_' + args.if_constrained + '.xls'
saveMatrix2Excel(out, output_path)
saveMatrix2Excel(data_y_val, label_path)




# BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented replacement (same float64 dtype).
data_y_val = data_y_val.astype(float)
if args.if_constrained == "unconstrained":
	# Map both labels and predictions back to price space before scoring.
	data_y_val = inverse_transf(data_y_val)
	out = inverse_transf(out)

def MAPE(out, label):
	"""Mean absolute percentage error between predictions and labels, in %."""
	relative_error = np.abs(np.divide(label - out, label))
	return 100 * relative_error.mean()

# Score the last validation batch's predictions against the labels.
mape = MAPE(out, data_y_val)
print('mape:', mape)

# NOTE(review): `loss` is the criterion value of the LAST validation batch
# only, and predicted_train/predicted_val are still the empty arrays created
# above (their concatenations were commented out) — confirm intentional.
print(loss)
print(predicted_train, predicted_val)