import numpy as np
from data_process import EUAData
from data_process import TimeSeriesDataset
from lstm import LSTMModel
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from sklearn.preprocessing import MinMaxScaler
import xlwt
from args import read_args
import math
import os

# Restrict the process to physical GPU 1; inside the process it is seen as cuda:0.
os.environ['CUDA_VISIBLE_DEVICES']='1'

def inverse_transf(data):
	"""Invert the log/logit OHLC transform back to price space.

	Columns of ``data`` are the transformed series, decoded as:
	- col 0: log of the low price            -> low_o  = exp(col0)
	- col 1: log of the (high - low) spread  -> high_o = exp(col0) + exp(col1)
	- col 2/3: logits placing open/last between low_o and high_o.
	NOTE(review): the column meanings above are inferred from the math here;
	confirm against the forward transform in data_process.

	Args:
		data: 2-D array of shape (n, 4) in transformed space.

	Returns:
		2-D float array of shape (n, 4): reconstructed (open, high, low, last).
	"""
	# `c0` etc. instead of the original `open`/`high`/... names — the original
	# shadowed the builtin `open`, and the names did not match the decoded roles.
	c0, c1, c2, c3 = data[:, 0], data[:, 1], data[:, 2], data[:, 3]
	low_o = np.exp(c0)[:, np.newaxis]
	high_o = (np.exp(c0) + np.exp(c1))[:, np.newaxis]
	# Sigmoid weights: open/last are convex combinations of high_o and low_o,
	# which guarantees low_o <= open_o, last_o <= high_o.
	flambda1 = (np.exp(c2) / (np.exp(c2) + 1))[:, np.newaxis]
	flambda2 = (np.exp(c3) / (np.exp(c3) + 1))[:, np.newaxis]
	open_o = flambda1 * high_o + (1 - flambda1) * low_o
	last_o = flambda2 * high_o + (1 - flambda2) * low_o
	# Single concatenate instead of three chained calls.
	return np.concatenate([open_o, high_o, low_o, last_o], axis=1)

args = read_args()

# load data
# NOTE(review): EUAData/download_data come from data_process; the three
# returned sequences are presumed date-aligned — confirm in data_process.
data_p = EUAData()
data_f, data_img_f, data_l = data_p.download_data()
data_x = np.array(data_f)
data_img_x = np.array(data_img_f)
data_y = np.array(data_l)

# split dataset
# Chronological (unshuffled) train/validation split, appropriate for a time
# series: the first `train_split_size` fraction of rows is the training set.
split_index = int(data_y.shape[0] * args.train_split_size)
data_x_train = data_x[:split_index]
data_x_val = data_x[split_index:]
data_img_x_train = data_img_x[:split_index]
data_img_x_val = data_img_x[split_index:]
data_y_train = data_y[:split_index]
data_y_val = data_y[split_index:]

# data loader
dataset_train = TimeSeriesDataset(data_x_train, data_img_x_train, data_y_train)
dataset_val = TimeSeriesDataset(data_x_val, data_img_x_val, data_y_val)
print("Train data shape", dataset_train.x.shape, dataset_train.y.shape)
print("Validation data shape", dataset_val.x.shape, dataset_val.y.shape)

# NOTE(review): hard-coded absolute checkpoint path whose filename embeds a
# printed loss tensor; torch.load unpickles a full model object. Consider
# map_location=args.device in case the saved device is unavailable.
model = torch.load("/home/hlf/code/code_LSTM+CNN/model/"
				   "tensor(0.0189, device='cuda:0', grad_fn=<MeanBackward0>)272150.005.pt")
model = model.to(args.device)


# Re-initialize dataloaders without shuffling so predictions stay aligned with
# the date-ordered labels for plotting/evaluation.
train_dataloader = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=False)
val_dataloader = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=False)


model.eval()

# Predict on the validation data, to see how the model does.
# BUG FIX: the original loop rebound `out` on every batch, so only the LAST
# batch's predictions reached the error stats, the Excel export and the MAPE
# below. Accumulate every batch and concatenate instead.
batch_outputs = []
with torch.no_grad():  # inference only — skip autograd bookkeeping
	for x, img_x, y in val_dataloader:
		x = x.to(args.device)
		img_x = img_x.to(args.device)
		batch_outputs.append(model(x, img_x).cpu().numpy())
out = np.concatenate(batch_outputs, axis=0)

# Sanity-check OHLC ordering constraints on each predicted row
# (columns: 0=open, 1=high, 2=low, 3=last):
#   low >= 0, low <= high, low <= open <= high, low <= last <= high.
error_count = 0
for row in out:
	o, h, l, c = row[0], row[1], row[2], row[3]
	if l < 0:
		error_count += 1
		print('1', l)
	if l > h:
		error_count += 1
		print('2', l, h)
	if o < l or o > h:
		error_count += 1
		print('3', o, l, h)
	if c < l or c > h:
		# NOTE(review): the original printed but did not count this violation;
		# that behavior is preserved here.
		print('4', c, l, h)
print(error_count)


# save excel
def saveMatrix2Excel(data, path):
	"""Save a 2-D numpy matrix to an .xls workbook with an OHLC header row.

	Row 0 holds the column titles; data row i is written to sheet row i + 1.

	Args:
		data: 2-D array-like of shape (rows, 4), elements castable to float.
		path: destination .xls file path.
	"""
	f = xlwt.Workbook()  # create workbook
	sheet1 = f.add_sheet(u'sheet1', cell_overwrite_ok=True)  # create sheet
	title = ['Open(QCFI2Z2)', 'High(QCFI2Z2)', 'Low(QCFI2Z2)', 'Last(QCFI2Z2)']
	h, l = data.shape  # h = number of rows, l = number of columns
	# Write the header once, up front. The original wrote it inside the data
	# loop behind `if i == 0`, so an empty matrix produced no header at all.
	for j in range(l):
		sheet1.write(0, j, title[j])
	for i in range(h):
		for j in range(l):
			sheet1.write(i + 1, j, float(data[i, j]))
	f.save(path)

# Persist predictions and ground truth to .xls for offline comparison;
# `if_constrained` tags the files with the experiment variant.
output_path = 'test_result/output_' + args.if_constrained + '.xls'
label_path = 'test_result/label_' + args.if_constrained + '.xls'
saveMatrix2Excel(out, output_path)
saveMatrix2Excel(data_y_val, label_path)


# Cast labels to float for the MAPE computation below.
# BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` (i.e. float64 precision) is the drop-in replacement.
data_y_val = data_y_val.astype(float)

def MAPE(out, label):
	"""Mean absolute percentage error, in percent.

	NOTE: assumes no element of ``label`` is zero — a zero label yields
	inf/nan and poisons the mean.

	Args:
		out: predicted values, broadcastable against ``label``.
		label: ground-truth values.

	Returns:
		Scalar MAPE = mean(|(label - out) / label|) * 100.
	"""
	return np.mean(np.abs((label - out) / label)) * 100

# Overall percentage error on the validation split.
# NOTE(review): verify that `out` covers the FULL validation set (all batches)
# and matches data_y_val's shape before trusting this number.
mape = MAPE(out, data_y_val)
print('mape:', mape)
