# -*- coding: utf-8 -*-
"""
Created on Mon Oct  8 14:32:52 2018

@author: luolei

Model prediction: run a trained NN on the test set and evaluate the results.
"""
import json
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
import sys

sys.path.append('../..')

from lib.eval_model import *


#%% Function and class definitions
def model_prediction(X_test, nn_model):
	"""Run the trained network on the test samples.

	:param X_test: np.ndarray, test feature matrix (cast to float32 before the
		forward pass)
	:param nn_model: torch.nn.Module, trained model mapping features to predictions
	:return: np.ndarray, model outputs moved to CPU
	"""
	# torch.autograd.Variable has been deprecated since PyTorch 0.4 -- plain
	# tensors carry autograd state themselves.  no_grad() skips building the
	# autograd graph entirely, which is cheaper for pure inference.
	with torch.no_grad():
		x_tensor = torch.from_numpy(X_test.astype(np.float32))
		y_pred = nn_model(x_tensor).cpu().numpy()
	return y_pred


def model_evaluation(y_test_real, y_test_pred, col, show_fitting = True, show_scatter = True, show_evals = True,
					 show_heatmap = True):
	"""Evaluate one target's multi-step predictions and optionally plot results.

	:param y_test_real: np.ndarray of shape (n_samples, pred_dim), true values
	:param y_test_pred: np.ndarray of the same shape, predicted values
	:param col: str, target column name (used in plot titles and for the
		``var_bounds`` lookup)
	:param show_fitting: bool, draw true/pred curves at the selected steps
	:param show_scatter: bool, draw true-vs-pred scatter at the selected steps
	:param show_evals: bool, draw each metric over all prediction steps
	:param show_heatmap: bool, draw heatmaps of the real/pred matrices
	:return: (mae_results, r2_results, rmse_results, smape_results), each a
		list with one entry per prediction step

	Relies on module-level names from ``lib.eval_model``: the metric functions
	``mae``/``rmse``/``smape``/``r2`` plus ``pred_dim`` and ``var_bounds``.
	"""
	# Prediction steps (0-based) at which to report/plot; entries beyond the
	# horizon are dropped.  BUG FIX: this list used to be initialised empty,
	# so the fitting and scatter plots were silently never drawn.
	steps = [0, 49, 99, 199, 299, 399, 499]
	valid_steps = [s for s in steps if s <= pred_dim - 1]

	# Per-step metrics for every prediction horizon.
	rmse_results, smape_results, mae_results, r2_results = [], [], [], []
	for i in range(y_test_real.shape[1]):
		rmse_results.append(rmse(y_test_real[:, i], y_test_pred[:, i]))
		smape_results.append(smape(y_test_real[:, i], y_test_pred[:, i]))
		mae_results.append(mae(y_test_real[:, i], y_test_pred[:, i]))
		r2_results.append(r2(y_test_real[:, i], y_test_pred[:, i]))

	print('\n========== {} PREDICTION EVALUATIONS ==========='.format(col))
	for step in valid_steps:
		print('step {}: mae = {:4f}, rmse = {:4f}, smape = {:4f}, r2 = {:4f}'.format(
			step + 1, mae_results[step], rmse_results[step], smape_results[step], r2_results[step])
		)
	print('===========================================')

	if show_fitting:
		plt.figure('{} fitting results'.format(col), figsize = [5, 10])
		for idx, step in enumerate(valid_steps):
			plt.subplot(len(steps), 1, idx + 1)
			if idx == 0:
				plt.title('fitting results at different pred steps')
			plt.plot(y_test_real[:, step])
			plt.plot(y_test_pred[:, step], 'r')
			plt.ylabel(col)
			plt.legend(['step = {}'.format(step + 1)], loc = 'upper right')
			if idx == len(valid_steps) - 1:
				plt.xlabel('time step')
				plt.tight_layout()

	if show_scatter:
		bounds = var_bounds[col]
		plt.figure('{} scatter plot'.format(col), figsize = [8, 10])
		plt.suptitle('comparison of true and pred values at different predicting time steps')
		for idx, step in enumerate(valid_steps):
			# BUG FIX: subplot counts must be ints -- `len(steps) / 2` is a
			# float in Python 3 and fails; use ceiling integer division.
			plt.subplot(2, (len(steps) + 1) // 2, idx + 1)
			plt.scatter(y_test_real[:, step], y_test_pred[:, step], s = 1)
			plt.plot(bounds, bounds, 'k--')  # identity line = perfect prediction
			plt.xlim(bounds)
			plt.ylim(bounds)
			plt.xlabel('true value')
			plt.ylabel('pred value')
			plt.legend(['step = {}, r2_score: {:.2f}'.format(step, r2_results[step])], loc = 'upper right')
			if idx == len(valid_steps) - 1:
				plt.xlabel('time step')
				plt.tight_layout()

	if show_evals:
		# BUG FIX: the original located each list with eval(method +
		# '_results'); explicit (name, series) pairs avoid eval entirely.
		eval_series = [('mae', mae_results), ('smape', smape_results),
					   ('rmse', rmse_results), ('r2', r2_results)]
		plt.figure('{} evaluation results at different time steps'.format(col), figsize = [5, 6])
		for idx, (method, series) in enumerate(eval_series):
			plt.subplot(len(eval_series), 1, idx + 1)
			if idx == 0:
				plt.title('model evaluations with different methods')
			plt.plot(series)
			plt.xlim([0, 71])  # NOTE(review): hard-coded 72-step axis -- confirm against pred_dim
			plt.ylabel(method)
			if idx == len(eval_series) - 1:
				plt.ylim([-0.2, 1.0])
				plt.xlabel('time step')
				plt.tight_layout()

	if show_heatmap:
		# Side-by-side heatmaps of the full real and predicted matrices.
		plt.figure(figsize = [12, 6])
		plt.subplot(1, 2, 1)
		sns.heatmap(y_test_real, cmap = 'Blues')
		plt.subplot(1, 2, 2)
		sns.heatmap(y_test_pred, cmap = 'Blues')
		plt.tight_layout()

	return mae_results, r2_results, rmse_results, smape_results


#%% Main routine
if __name__ == '__main__':
	# Load the trained network from disk.
	nn_model = load_models()

	# Assemble the test samples and their targets.
	X_test, y_test, total_cols_n = build_test_samples_and_targets()

	# Forward pass over the whole test set.
	y_pred = model_prediction(X_test, nn_model)

	# Slice the flat target/prediction matrices into one block per pollutant:
	# column group i holds the pred_dim prediction steps of target_cols[i].
	y_real_dict = {
		col: y_test[:, i * pred_dim: (i + 1) * pred_dim]
		for i, col in enumerate(target_cols)
	}
	y_pred_dict = {
		col: y_pred[:, i * pred_dim: (i + 1) * pred_dim]
		for i, col in enumerate(target_cols)
	}

	# Undo the min-max normalisation to recover physical values.
	for col in target_cols:
		bounds = var_bounds[col]
		scale = bounds[1] - bounds[0]
		y_real_dict[col] = y_real_dict[col] * scale + bounds[0]
		y_pred_dict[col] = y_pred_dict[col] * scale + bounds[0]

	# Evaluate each pollutant and gather the per-step metrics in one table.
	evaluations_results = pd.DataFrame(list(range(1, pred_dim + 1)), columns = ['pred_tsp'])
	for col in target_cols:
		mae_results, r2_results, rmse_results, smape_results = model_evaluation(
			y_real_dict[col],
			y_pred_dict[col],
			col,
			show_fitting = True,
			show_scatter = False,
			show_evals = True,
			show_heatmap = False
		)
		col_evaluation = pd.DataFrame({
			col + '_mae': mae_results,
			col + '_rmse': rmse_results,
			col + '_r2': r2_results,
			col + '_smape': smape_results,
		})
		evaluations_results = pd.concat([evaluations_results, col_evaluation], axis = 1)

	evaluations_results.to_csv('../../file/model/nn_evaluation_results.csv', index = False)

	# Plot the train/verify loss curves (tail only, skipping warm-up epochs).
	with open('../../file/model/nn_train_loss.json', 'r') as f:
		train_loss_list = json.load(f)
	with open('../../file/model/nn_verify_loss.json', 'r') as f:
		verify_loss_list = json.load(f)

	plt.figure('loss curve', figsize = [4, 3])
	plt.plot(train_loss_list[4800:])
	plt.plot(verify_loss_list[4800:], 'r')
	plt.legend(['train set', 'verify set'])
	plt.xlabel('epoch')
	plt.ylabel('loss value')
	plt.tight_layout()

	# savefig operates on the current figure, i.e. the loss curve above.
	plt.savefig('../../graph/nn_train_verify_loss_curves.png')