import os


import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd


from utils import extract_time


class predictor(nn.Module):
    """GRU-based one-step-ahead regressor.

    Consumes sequences holding the first ``input_size`` feature columns and
    emits, per timestep, a single sigmoid-squashed prediction for the held-out
    last feature column.
    """

    def __init__(self, params):
        """Build the network.

        Args:
            params: dict with keys ``'input_size'`` (number of input feature
                columns) and ``'hidden_dim'`` (GRU hidden width).
        """
        super().__init__()
        self.input_size = params['input_size']
        self.hidden_size = params['hidden_dim']
        self.rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True)
        self.fc = nn.Linear(self.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        """Predict the last feature from the first ``input_size`` features.

        Args:
            input: float tensor of shape [batch, seq_len, input_size].

        Returns:
            Tensor of shape [batch, seq_len, 1] with values in (0, 1).
        """
        # Final hidden state is discarded; only the per-timestep outputs are
        # needed. (batch_first affects `rnn_out`, not the hidden state layout.)
        rnn_out, _ = self.rnn(input)
        # NOTE(review): sigmoid is applied even though the target is not a
        # probability — this assumes the data is normalized to [0, 1]; confirm.
        return self.sigmoid(self.fc(rnn_out))


def show_wave(real_values, pred_values, result_start, result_end, y_label, fig_dir, file_name='pred'):
    """Plot the real vs. predicted series over [result_start, result_end) and save as SVG.

    Args:
        real_values: 1-D sequence of ground-truth values (blue line).
        pred_values: 1-D sequence of predicted values (red line).
        result_start: first index (inclusive) of the plotted window.
        result_end: last index (exclusive) of the plotted window.
        y_label: label for the y axis.
        fig_dir: directory the figure is written to.
        file_name: output file stem; the figure is saved as ``<file_name>.svg``
            (default ``'pred'``, matching the previous hard-coded name).
    """
    plt.figure(figsize=(11, 3.4))
    plt.plot(real_values[result_start:result_end], linewidth=1, color="blue")
    plt.plot(pred_values[result_start:result_end], linewidth=1, color="red")

    plt.yticks(fontproperties='Times New Roman', fontsize=14)
    plt.xticks(fontproperties='Times New Roman', fontsize=14)
    plt.xlabel("Timestep", fontdict={'family': 'Times New Roman', 'size': 14})
    plt.ylabel(y_label, fontdict={'family': 'Times New Roman', 'size': 14})
    # `prop` fully determines the legend font, so no separate fontsize is
    # passed (it would be ignored when `prop` is given).
    plt.legend(labels=['Real Value', 'Predictive Value'], prop={'family': 'Times New Roman', 'size': '9'},
               loc='upper right')
    plt.subplots_adjust(bottom=0.15, left=0.05, right=0.98, top=0.98)
    plt.savefig(os.path.join(fig_dir, file_name + ".svg"), dpi=600)
    # Close the figure: without this, repeated calls accumulate open figures
    # and leak memory (matplotlib warns after 20 open figures).
    plt.close()


def predictive_score_metrics(ori_data, generated_data, device: torch.device = None, fig_dir='', plot_pred=False):
    """TSTR predictive score: train on generated data, test on real data.

    A one-step-ahead GRU predictor is trained on the generated (or imputed)
    data to predict the last feature from the preceding features, then its MAE
    is measured on the real data. A lower score means the generated data
    captured the temporal dependencies better.

    Args:
        ori_data: real data, shape [n_samples, seq_len, feature_dim].
        generated_data: generated/imputed data, same layout as ``ori_data``.
        device: torch device; defaults to CUDA when available, else CPU.
        fig_dir: directory for the optional prediction plot/CSV.
        plot_pred: when True (and ``fig_dir`` is set), dump a comparison plot
            and CSV of a slice of real vs. predicted values.

    Returns:
        Scalar ``np.ndarray`` with the test-set MAE.
    """
    # Fix the metric's seq_len to the smallest length used across experiments
    # so scores stay comparable.
    seq_len = 8
    ori_data = np.asarray(ori_data)[:, :seq_len, :]
    generated_data = np.asarray(generated_data)[:, :seq_len, :]

    no, seq_len, feature_dim = ori_data.shape
    # extract_time results are not used by this fixed-length evaluation; the
    # calls are kept for parity with the original pipeline. NOTE(review):
    # presumed side-effect free — confirm before removing.
    ori_time, ori_max_seq_len = extract_time(ori_data)
    generated_time, generated_max_seq_len = extract_time(generated_data)

    params = {
        'input_size': feature_dim - 1,
        'hidden_dim': int(feature_dim / 2) if feature_dim > 1 else 1,
    }

    iterations = 5000
    batch_size = 128
    if device is None:
        # Fall back to CPU instead of crashing on CUDA-less machines.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    netp = predictor(params).to(device)
    l_mae = nn.L1Loss()  # MAE loss
    optimizer = optim.Adam(netp.parameters())

    # TSTR: train with generated data, test with real data.
    # Training sampling is intentionally NOT seeded: the experiment is run
    # multiple times and the scores are averaged.
    for _ in range(iterations):
        # 1. Build a random mini-batch: features at steps [0, T-1) predict the
        #    last feature at steps [1, T).
        idx = np.random.permutation(len(generated_data))
        train_idx = idx[:batch_size]

        X_mb = np.array([generated_data[i][:-1, :(feature_dim - 1)] for i in train_idx])
        Y_mb = np.array([generated_data[i][1:, (feature_dim - 1)].reshape(-1, 1) for i in train_idx])

        X_mb = torch.tensor(X_mb, dtype=torch.float32).to(device)
        Y_mb = torch.tensor(Y_mb, dtype=torch.float32).to(device)

        # 2. Forward pass
        y_pred = netp(X_mb)

        # 3. Loss
        p_loss = l_mae(y_pred, Y_mb)

        # 4./5. Backward pass and parameter update
        optimizer.zero_grad()
        p_loss.backward()
        optimizer.step()

    # Test on the real data. The seed is fixed so the evaluation index order
    # is identical across runs.
    np.random.seed(5959894)
    # NOTE(review): the permutation size is hard-coded to the expected dataset
    # size (previously len(ori_data), changed so the sequence stays fixed);
    # indices >= len(ori_data) would raise IndexError on smaller datasets.
    idx = np.random.permutation(5300)
    test_idx = idx[:no]
    X_mb = np.array([ori_data[i][:-1, :(feature_dim - 1)] for i in test_idx])
    Y_mb = np.array([ori_data[i][1:, (feature_dim - 1)].reshape(-1, 1) for i in test_idx])

    X_tensor = torch.tensor(X_mb, dtype=torch.float32).to(device)
    Y_tensor = torch.tensor(Y_mb, dtype=torch.float32).to(device)

    # Evaluation only: switch to eval mode and skip autograd bookkeeping.
    netp.eval()
    with torch.no_grad():
        y_pred = netp(X_tensor)
        predictive_score = l_mae(y_pred, Y_tensor)

    # Optionally dump a real-vs-predicted comparison.
    if plot_pred and fig_dir != '':
        y_pred_np = y_pred.cpu().numpy().flatten()
        y_target_np = Y_tensor.cpu().numpy().flatten()
        data = np.stack([y_target_np[500:3000], y_pred_np[500:3000]], axis=1)
        save_csv(data, fig_dir, 'imputed')
        show_wave(y_target_np, y_pred_np, 500, 3000, '', fig_dir)
    return predictive_score.cpu().numpy()


def save_csv(data, log_dir, file_name):
    """Write *data* to ``<log_dir>/<file_name>.csv`` without an index column."""
    target_path = os.path.join(log_dir, f'{file_name}.csv')
    pd.DataFrame(data).to_csv(target_path, index=False)

def seed(seed_value):
    """Seed all RNGs used in this module for reproducible runs.

    Args:
        seed_value: the seed to apply everywhere; the sentinel ``-1`` means
            "do not seed" and the function returns immediately.
    """
    # -1 is the "leave RNGs alone" default.
    if seed_value == -1:
        return

    # Local import: only needed when seeding actually happens.
    import random
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)  # safe no-op on CPU-only builds
    # Per the PyTorch reproducibility notes, cudnn.deterministic alone is not
    # enough: benchmark mode must also be disabled, otherwise cuDNN may pick
    # different (non-deterministic) algorithms between runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False