import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from sklearn.metrics import accuracy_score


from utils import extract_time, train_test_divide, get_logger
from lib.data import batch_generator


class discriminator(nn.Module):
    """Binary real/fake classifier for time-series: single-layer GRU + linear head.

    Expected ``params`` keys:
        input_size (int): feature dimension of each timestep.
        hidden_dim (int): GRU hidden size.
    """

    def __init__(self, params):
        super(discriminator, self).__init__()
        self.input_size = params['input_size']
        self.hidden_size = params['hidden_dim']
        # NOTE: the original passed dropout=0.1 to the GRU, which is a no-op
        # for num_layers=1 (PyTorch applies dropout only *between* stacked
        # layers) and emits a UserWarning; an unused nn.Dropout module was
        # also defined. Both removed -- behavior is unchanged.
        self.rnn = nn.GRU(num_layers=1, input_size=self.input_size,
                          hidden_size=self.hidden_size, batch_first=True)
        self.fc = nn.Linear(self.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        """Classify a batch of sequences.

        Args:
            input: float tensor of shape [bsz, seq_len, input_size].

        Returns:
            (y_hat_logit, y_hat): raw logits and sigmoid probabilities,
            each of shape [bsz, 1].
        """
        # hidden_state is [num_layers * num_dirs, bsz, hidden_size];
        # batch_first does not affect the hidden-state layout.
        output, hidden_state = self.rnn(input)
        # Summarize the whole sequence with the final hidden state of the
        # last (only) layer.
        y_hat_logit = self.fc(hidden_state[-1])  # [bsz, 1]
        # Probabilities for evaluation; training uses BCEWithLogitsLoss on
        # the raw logits (sigmoid is applied inside the loss).
        y_hat = self.sigmoid(y_hat_logit)

        return y_hat_logit, y_hat


def discriminative_score_metrics(ori_data, generated_data, device: torch.device = None, log_dir="", iterations=4000):
    """Post-hoc discriminative score: train a GRU discriminator to tell real
    sequences from generated ones and report ``|accuracy - 0.5|``.

    Args:
        ori_data: real data, array-like of shape [no, seq_len, dim].
        generated_data: synthetic data of the same shape.
        device: torch device; defaults to CUDA when available, else CPU.
        log_dir: when non-empty, a logger is created there and per-iteration
            losses are logged.
        iterations: number of training mini-batches.

    Returns:
        float in [0, 0.5]; lower is better (accuracy 0.5 == random guessing,
        i.e. the discriminator cannot separate real from generated).

    NOTE: do not fix the RNG seed here -- the metric is meant to be averaged
    over several runs with different train/test splits.
    """
    if log_dir != "":
        logger = get_logger(log_dir, "dis", "dis.log")

    # Truncate both datasets to a fixed evaluation length.
    # TODO(review): seq_len is hard-coded to 8 -- confirm whether it should
    # instead come from a parameter.
    seq_len = 8
    ori_data = np.asarray(ori_data)[:, :seq_len, :]
    generated_data = np.asarray(generated_data)[:, :seq_len, :]

    no, seq_len, feature_dim = np.asarray(ori_data).shape
    ori_time, ori_max_seq_len = extract_time(ori_data)
    generated_time, generated_max_seq_len = extract_time(generated_data)
    max_seq_len = max(ori_max_seq_len, generated_max_seq_len)

    params = dict()
    params['input_size'] = feature_dim
    params['hidden_dim'] = int(feature_dim / 2) if feature_dim > 1 else 1

    batch_size = 128
    if device is None:
        # Fall back to CPU so the metric also runs on machines without CUDA
        # (the original unconditionally requested cuda:0 and crashed there).
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    netd = discriminator(params).to(device)

    # BCEWithLogitsLoss applies the sigmoid itself, so the raw logits are used.
    l_bce = nn.BCEWithLogitsLoss()

    optimizer = optim.Adam(netd.parameters(), lr=0.001, betas=(0.9, 0.999))

    # The *_hat tensors hold generated data (label 0). The split shuffles
    # internally, so the held-out test set is disjoint from training data.
    train_x, train_x_hat, test_x, test_x_hat, train_t, train_t_hat, test_t, test_t_hat = \
        train_test_divide(ori_data, generated_data, ori_time, generated_time)

    for itt in range(iterations):
        # 1. Sample mini-batches of real and generated sequences.
        X0, T = batch_generator(train_x, train_t, batch_size)
        X_mb = torch.tensor(np.array(X0), dtype=torch.float32).to(device)

        X_hat0, T_hat = batch_generator(train_x_hat, train_t_hat, batch_size)
        X_hat_mb = torch.tensor(np.array(X_hat0), dtype=torch.float32).to(device)

        # 2. Forward pass -- keep only the logits for the loss.
        y_logit_real, _ = netd(X_mb)      # [bsz, 1]
        y_logit_fake, _ = netd(X_hat_mb)  # [bsz, 1]

        # 3. Real sequences are labelled 1, generated ones 0.
        d_loss_real = l_bce(y_logit_real, torch.ones_like(y_logit_real))
        d_loss_fake = l_bce(y_logit_fake, torch.zeros_like(y_logit_fake))
        d_loss = d_loss_fake + d_loss_real

        # 4. Backward pass.
        optimizer.zero_grad()
        d_loss.backward()

        # 5. Parameter update.
        optimizer.step()
        if log_dir != "":
            logger.info("{}/{}: loss: {:.4f}".format(itt, iterations, float(d_loss.cpu().detach().numpy())))

    # Evaluate on the held-out split. Do NOT re-split here: a fresh shuffle
    # could leak training samples into the test set and inflate accuracy.
    test_x = torch.tensor(np.array(test_x), dtype=torch.float32).to(device)
    test_x_hat = torch.tensor(np.array(test_x_hat), dtype=torch.float32).to(device)

    with torch.no_grad():
        # Use the sigmoid output (index 1); squeeze [bsz, 1] -> [bsz].
        test_y_pred_real = netd(test_x)[1].cpu().numpy().squeeze()
        test_y_pred_fake = netd(test_x_hat)[1].cpu().numpy().squeeze()

    y_pred = np.squeeze(np.concatenate((test_y_pred_real, test_y_pred_fake), axis=0))
    y_label = np.concatenate((np.ones([len(test_y_pred_real), ]), np.zeros([len(test_y_pred_fake), ])), axis=0)

    # Probability > 0.5 => predicted real.
    acc = accuracy_score(y_label, (y_pred > 0.5).astype(int))
    print('acc:'+str(acc))

    # Distance from chance level: smaller is better; 0 means the
    # discriminator is no better than random at separating the two sets.
    discriminative_score = np.abs(0.5-acc)

    return discriminative_score

def seed(seed_value):
    """Seed Python, NumPy and PyTorch RNGs for reproducibility.

    A ``seed_value`` of -1 is the "unseeded" sentinel and makes this a no-op.
    """
    # Sentinel: leave all randomness untouched.
    if seed_value == -1:
        return

    import random

    # Seed every RNG source in use, CPU and (when present) all CUDA devices.
    for seeder in (random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all,
                   np.random.seed):
        seeder(seed_value)

    # Request deterministic cuDNN kernels as well.
    torch.backends.cudnn.deterministic = True