import os
import time
import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from RNN_VAE import Path2Vec
from load_data_translate import load_data_translate


def gpu_setup(use_gpu, gpu_id):
    """Select the compute device and pin the process to a single GPU.

    Args:
        use_gpu: whether CUDA should be used when it is available.
        gpu_id: index of the GPU exposed via ``CUDA_VISIBLE_DEVICES``.

    Returns:
        torch.device: ``"cuda"`` if requested and available, else ``"cpu"``.
    """
    # Make device numbering match `nvidia-smi` and restrict to one card.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")


def train_epoch(model, optimizer, device, data_loader, data_arrays):
    """Run one training epoch over ``data_loader``.

    Args:
        model: seq2seq VAE exposing ``__call__(enc_X, dec_X) -> (output, mu, sigma)``
            and ``loss(output, target, mu, sigma) -> (loss, recons_loss, KL_loss)``.
        optimizer: torch optimizer over ``model.parameters()``.
        device: torch.device the batch tensors are moved to.
        data_loader: iterable yielding batches ``(X, X_valid_len, Y, Y_valid_len)``.
        data_arrays: unused here; kept for interface compatibility with callers.

    Returns:
        (mean loss, mean KL, mean reconstruction loss,
         per-batch KL list, optimizer)
    """
    model.train()
    epoch_loss = 0.0
    epoch_kld = 0.0
    epoch_recons_loss = 0.0
    epoch_kld_list = []  # per-batch KL values, to inspect the KL trajectory
    num_batches = 0
    for batch in data_loader:
        # X, Y: (batch_size, num_steps); X_valid_len, Y_valid_len: (batch_size,)
        X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]

        # Teacher forcing: prepend <bos> (token id 2 -- TODO confirm against
        # the vocab) and drop the last token to form the decoder input.
        bos = torch.full((X.shape[0], 1), 2, dtype=torch.long, device=device)
        dec_input = torch.cat([bos, X[:, :-1]], dim=1)

        # Call the module (not .forward) so registered hooks still fire.
        output, mu, sigma = model(X, dec_input)
        loss, recons_loss, KL_loss = model.loss(output, X, mu, sigma)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss += loss.detach().item()
        kld_value = KL_loss.detach().item()
        epoch_kld += kld_value
        epoch_kld_list.append(kld_value)
        epoch_recons_loss += recons_loss.detach().item()
        num_batches += 1

    num_batches = max(num_batches, 1)  # guard against an empty loader
    epoch_loss /= num_batches
    epoch_kld /= num_batches
    epoch_recons_loss /= num_batches
    # The optimizer is returned unchanged for backward compatibility.
    return epoch_loss, epoch_kld, epoch_recons_loss, epoch_kld_list, optimizer


def val_epoch(model, device, data_loader):
    """Evaluate the model for one epoch with gradients disabled.

    Args:
        model: VAE exposing ``__call__``/``loss`` as in ``train_epoch``.
        device: torch.device each batch is moved to.
        data_loader: iterable of dict batches with key ``'path'`` holding a
            (batch_size, seq_len) tensor of token ids.

    Returns:
        (mean loss, mean KL, mean reconstruction loss)

    NOTE(review): this uses token ids 0/1 as <bos>/<eos>, while ``train_epoch``
    uses 2 as <bos> -- presumably different vocabularies; verify before use.
    """
    model.eval()
    epoch_loss = 0.0
    epoch_kld = 0.0
    epoch_recons_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for batch_path in data_loader:
            inputs = batch_path['path'].to(device)
            batch_size = inputs.size(0)
            bos = torch.full((batch_size, 1), 0, dtype=torch.long, device=inputs.device)
            eos = torch.full((batch_size, 1), 1, dtype=torch.long, device=inputs.device)
            encoder_inputs = torch.cat((inputs, eos), dim=1)
            decoder_inputs = torch.cat((bos, inputs), dim=1)
            # Call the module (not .forward) so registered hooks still fire.
            output, mu, sigma = model(encoder_inputs, decoder_inputs)
            # The target is the sequence followed by <eos>, i.e. exactly the
            # encoder input -- reuse it instead of rebuilding the tensor.
            loss, recons_loss, KL_loss = model.loss(output, encoder_inputs, mu, sigma)
            epoch_loss += loss.item()
            epoch_kld += KL_loss.item()
            epoch_recons_loss += recons_loss.item()
            num_batches += 1
    num_batches = max(num_batches, 1)  # guard against an empty loader
    return (epoch_loss / num_batches,
            epoch_kld / num_batches,
            epoch_recons_loss / num_batches)


def train_model(config):
    """Build the Path2Vec model and run the full training loop.

    Args:
        config: Config object carrying data paths, hyper-parameters and device.

    Returns:
        (trained model, list of per-batch KL values accumulated over all epochs)
    """
    # Load the training set; num_steps/num_examples are fixed here --
    # TODO(review): consider moving them into Config.
    train_loader, data_arrays, src_vocab, tgt_vocab = load_data_translate(
        config.train_path,
        batch_size=config.batch_size,
        num_steps=10,
        num_examples=1000,
        method='char')

    model = Path2Vec(input_size=len(src_vocab),
                     embed_size=config.embed_size,
                     hidden_size=config.hidden_size,
                     hidden_layers=config.hidden_layers,
                     latent_size=config.latent_size,
                     dropout=config.dropout,
                     device=config.device)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)

    kld_list = []  # KL history across all batches/epochs, for later inspection
    for epoch in range(config.num_epochs):
        tic = time.time()
        (train_loss, train_kld, train_recons_loss,
         train_kld_list, optimizer) = train_epoch(
            model, optimizer, config.device, train_loader, data_arrays)
        kld_list.extend(train_kld_list)
        elapsed = time.time() - tic

        # Per-epoch progress report.
        print('*' * 80)
        print(f"epoch:{epoch + 1:d}\n"
              f"train_loss:{train_loss:.5f}  "
              f"train_recons_loss:{train_recons_loss:.8f}  "
              f"train_KLD:{train_kld:.8f}\n"
              f"epoch_time:{elapsed:.5f} s")

        # Qualitative check: reconstruct two training examples each epoch.
        reconstruct_path(model, data_arrays, index=0)
        reconstruct_path(model, data_arrays, index=1)

    return model, kld_list



def reconstruct_path(model, data_arrays, index):
    """Print one training example alongside the model's reconstruction of it.

    Args:
        model: model exposing ``reconstruct(batch)``.
        data_arrays: dataset tensors; ``data_arrays[0]`` holds the source
            sequences, indexed by ``index`` -- assumed shape (N, num_steps).
        index: which example to reconstruct.
    """
    # `src` instead of `input`: avoid shadowing the builtin.
    src = data_arrays[0][index].unsqueeze(0)  # add a batch dimension
    reconstruction = model.reconstruct(src)
    print("输入：", src)
    print("重构：", reconstruction)


class Config(object):
    """Hyper-parameters, data locations and device for training."""

    def __init__(self):
        # Data locations
        self.train_path = "../DataSet/en-zh-char.txt"
        self.val_path = "D:/DataSet/PATH_data/pop/5.txt"

        # Model architecture
        self.input_size = 64
        self.embed_size = 128
        self.hidden_size = 128
        self.hidden_layers = 2
        self.latent_size = 128
        # NOTE(review): a dropout of 1.0 would zero every unit if interpreted
        # as a drop probability -- confirm Path2Vec's meaning of this value.
        self.dropout = 1.0

        # Optimization settings
        self.batch_size = 64
        self.num_epochs = 100
        self.learning_rate = 0.001
        self.device = gpu_setup(True, 0)  # prefer GPU 0 when available



if __name__ == '__main__':
    # Script entry point: build the configuration and train the model.
    cfg = Config()
    trained_model, kld_history = train_model(cfg)