import numpy as np

import torch
from torch import nn
from torch.nn import functional as F

from load_data_translate import load_data_translate

class Encoder(nn.Module):
    """GRU encoder mapping an embedded sequence to a latent Gaussian.

    forward() returns a reparameterized latent sample together with the
    posterior mean and standard deviation.
    """

    def __init__(self, input_size, embed_size, hidden_size, hidden_layers, latent_size, dropout):
        super().__init__()
        self.input_size = input_size
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.hidden_layers = hidden_layers
        self.latent_size = latent_size
        self.dropout = dropout

        self.rnn = nn.GRU(
            input_size=self.embed_size,
            hidden_size=self.hidden_size,
            num_layers=self.hidden_layers,
            dropout=self.dropout,
            batch_first=True)

        # The final hidden states of all layers are flattened and projected
        # to the latent mean / log-variance.
        self.rnn2mean = nn.Linear(
            in_features=self.hidden_size * self.hidden_layers,
            out_features=self.latent_size)

        self.rnn2log_var = nn.Linear(
            in_features=self.hidden_size * self.hidden_layers,
            out_features=self.latent_size)

    def forward(self, embeddings):
        """Encode a batch of embedded sequences.

        Args:
            embeddings: (batch, seq_len, embed_size) tensor.

        Returns:
            latent_sample: (hidden_layers, batch, latent_size) reparameterized
                sample — one per decoder layer (see sample_normal).
            mean: (batch, latent_size) posterior mean.
            std: (batch, latent_size) posterior standard deviation.
        """
        batch_size = embeddings.size(0)
        state = self.init_state(dim=batch_size, device=embeddings.device)
        _, state = self.rnn(embeddings, state)
        # state: (hidden_layers, batch, hidden). Move batch first BEFORE
        # flattening; a plain .view(batch, layers*hidden) on the raw tensor
        # would interleave different batch elements whenever hidden_layers > 1.
        state = state.transpose(0, 1).reshape(batch_size, self.hidden_size * self.hidden_layers)
        mean = self.rnn2mean(state)
        log_var = self.rnn2log_var(state)
        z = self.sample_normal(dim=batch_size, device=embeddings.device)
        std = torch.exp(0.5 * log_var)
        # Reparameterization trick; z broadcasts against (batch, latent) to
        # produce a per-layer latent sample.
        latent_sample = z * std + mean
        return latent_sample, mean, std

    def sample_normal(self, dim, device):
        # One standard-normal draw per decoder layer and batch element.
        z = torch.randn((self.hidden_layers, dim, self.latent_size), device=device, requires_grad=False)
        return z

    def init_state(self, dim, device):
        # Zero-initialized GRU hidden state: (layers, batch, hidden).
        state = torch.zeros((self.hidden_layers, dim, self.hidden_size), device=device, requires_grad=False)
        return state

class Decoder(nn.Module):
    """GRU decoder that turns embedded tokens plus a hidden state into
    per-step vocabulary logits."""

    def __init__(self, embed_size, latent_size, hidden_size, hidden_layers, dropout, output_size):
        super().__init__()
        self.embed_size = embed_size
        self.latent_size = latent_size
        self.hidden_size = hidden_size
        self.hidden_layers = hidden_layers
        self.output_size = output_size
        self.dropout = dropout

        # Recurrent core over the embedded input tokens.
        self.rnn = nn.GRU(
            input_size=self.embed_size,
            hidden_size=self.hidden_size,
            num_layers=self.hidden_layers,
            dropout=self.dropout,
            batch_first=True)

        # Per-timestep projection from hidden features to vocabulary logits.
        self.rnn2out = nn.Linear(
            in_features=hidden_size,
            out_features=output_size)

    def forward(self, embeddings, state):
        """Run the decoder.

        Args:
            embeddings: (batch, seq_len, embed_size) embedded tokens.
            state: (hidden_layers, batch, hidden_size) initial hidden state.

        Returns:
            (logits of shape (batch, seq_len, output_size), updated state).
        """
        n_batch = embeddings.size(0)
        rnn_output, state = self.rnn(embeddings, state)
        state = state.view(self.hidden_layers, n_batch, self.hidden_size)
        logits = self.rnn2out(rnn_output)
        return logits, state

class Path2Vec(nn.Module):
    """Sequence VAE: embeds token paths, encodes them to a latent Gaussian,
    and decodes latent samples back to per-token vocabulary logits."""

    def __init__(self, input_size, embed_size, hidden_size, hidden_layers, latent_size, dropout, device):
        super().__init__()
        self.input_size = input_size
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.hidden_layers = hidden_layers
        self.latent_size = latent_size
        self.dropout = dropout
        self.device = device

        # Shared embedding table for encoder and decoder inputs.
        self.embedding = nn.Embedding(input_size, embed_size)

        # Projects a latent sample to the decoder's initial hidden state.
        self.latent2rnn = nn.Linear(
            in_features=self.latent_size,
            out_features=self.hidden_size)

        self.encoder = Encoder(
            input_size=self.input_size,
            embed_size=self.embed_size,
            hidden_size=self.hidden_size,
            hidden_layers=self.hidden_layers,
            latent_size=self.latent_size,
            dropout=self.dropout)

        self.decoder = Decoder(
            embed_size=self.embed_size,
            latent_size=self.latent_size,
            hidden_size=self.hidden_size,
            hidden_layers=self.hidden_layers,
            dropout=self.dropout,
            output_size=self.input_size)

    def forward(self, encoder_inputs, decoder_inputs):
        """Teacher-forced VAE pass.

        Args:
            encoder_inputs: (batch, num_steps) token ids to encode.
            decoder_inputs: (batch, num_steps) token ids fed to the decoder.

        Returns:
            (logits of shape (batch, num_steps, input_size), mu, sigma);
            sigma is the posterior STANDARD DEVIATION from the encoder.
        """
        batch_size = encoder_inputs.size(0)
        encoder_embeddings = self.embedding(encoder_inputs)
        encoder_embeddings = F.dropout(encoder_embeddings, p=self.dropout, training=self.training)
        z, mu, sigma = self.encoder(encoder_embeddings)
        # z: (hidden_layers, batch, latent) -> decoder initial hidden state.
        state = self.latent2rnn(z)
        state = state.view(self.hidden_layers, batch_size, self.hidden_size)
        decoder_embeddings = self.embedding(decoder_inputs)
        decoder_embeddings = F.dropout(decoder_embeddings, p=self.dropout, training=self.training)
        output, state = self.decoder(decoder_embeddings, state)
        return output, mu, sigma

    def loss(self, output, target, mu, sigma):
        """ELBO loss: reconstruction cross-entropy + KL(q(z|x) || N(0, I)).

        `sigma` is the posterior standard deviation (as returned by Encoder).
        """
        criterion = nn.CrossEntropyLoss()
        # CrossEntropyLoss expects (batch, classes, steps).
        recons_loss = criterion(output.permute(0, 2, 1), target)
        # KL for a diagonal Gaussian: -0.5 * sum(1 + log(var) - mu^2 - var)
        # with var = sigma^2, so log(var) = 2*log(sigma).
        # (Bug fix: the original applied the log-variance formula directly
        # to sigma, i.e. used `sigma` where log(var) belongs.)
        KL_loss = -0.5 * torch.sum(1 + 2.0 * torch.log(sigma) - mu.pow(2) - sigma.pow(2))
        loss = recons_loss + KL_loss
        return loss, recons_loss, KL_loss

    def reconstruct(self, encoder_inputs):
        """Given an input path, greedily decode a reconstruction.

        NOTE(review): the decode loop feeds a single (1, 1) token, so this
        only supports batch size 1. Token id 0 is used as the start symbol —
        presumably the vocab's special/start index; confirm against the
        vocabulary used by callers.
        """
        batch_size = encoder_inputs.size(0)
        encoder_embeddings = self.embedding(encoder_inputs)
        encoder_embeddings = F.dropout(encoder_embeddings, p=self.dropout, training=self.training)
        z, mu, sigma = self.encoder(encoder_embeddings)
        state = self.latent2rnn(z)
        state = state.view(self.hidden_layers, batch_size, self.hidden_size)
        # Greedy step-by-step decoding seeded with token id 0.
        token = torch.zeros((1, 1), dtype=torch.long, device=encoder_inputs.device)
        num_steps = encoder_inputs.size(1)
        output_path = []  # reconstructed token ids
        for _ in range(num_steps):
            embedding = self.embedding(token)
            output, state = self.decoder(embedding, state)
            token = output.argmax(dim=2)
            output_path.append(int(token.item()))
        return output_path

    def generate(self, z, num_steps, device):
        """Greedily decode `num_steps` tokens from latent sample `z`
        (expects a shape compatible with (hidden_layers, 1, latent_size)
        after latent2rnn)."""
        state = self.latent2rnn(z)
        state = state.view(self.hidden_layers, 1, self.hidden_size)
        # Greedy step-by-step decoding seeded with token id 0.
        token = torch.zeros((1, 1), dtype=torch.long, device=device)
        output_path = []  # generated token ids
        for _ in range(num_steps):
            embedding = self.embedding(token)
            output, state = self.decoder(embedding, state)
            token = output.argmax(dim=2)
            output_path.append(int(token.item()))
        return output_path

    def transform(self, encoder_inputs):
        """Return a latent sample for the given token paths."""
        encoder_embeddings = self.embedding(encoder_inputs)
        encoder_embeddings = F.dropout(encoder_embeddings, p=self.dropout, training=self.training)
        z, mu, sigma = self.encoder(encoder_embeddings)
        return z


if __name__ == '__main__':
    # Smoke test: load a small translation dataset and autoencode the
    # source sequences with teacher forcing.
    file_path = "../DataSet/en-zh-char.txt"
    data_loader, data_arrays, src_vocab, tgt_vocab = load_data_translate(file_path, batch_size=4, num_steps=10, num_examples=100, method='char')

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Path2Vec(input_size=len(src_vocab), embed_size=64, hidden_size=64, hidden_layers=1, latent_size=64, dropout=0.0, device=device)
    model.to(device)

    for batch_idx, batch in enumerate(data_loader):
        # X,Y: (batch_size, num_steps); X_valid_len, Y_valid_len: (batch_size,)
        X, X_valid_len, Y, Y_valid_len = (t.to(device) for t in batch)

        # Decoder input: <bos> prepended to X, last token dropped.
        bos_col = torch.full((X.shape[0], 1), src_vocab['<bos>'], dtype=torch.long, device=device)
        dec_input = torch.cat([bos_col, X[:, :-1]], dim=1)
        output, mu, sigma = model(X, dec_input)

        loss, recons_loss, KL_loss = model.loss(output, X, mu, sigma)
        print(loss, recons_loss, KL_loss)

    # Reconstruct the first example and compare it with the original.
    sample = data_arrays[0][0].unsqueeze(0)
    reconstruction = model.reconstruct(sample)
    print(sample)
    print(reconstruction)
