import fitlog
import torch.nn as nn
import torch
import torch.nn.functional as F
import re
import random
import torch.optim as optim
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import time
import math
import pickle
import pdb
import os
from tensorboardX import SummaryWriter
import numpy as np
import sys
sys.path.append("..")
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule

# All tensors/modules below are created on this device (GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Reserved vocabulary indices for the start-of-sentence and end-of-sentence markers
# (Lang.__init__ always maps "SOS" -> 0 and "EOS" -> 1).
SOS_token = 0
EOS_token = 1

# Pairs with a sentence of this many tokens or more are dropped (see filterPair).
MAX_LENGTH = 100

def showPlot(points):
    """Plot a sequence of values (e.g. losses) with y-axis ticks every 0.2.

    The file switches matplotlib to the non-interactive 'agg' backend, so
    nothing is displayed; callers must save the current figure themselves.
    """
    # BUG FIX: the original called plt.figure() immediately before
    # plt.subplots(), leaking an extra empty figure on every call.
    fig, ax = plt.subplots()
    # This locator puts ticks at regular intervals.
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    ax.plot(points)

class Lang:
    """Vocabulary mapping between words and integer indices.

    Index 0 is always "SOS" and index 1 is always "EOS"; every other word gets
    the next free consecutive index as it is added.
    """

    def __init__(self, name, pre_defined_words=None):
        self.name = name

        if pre_defined_words is not None:
            # Seed the vocabulary with a predefined word list, shifted past
            # the two reserved indices.
            cur = 2
            self.word2index = {w: idx + cur for idx, w in enumerate(pre_defined_words)}
            self.word2index.update({"SOS": 0, "EOS": 1})
            self.index2word = {idx: w for w, idx in self.word2index.items()}
            self.word2count = {w: 0 for w in self.word2index}
        else:
            self.word2index = {"SOS": 0, "EOS": 1}
            self.word2count = {"SOS": 0, "EOS": 1}
            self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = len(self.word2index)  # Count includes SOS and EOS

    def addSentence(self, sentence):
        """Add every space-separated word of `sentence` to the vocabulary."""
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        """Register `word` with the next free index, or bump its count."""
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1

    def Caches(self, fname="../saved/AutoEncoder.lang"):
        """Pickle the vocabulary mappings to `fname`."""
        with open(fname, "wb") as fw:
            pickle.dump((self.name, self.word2index, self.index2word, self.n_words), fw, protocol=pickle.HIGHEST_PROTOCOL)

    def Load(self, fname="../saved/AutoEncoder.lang"):
        """Restore a vocabulary previously saved with Caches()."""
        with open(fname, "rb") as fr:
            (self.name, self.word2index, self.index2word, self.n_words) = pickle.load(fr)
        # BUG FIX: Caches() does not persist word2count, so a loaded Lang had
        # no counts at all and addWord() on an existing word raised. Rebuild
        # the counts zeroed so the instance stays fully usable.
        self.word2count = {w: 0 for w in self.word2index}

    def EmbeddingLayer(self, emb_dim=300, uniform_init=(-0.8, 0.8), word2vec=None):
        """Build an nn.Embedding over this vocabulary.

        Words found in `word2vec` get their pretrained vector; the rest are
        initialised uniformly in the `uniform_init` range.
        """
        emb_layer = nn.Embedding(self.n_words, emb_dim)
        if word2vec is not None:
            # BUG FIX: row i of the weight matrix must correspond to index i.
            # The original stacked rows in dict-iteration order, which differs
            # from index order whenever pre_defined_words was used (SOS/EOS
            # carry indices 0/1 but are inserted last). Also force float32 so
            # pretrained rows and random rows can be stacked together.
            words = [self.index2word[i] for i in range(self.n_words)]
            weight = torch.stack([torch.tensor(word2vec[w], dtype=torch.float32) if w in word2vec
                                  else torch.tensor(np.random.uniform(uniform_init[0], uniform_init[1], [emb_dim]),
                                                    dtype=torch.float32)
                                  for w in words])
            emb_layer.weight.data.copy_(weight)
        return emb_layer

class SelfAttention(nn.Module):
    """Multi-head scaled dot-product attention with residual + LayerNorm + ReLU.

    `query_states` attends over `hidden_states`; when `output_attentions` is
    set, the attention probabilities are returned alongside the context.
    """

    def __init__(self, hidden_size, num_attention_heads, output_attentions, lay_norm_eps=1e-8):
        super(SelfAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads))
        self.output_attentions = output_attentions
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = hidden_size // num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # One projection per attention role.
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)

        self.layer_norm = torch.nn.LayerNorm(hidden_size, eps=lay_norm_eps)
        self.act_fn = torch.nn.functional.relu

    def transpose_for_scores(self, x):
        """Reshape [batch, seq, dim] into [batch, heads, seq, head_dim]."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, query_states, hidden_states, head_mask=None):
        """Attend `query_states` over `hidden_states`.

        :param query_states: [batch, seq_q, dim]
        :param hidden_states: [batch, seq_k, dim]
        :param head_mask: optional multiplicative mask over attention probs
        :return: (context,) or (context, attention_probs)
        """
        q = self.transpose_for_scores(self.query(query_states))  # [batch, head, seq_q, head_dim]
        k = self.transpose_for_scores(self.key(hidden_states))   # [batch, head, seq_k, head_dim]
        v = self.transpose_for_scores(self.value(hidden_states))

        # Raw scaled dot-product scores: [batch, head, seq_q, seq_k].
        scores = torch.matmul(q, k.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        probs = F.softmax(scores, dim=-1)

        # Zero out heads on request (mask is multiplicative).
        if head_mask is not None:
            probs = probs * head_mask

        context = torch.matmul(probs, v)                    # [batch, head, seq_q, head_dim]
        context = context.permute(0, 2, 1, 3).contiguous()  # [batch, seq_q, head, head_dim]
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)               # [batch, seq_q, dim]
        # Residual connection, then LayerNorm and ReLU.
        context = self.act_fn(self.layer_norm(context + query_states))

        if self.output_attentions:
            return (context, probs)
        return (context,)

class EncoderLSTM(nn.Module):
    """Thin wrapper around a (possibly bidirectional) multi-layer nn.LSTM.

    NOTE(review): `input_size` is unused — the LSTM's input size is tied to
    `hidden_size` so that it matches the embedding dimension used elsewhere.
    """

    def __init__(self, input_size, hidden_size, num_layers, bidirec, drop_out=0.1):
        super(EncoderLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_direc = 2 if bidirec else 1
        # Input size equals hidden_size so embeddings feed straight in.
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers=num_layers,
                            bidirectional=bidirec, dropout=drop_out).to(device)

    def forward(self, input, hidden):
        """Run the LSTM; `hidden` is the (h0, c0) pair from initHidden()."""
        return self.lstm(input, hidden)

    def initHidden(self, batch_size):
        """Zero (h0, c0) states shaped [layers*directions, batch, hidden]."""
        shape = (self.num_direc * self.num_layers, batch_size, self.hidden_size)
        return (torch.zeros(*shape, device=device),
                torch.zeros(*shape, device=device))


class EncoderGRU(nn.Module):
    """Thin wrapper around a (possibly bidirectional) multi-layer nn.GRU.

    NOTE(review): `input_size` is unused — the GRU's input size is tied to
    `hidden_size` so that it matches the embedding dimension used elsewhere.
    """

    def __init__(self, input_size, hidden_size, num_layers, bidirec):
        super(EncoderGRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_direc = 2 if bidirec else 1
        # Input size equals hidden_size so embeddings feed straight in.
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers=self.num_layers,
                          bidirectional=bidirec).to(device)

    def forward(self, input, hidden, batch_first=False):
        """Run the GRU. With batch_first=True both tensors are given and
        returned batch-major and transposed internally (the GRU is seq-major).
        """
        if not batch_first:
            return self.gru(input, hidden)
        output, hidden = self.gru(input.transpose(1, 0).contiguous(),
                                  hidden.transpose(1, 0).contiguous())
        return output.transpose(1, 0).contiguous(), hidden.transpose(1, 0).contiguous()

    def initHidden(self, batch_size, batch_first=False):
        """Zero initial hidden state; layout follows `batch_first`."""
        shape = [self.num_layers * self.num_direc, batch_size, self.hidden_size]
        if batch_first:
            shape[0], shape[1] = shape[1], shape[0]
        return torch.zeros(*shape, device=device)

class DecoderGRU(nn.Module):
    """Single-step GRU decoder: one GRU pass, then a vocabulary projection
    with log-softmax over the first time step only."""

    def __init__(self, hidden_size, output_size, num_layers, bi_direc, drop_out=0.1):
        super(DecoderGRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_direc = 2 if bi_direc else 1
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers=self.num_layers,
                          bidirectional=bi_direc, dropout=drop_out).to(device)
        self.out = nn.Linear(hidden_size * self.num_direc, output_size).to(device)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """Decode one step; returns log-probs [batch, vocab] taken from the
        first time step, plus the updated hidden state."""
        gru_out, hidden = self.gru(input, hidden)
        logits = self.out(gru_out[0])
        return self.softmax(logits), hidden

    def initHidden(self):
        """Zero hidden state for batch size 1."""
        return torch.zeros(self.num_layers * self.num_direc, 1, self.hidden_size, device=device)

class AttnDecoderLSTM(nn.Module):
    """LSTM decoder with position-based attention over encoder outputs
    (attention weights come from [embedded input ; hidden state], then are
    applied to the encoder outputs — the classic PyTorch-tutorial scheme).
    """

    def __init__(self, hidden_size, output_size, num_layers, bi_direc, drop_out=0.1, max_length=256):
        super(AttnDecoderLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_direc = 2 if bi_direc else 1
        self.max_length = max_length
        self.dropout_p = drop_out  # keep the rate available for introspection

        self.attn = nn.Linear(self.hidden_size * 2, self.max_length).to(device)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size).to(device)
        # BUG FIX: the original read `self.dropout_p` before any assignment,
        # so instantiation always raised AttributeError; use the constructor
        # argument directly.
        self.dropout = nn.Dropout(drop_out)
        self.gru = nn.LSTM(self.hidden_size, self.hidden_size, num_layers=num_layers,
                           bidirectional=bi_direc, dropout=drop_out).to(device)
        self.out = nn.Linear(self.hidden_size, self.output_size).to(device)

    def forward(self, input, hidden, encoder_outputs):
        """Decode one step.

        :param input: embedded input token, [1, 1, hidden]
        :param hidden: (h, c) LSTM state
        :param encoder_outputs: [max_length, hidden] encoder outputs
        :return: (log-probs [1, vocab], new hidden, attention weights)

        NOTE(review): the `torch.cat((embedded[0], hidden[0]), 1)` step only
        lines up for a single-layer, batch-1 state — confirm before using
        num_layers > 1 here.
        """
        embedded = self.dropout(input)
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self, batch_size):
        """Zero (h0, c0) shaped [layers*directions, batch, hidden]."""
        return (torch.zeros(self.num_layers * self.num_direc, batch_size, self.hidden_size, device=device),
                torch.zeros(self.num_layers * self.num_direc, batch_size, self.hidden_size, device=device))


def indexesFromSentence(lang, sentence):
    """Map each space-separated word of `sentence` to its index in `lang`."""
    return list(map(lang.word2index.__getitem__, sentence.split(' ')))


def tensorFromSentence(lang, sentence):
    """Encode `sentence` as an EOS-terminated column tensor of word indices,
    shaped [len+1, 1]."""
    indexes = indexesFromSentence(lang, sentence) + [EOS_token]
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)


def tensorsFromPair(pair, input_lang, output_lang):
    """Convert a (source, target) sentence pair into a pair of index tensors."""
    source, target = pair[0], pair[1]
    return (tensorFromSentence(input_lang, source),
            tensorFromSentence(output_lang, target))




def asMinutes(s):
    """Format a duration of `s` seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)


def timeSince(since, percent):
    """Elapsed time since `since` plus the estimated time remaining, given
    the fraction `percent` of the work already done."""
    def fmt(sec):
        # Same formatting as asMinutes: 'Xm Ys'.
        m = math.floor(sec / 60)
        return '%dm %ds' % (m, sec - m * 60)

    elapsed = time.time() - since
    remaining = elapsed / percent - elapsed
    return '%s (- %s)' % (fmt(elapsed), fmt(remaining))

class AttnDecoderGRU(nn.Module):
    """GRU decoder that first attends over the encoder outputs with
    multi-head self-attention, then runs the recurrent pass and projects to
    the vocabulary with log-softmax."""

    def __init__(self, hidden_size, output_size, num_layers, bi_direc, drop_out=0.1, max_length=256):
        super(AttnDecoderGRU, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.max_length = max_length
        self.num_layers = num_layers
        self.num_direc = 2 if bi_direc else 1
        # NOTE(review): hidden_size must be divisible by the 10 attention
        # heads (SelfAttention raises otherwise).
        self.self_attn = SelfAttention(hidden_size, num_attention_heads=10,
                                       output_attentions=True, lay_norm_eps=1e-8).to(device)
        self.dropout = nn.Dropout(drop_out)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size, num_layers=num_layers,
                          bidirectional=bi_direc).to(device)
        self.out = nn.Linear(self.hidden_size, self.output_size).to(device)

    def forward(self, input, hidden, encoder_outputs):
        """Attend, then decode.

        :param input: [seq, batch, dim] embedded decoder inputs
        :param hidden: [num_layers, batch, dim] GRU state
        :param encoder_outputs: [seq_enc, batch, dim]
        :return: (log-probs [seq, batch, vocab], new hidden, attention probs)
        """
        dropped = self.dropout(input)
        # Self-attention works batch-major; the GRU works seq-major.
        attended, attn_weights = self.self_attn(dropped.transpose(1, 0),
                                                encoder_outputs.transpose(1, 0))
        gru_out, hidden = self.gru(attended.transpose(1, 0), hidden)
        log_probs = F.log_softmax(self.out(gru_out), dim=-1)
        return log_probs, hidden, attn_weights

    def initHidden(self, batch_size):
        """Zero hidden state shaped [layers*directions, batch, hidden]."""
        return torch.zeros(self.num_layers * self.num_direc, batch_size,
                           self.hidden_size, device=device)

class EncoderPretrain(nn.Module):
    """Bundle of an EncoderGRU, an embedding and a vocabulary classifier,
    intended for LM-style encoder pretraining."""

    def __init__(self, input_lang, output_lang, encoder_layers=2, bidirec=True,
                 batch_size=20, max_length=256, emb_dim=300):
        super(EncoderPretrain, self).__init__()
        self.hidden_dim = emb_dim
        self.encoder = EncoderGRU(input_lang.n_words, emb_dim,
                                  num_layers=encoder_layers, bidirec=bidirec)
        self.encoder_layers = encoder_layers
        self.num_direc = 1 + int(bidirec)
        self.input_lang = input_lang
        self.output_lang = output_lang
        self.max_length = max_length
        self.criterion = nn.CrossEntropyLoss()
        self.batch_size = batch_size
        # Embedding over the input vocabulary (rows = vocabulary size).
        self.embedding = nn.Embedding(input_lang.n_words, emb_dim).to(device)
        self.Encoder_Classifier = nn.Linear(self.hidden_dim, self.input_lang.n_words).to(device)
        # NOTE(review): uniform init in [-8, 8] is an unusually wide range —
        # possibly meant to be (-0.8, 0.8) like Lang.EmbeddingLayer; confirm.
        torch.nn.init.uniform_(self.embedding.weight, a=-8, b=8)

def pad_input(input):
    """Batch 1-D index tensors into a single [batch, max_len+1] int64 tensor.

    Column 0 of every row is set to 0 (the SOS index); trailing positions are
    filled with 1 (the EOS index), which doubles as the padding value.
    """
    lengths = [sent.size(0) for sent in input]
    padded = torch.ones(len(input), max(lengths) + 1, dtype=torch.int64, device=device)
    padded[:, 0] = 0  # leading SOS marker for every row
    for row, sent in enumerate(input):
        padded[row, 1:sent.size(0) + 1] = sent
    return padded

class Attn_AutoEncoder(nn.Module):
    """Sentence autoencoder: bidirectional GRU encoder + attention GRU decoder.

    The same Lang is normally shared for input and output (see prepareData).
    Provides LM-style pretraining for encoder and decoder, teacher-forced and
    free-running training loops, checkpoint loading, and beam-search decoding.
    """

    def __init__(self, input_lang, output_lang, encoder_layers=2, decoder_layers=4, bidirec=True, batch_size=20, max_length=256, emb_dim=300, word2vec=None):
        super(Attn_AutoEncoder, self).__init__()
        self.hidden_dim = emb_dim
        self.encoder = EncoderGRU(input_lang.n_words, emb_dim, num_layers=encoder_layers, bidirec=bidirec)
        self.decoder = AttnDecoderGRU(emb_dim, output_lang.n_words, num_layers=decoder_layers, bi_direc=False, max_length=max_length)
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_direc = 1 + int(bidirec)
        self.input_lang = input_lang
        self.output_lang = output_lang
        self.max_length = max_length
        self.criterion = nn.CrossEntropyLoss()
        self.batch_size = batch_size
        # BUG FIX: the `word2vec` constructor argument used to be ignored (a
        # literal None was forwarded), so pretrained vectors never loaded.
        # Also forward emb_dim instead of the hard-coded 300.
        self.embedding = input_lang.EmbeddingLayer(emb_dim=emb_dim, uniform_init=(-0.8, 0.8), word2vec=word2vec).to(device)

    def encoder_lm_LossAndGrad(self, input_tensor):
        """LM loss for the encoder: predict each input token from the encoder
        output at the same position. Calls backward(); returns the loss.

        NOTE(review): relies on self.Encoder_Classifier, which is only created
        inside EncoderPreTraining() — calling this first raises AttributeError.
        """
        embedded = self.embedding(input_tensor).transpose(1, 0)  # [batch, seq, dim] => [seq, batch, dim]
        encoder_hidden = self.encoder.initHidden(len(input_tensor))
        encoder_outputs, encoder_hiddens = self.encoder(embedded, encoder_hidden)
        # Sum the two directions back into a single [seq, batch, dim] tensor.
        new_shape = encoder_outputs.size()[:-1] + (self.num_direc, self.hidden_dim)
        encoder_outputs = encoder_outputs.view(*new_shape).sum(dim=-2)
        # BUG FIX: CrossEntropyLoss expects raw logits (it applies log-softmax
        # itself); the extra .softmax(dim=-1) distorted the loss and gradients.
        encoder_pred = self.Encoder_Classifier(encoder_outputs)  # [seq, batch, vocab]
        ep_shape = encoder_pred.shape
        batch_seq_logits = encoder_pred.transpose(1, 0).contiguous().view([ep_shape[0] * ep_shape[1], ep_shape[2]])
        ground_truth = input_tensor.view([-1])
        loss = self.criterion(batch_seq_logits, ground_truth)
        loss.backward()
        return loss

    def decoder_lm_LossAndGrad(self, input_tensor):
        """LM loss for the decoder GRU run directly over the embedded input.
        Calls backward(); returns the loss.

        NOTE(review): relies on self.Decoder_Classifier, which is only created
        inside DecoderPretraining() — calling this first raises AttributeError.
        NOTE(review): targets are not shifted, so each position predicts itself
        rather than the next token — a shifted variant was left commented out
        in the original; confirm which is intended.
        """
        embedded = self.embedding(input_tensor).transpose(1, 0)  # [batch, seq, dim] => [seq, batch, dim]
        decoder_hidden = self.decoder.initHidden(len(input_tensor))
        decoder_outputs, decoder_hiddens = self.decoder.gru(embedded, decoder_hidden)
        # BUG FIX: CrossEntropyLoss expects raw logits; removed the extra softmax.
        decoder_pred = self.Decoder_Classifier(decoder_outputs)  # [seq, batch, vocab]
        ep_shape = decoder_pred.shape
        batch_seq_logits = decoder_pred.transpose(1, 0).contiguous().view([ep_shape[0] * ep_shape[1], ep_shape[2]])
        ground_truth = input_tensor.view([-1])
        loss = self.criterion(batch_seq_logits, ground_truth)
        loss.backward()
        return loss

    def DecoderPretraining(self, n_iters, pairs, print_every=100, save_every=10000, learning_rate=0.001,
                   log_dir="./log/", file_suffix="EncoderEmbedding"
                   ):
        """LM-pretrain the decoder GRU (plus the embedding) on the input side
        of `pairs`, logging to TensorBoard and checkpointing periodically."""
        self.Decoder_Classifier = nn.Linear(self.hidden_dim, self.input_lang.n_words).to(device)
        batch_size = 32
        start = time.time()
        writer = SummaryWriter(log_dir=log_dir, filename_suffix=file_suffix)
        print_loss_total = 0  # Reset every print_every
        plot_loss_total = 0  # Reset every plot_every
        training_pairs = [tensorsFromPair(pair, self.input_lang, self.output_lang)
                          for pair in pairs]
        optimizer = AdamW([
            {'params': self.embedding.parameters(), 'lr': learning_rate},
            {'params': self.decoder.parameters(), 'lr': learning_rate},
            {'params': self.Decoder_Classifier.parameters(), 'lr': learning_rate}
        ]
        )
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=100, t_total=n_iters)

        optimizer.zero_grad()
        print("initialization completed!")
        for iter in range(1, n_iters + 1, batch_size):
            training_pair = random.sample(training_pairs, batch_size)
            input = pad_input([sents[0].view([-1]) for sents in training_pair])
            optimizer.zero_grad()
            loss = self.decoder_lm_LossAndGrad(input)

            optimizer.step()
            optimizer.zero_grad()

            scheduler.step()
            torch.cuda.empty_cache()

            # BUG FIX: accumulate python floats, not loss tensors — chaining
            # tensors in the running totals kept autograd history alive
            # across iterations.
            loss = float(loss)
            print_loss_total += loss
            plot_loss_total += loss
            writer.add_scalar("Training Loss", loss, iter)
            if int(iter / batch_size) % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                             iter, iter / n_iters * 100, print_loss_avg))
            if int(iter / batch_size) % save_every == 0:
                torch.save(
                    {
                        "embedding": self.embedding.state_dict(),
                        "decoder": self.decoder.state_dict(),
                        "Decoder_CLS": self.Decoder_Classifier.state_dict(),
                    },
                    "../saved/DecoderPreTrain_%d_%3d.pkl" % (
                        self.decoder_layers, iter / save_every)
                )

    def EncoderPreTraining(self, n_iters, pairs, print_every=100, save_every=10000, learning_rate=0.001,
                           log_dir="./log/", file_suffix="EncoderEmbedding"
                           ):
        """LM-pretrain the encoder GRU (plus the embedding) on the input side
        of `pairs`, logging to TensorBoard and checkpointing periodically."""
        self.Encoder_Classifier = nn.Linear(self.hidden_dim, self.input_lang.n_words).to(device)
        batch_size = 32
        start = time.time()
        writer = SummaryWriter(log_dir=log_dir, filename_suffix=file_suffix)
        print_loss_total = 0  # Reset every print_every
        plot_loss_total = 0  # Reset every plot_every
        training_pairs = [tensorsFromPair(pair, self.input_lang, self.output_lang)
                          for pair in pairs]
        optimizer = AdamW([
            {'params': self.embedding.parameters(), 'lr': learning_rate},
            {'params': self.encoder.parameters(), 'lr': learning_rate},
            {'params': self.Encoder_Classifier.parameters(), 'lr': learning_rate}
        ]
        )
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=100, t_total=n_iters)

        optimizer.zero_grad()
        print("initialization completed!")
        for iter in range(1, n_iters + 1, batch_size):
            training_pair = random.sample(training_pairs, batch_size)
            input = pad_input([sents[0].view([-1]) for sents in training_pair])
            optimizer.zero_grad()
            loss = self.encoder_lm_LossAndGrad(input)

            optimizer.step()
            optimizer.zero_grad()

            scheduler.step()
            torch.cuda.empty_cache()

            # BUG FIX: accumulate python floats, not loss tensors (see
            # DecoderPretraining).
            loss = float(loss)
            print_loss_total += loss
            plot_loss_total += loss
            writer.add_scalar("Training Loss", loss, iter)
            if int(iter / batch_size) % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                             iter, iter / n_iters * 100, print_loss_avg))
            if int(iter / batch_size) % save_every == 0:
                torch.save(
                    {
                        "embedding": self.embedding.state_dict(),
                        "encoder": self.encoder.state_dict(),
                        "Encoder_CLS": self.Encoder_Classifier.state_dict(),
                    },
                    "../saved/EncoderPreTrain_%d_%d_%d_%3d.pkl" % (
                        2, 4, 2, iter / save_every)
                )

    def LossAndGrad(self, input_tensor, target_tensor):
        """Free-running (non-teacher-forced) loss for a single sentence
        (batch size 1). Calls backward(); returns the per-token loss value.

        NOTE(review): the tuple branch builds `decoder_hidden` as a generator
        expression and the criterion is fed a 3-D decoder output — this path
        looks untested; confirm before enabling free-running training.
        NOTE(review): the decoder already emits log-softmax outputs, so
        CrossEntropyLoss applies log-softmax twice here — consider NLLLoss.
        """
        encoder_hidden = self.encoder.initHidden(len(input_tensor))
        target_length = target_tensor.size(0)
        loss = 0
        embedded = self.embedding(input_tensor).transpose(1, 0)  # [batch, seq, dim] => [seq, batch, dim]
        encoder_outputs, encoder_hiddens = self.encoder(embedded, encoder_hidden)
        # Sum the directions back into a single [seq, batch, dim] tensor.
        new_shape = encoder_outputs.size()[:-1] + (self.num_direc, self.hidden_dim)
        encoder_outputs = encoder_outputs.view(*new_shape).sum(dim=-2)
        if type(encoder_hiddens) == tuple:
            sent_vec, cell_vec = (x.view(self.encoder_layers, self.num_direc, 1, -1).mean(dim=0).mean(dim=0) for x in encoder_hiddens)
            decoder_hidden = (torch.stack([x for _ in range(self.decoder_layers)]) for x in (sent_vec, cell_vec))
        else:
            # Average encoder layers/directions into one sentence vector and
            # replicate it for every decoder layer.
            sent_vec = encoder_hiddens.view(self.encoder_layers, self.num_direc, 1, -1).mean(dim=0).mean(dim=0)
            decoder_hidden = torch.stack([sent_vec for _ in range(self.decoder_layers)])
        decoder_input = torch.tensor([[SOS_token]], device=device)
        for di in range(target_length):
            embedded = self.embedding(decoder_input).view(1, 1, -1)
            decoder_output, decoder_hidden, decoder_attention = self.decoder(
                embedded, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += self.criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break
        loss.backward()
        return loss.item() / target_length

    def TeacherForceTrain(self, input_tensor, target_tensor):
        """Teacher-forced batch loss: the decoder sees the gold sequence and
        predicts the next token at every position. Calls backward(); returns
        the loss.

        NOTE(review): decoder inputs and encoder outputs are detached, so this
        step only updates the decoder — confirm that is intended.
        NOTE(review): the decoder already emits log-softmax outputs, so
        CrossEntropyLoss applies log-softmax twice here — consider NLLLoss.
        """
        encoder_hidden = self.encoder.initHidden(len(input_tensor))
        embedded = self.embedding(input_tensor).transpose(1, 0)  # [batch, seq, dim] => [seq, batch, dim]
        encoder_outputs, encoder_hiddens = self.encoder(embedded, encoder_hidden)
        new_shape = encoder_outputs.size()[:-1] + (self.num_direc, self.hidden_dim)
        encoder_outputs = encoder_outputs.view(*new_shape).sum(dim=-2)
        if type(encoder_hiddens) == tuple:
            sent_vec, cell_vec = (x.view(self.encoder_layers, self.num_direc, len(input_tensor), -1).mean(dim=0).mean(dim=0) for x in encoder_hiddens)
            decoder_hidden = (torch.stack([x for _ in range(self.decoder_layers)]) for x in (sent_vec, cell_vec))
        else:
            sent_vec = encoder_hiddens.view(self.encoder_layers, self.num_direc, len(input_tensor), -1).mean(dim=0).mean(dim=0)
            decoder_hidden = torch.stack([sent_vec for _ in range(self.decoder_layers)])
        decoder_input = target_tensor
        embedded = self.embedding(decoder_input).transpose(1, 0)  # [batch, seq, dim] => [seq, batch, dim]

        decoder_output, decoder_hidden, decoder_attention = self.decoder(
            embedded.detach(), decoder_hidden, encoder_outputs.detach())  # decoder_output: [seq, batch, vocab]
        # Align predictions at position t with gold tokens at position t+1.
        preds = decoder_output[:-1, :, :].transpose(1, 0).contiguous()
        de_shape = preds.shape
        batch_seq_logits = preds.reshape([de_shape[0]*de_shape[1], -1])
        ground_truth = target_tensor[:, 1:].contiguous().view([-1])
        loss = self.criterion(batch_seq_logits, ground_truth)
        loss.backward()
        return loss

    def trainIters(self, n_iters, pairs, print_every=10, save_every=100000, learning_rate=0.001,
                   log_dir="./log/", file_suffix="_AttnAutoencoder"
                   ):
        """Main autoencoder training loop over randomly sampled batches of
        sentences (each sentence autoencodes onto itself)."""
        start = time.time()
        writer = SummaryWriter(log_dir=log_dir, filename_suffix=file_suffix)
        print_loss_total = 0  # Reset every print_every
        plot_loss_total = 0  # Reset every plot_every
        # Pretrained embeddings learn at 1% of the base rate.
        optimizer = AdamW([
            {'params': self.embedding.parameters(), 'lr': learning_rate*0.01},
            {'params': self.encoder.parameters(), 'lr': learning_rate},
            {'params': self.decoder.parameters(), 'lr': learning_rate}
        ]
        )
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=100, t_total=n_iters)
        optimizer.zero_grad()
        for iter in range(1, n_iters + 1, self.batch_size):
            training_pairs = random.sample(pairs, self.batch_size)
            input_tensors = [tensorFromSentence(self.input_lang, pair[0]) for pair in training_pairs]
            # NOTE(review): this keeps teacher forcing on for the first
            # ~10000 passes over the data — confirm the intended threshold.
            if iter / len(pairs) <= 10000:
                input = pad_input([sent.view([-1]) for sent in input_tensors])
                # BUG FIX: convert to float so the running totals below do not
                # retain autograd history.
                loss = float(self.TeacherForceTrain(input, input))
            else:
                # BUG FIX: `loss` was never initialised on this branch, so its
                # first `+=` raised UnboundLocalError.
                loss = 0
                for pair in training_pairs:
                    ipt_tensor = tensorFromSentence(self.input_lang, pair[0])
                    loss += self.LossAndGrad(ipt_tensor, ipt_tensor)
            optimizer.step()
            optimizer.zero_grad()
            scheduler.step()
            print_loss_total += loss
            plot_loss_total += loss
            writer.add_scalar("Training Loss", loss, iter)
            if int(iter / self.batch_size) % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                             iter, iter / n_iters * 100, print_loss_avg))

            if int(iter / self.batch_size) % save_every == 0:
                torch.save(
                    {
                        "embedding": self.embedding.state_dict(),
                        "encoder": self.encoder.state_dict(),
                        "decoder": self.decoder.state_dict()
                    },
                    "../saved/Attn_Autoencoder(%d)_%d_%d_%d_%d.pkl"%(self.batch_size, self.encoder_layers, self.decoder_layers, self.num_direc, iter/save_every)
                )

    def load_model(self, pretrained_file=""):
        """Load encoder/decoder weights from a trainIters checkpoint, if the
        file exists (embedding loading was left disabled in the original)."""
        if os.path.exists(pretrained_file):
            ch = torch.load(pretrained_file)
            # self.embedding.load_state_dict(ch['embedding'])
            self.encoder.load_state_dict(ch['encoder'])
            self.decoder.load_state_dict(ch['decoder'])

    def evaluate(self, sentence, beam_K=1):
        """Beam-search decode `sentence` through the autoencoder.

        :param beam_K: beam width
        :return: (beam_seqs, decoder_attentions) — all K candidate word arrays
            (each starting with 'SOS') and their accumulated attention tensors.

        NOTE(review): the decoder emits log-probabilities but candidate scores
        are combined by multiplication (as if they were probabilities) — the
        beam scoring looks suspect; confirm before relying on K > 1.
        """
        with torch.no_grad():
            input_tensor = tensorFromSentence(self.input_lang, sentence)
            input_length = input_tensor.size()[0]
            encoder_hidden = self.encoder.initHidden(1)
            embedded = self.embedding(input_tensor).view(input_length, 1, -1)
            encoder_outputs, encoder_hiddens = self.encoder(embedded, encoder_hidden)
            new_shape = encoder_outputs.size()[:-1] + (self.num_direc, self.hidden_dim)
            encoder_outputs = encoder_outputs.view(*new_shape).sum(dim=-2)
            if type(encoder_hiddens) == tuple:
                sent_vec, cell_vec = (x.view(self.encoder_layers, self.num_direc, 1, -1).mean(dim=0).mean(dim=0) for x
                                      in encoder_hiddens)
                decoder_hidden = (torch.stack([x for _ in range(self.decoder_layers)]) for x in (sent_vec, cell_vec))
            else:
                sent_vec = encoder_hiddens.view(self.encoder_layers, self.num_direc, 1, -1).mean(dim=0).mean(dim=0)
                decoder_hidden = torch.stack([sent_vec for _ in range(self.decoder_layers)])

            decoder_input = torch.tensor([[SOS_token]], dtype=torch.int64, device=device)  # SOS
            candidate_words = torch.zeros([beam_K, beam_K], dtype=torch.int64,  device=device)
            candidate_probs = torch.ones([beam_K, beam_K], device=device)
            beam_seqs = [np.array(['SOS']) for _ in range(beam_K)]
            # First step: expand the single SOS input into K beams.
            embedded = self.embedding(decoder_input).view(1, 1, -1)
            decoder_output, decoder_hidden, decoder_attention = self.decoder(
                embedded, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.squeeze(0).data.topk(beam_K)
            beam_words = topi[0]
            beam_probs = topv[0]
            candidate_probs = candidate_probs*beam_probs.unsqueeze(-1)
            beam_hiddens = [decoder_hidden for _ in range(beam_K)]
            decoder_attentions = [decoder_attention.data for _ in range(beam_K)]
            for di in range(1, self.max_length):
                candidate_hiddens = []
                candidate_attn = []
                # Expand each beam by its K best continuations.
                for ti in range(beam_K):
                    embedded = self.embedding(beam_words[ti]).view(1, 1, -1)
                    decoder_output, decoder_hidden, decoder_attention = self.decoder(
                        embedded, beam_hiddens[ti], encoder_outputs)
                    topv, topi = decoder_output.squeeze(0).topk(beam_K)
                    candidate_probs[ti] = topv[0]*beam_probs[ti]
                    candidate_words[ti] = topi[0]
                    candidate_hiddens.append(decoder_hidden)
                    candidate_attn.append(decoder_attention)
                # Keep the K best of the K*K candidates.
                topv, topi = candidate_probs.view([-1]).topk(beam_K)
                beam_probs = topv
                beam_words = candidate_words.view([-1])[topi]
                # BUG FIX: the flat candidate index maps back to its source
                # beam via integer division by beam_K; the original divided by
                # a hard-coded 5 with true division, producing float indices.
                row = (topi // beam_K).tolist()
                beam_hiddens = [candidate_hiddens[idx] for idx in row]
                decoder_attentions = [torch.cat([decoder_attentions[idx], candidate_attn[idx]], dim=0) for idx in row]
                beam_seqs = [np.concatenate([beam_seqs[idx], np.array([self.output_lang.index2word[w]])],axis=0) for idx, w in zip(row, beam_words.tolist())]
            # _, idx = beam_probs.topk(1)
            # idx = idx.item()
            # return beam_seqs[idx], decoder_attentions[idx]
        return beam_seqs, decoder_attentions

def readLangs(text_file, reverse=False):
    """Read one sentence per line and build (input_lang, output_lang, pairs).

    Each pair duplicates the normalized line (autoencoder setup); `reverse`
    swaps the two sides. The returned Lang objects are empty — callers
    populate them via addSentence.
    """
    print("Reading lines...")
    # BUG FIX: the original opened the file without closing it; use a context
    # manager so the handle is released deterministically.
    with open(text_file) as f:
        lines = f.read().strip().split('\n')
    # Split every line into a self-pair and normalize.
    pairs = [[normalizeString(l.strip()), normalizeString(l.strip())] for l in lines]
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
    # Both branches of the original built identical Lang instances.
    input_lang = Lang("in")
    output_lang = Lang("out")
    return input_lang, output_lang, pairs

def filterPair(p):
    """Keep a pair only when both sentences have fewer than MAX_LENGTH tokens."""
    return all(len(side.split(' ')) < MAX_LENGTH for side in p[:2])


def filterPairs(pairs):
    """Drop pairs whose sentences exceed the length limit (see filterPair)."""
    return list(filter(filterPair, pairs))

def normalizeString(s):
    """Pad sentence punctuation with a leading space, then collapse every
    other run of non-letter characters into a single space."""
    spaced = re.sub(r"([.!?])", r" \1", s)
    return re.sub(r"[^a-zA-Z.!?]+", r" ", spaced)

def prepareData(text_file, user_words=None, predefined_lang=None):
    """Build the shared vocabulary and sentence pairs for autoencoder training.

    :param text_file: path to a file with one sentence per line
    :param user_words: optional predefined word list seeding the Lang
    :param predefined_lang: optional pickled Lang file to load instead
    :return: (input_lang, input_lang, pairs) — the same Lang is deliberately
             returned for both sides, since the task is autoencoding.
    """
    print("Reading lines...")
    # BUG FIX: the original opened the file without closing it; use a context
    # manager so the handle is released deterministically.
    with open(text_file) as f:
        lines = f.read().strip().split('\n')
    pairs = [[normalizeString(l.strip()), normalizeString(l.strip())] for l in lines]
    input_lang = Lang("in", pre_defined_words=user_words)
    if predefined_lang is not None:
        input_lang.Load(fname=predefined_lang)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    for pair in pairs:
        input_lang.addSentence(pair[0])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    return input_lang, input_lang, pairs

def train_AttnAutoEncoder(input_lang, output_lang, word2vec=None, train_pairs=None):
    """Build an Attn_AutoEncoder, load any existing checkpoint, and train.

    :param train_pairs: sentence pairs to train on. BUG FIX: the original
        silently relied on a module-level `pairs` global; passing the data
        explicitly is now supported. When None, the global fallback is kept
        for backward compatibility (NameError if it does not exist).
    """
    data = train_pairs if train_pairs is not None else pairs
    autoencoder = Attn_AutoEncoder(input_lang, output_lang, batch_size=32, word2vec=word2vec)
    # Optional pretraining stages, left disabled:
    # autoencoder.EncoderPreTraining(1000000000, data, print_every=10, learning_rate=2e-3)
    # autoencoder.DecoderPretraining(1000000000, data, print_every=10, learning_rate=2e-4)
    autoencoder.load_model(pretrained_file="../saved/Attn_Autoencoder_2_4_2_832.pkl")
    autoencoder.trainIters(1000000000, data, learning_rate=2e-3)


if __name__ == '__main__':
    # NOTE(review): despite the .txt extension this file holds a pickled
    # object with a .vocab attribute (e.g. gensim KeyedVectors) — confirm.
    with open("./word2vec.txt", "rb") as handle:
    # with open("/home/hadoop/word2vec.txt", "rb") as handle:
        word2vec = pickle.load(handle)
    user_words = list(word2vec.vocab.keys())
    # NOTE(review): `user_words` is computed above but user_words=None is
    # passed here — confirm whether the vocabulary should be pre-seeded.
    input_lang, output_lang, pairs = prepareData('../data/AE_data.txt', user_words=None)
    # Persist the vocabulary so later runs can Lang.Load() it.
    input_lang.Caches("../saved/AutoEncoder.lang")
    # input_lang, output_lang, pairs = prepareData('./data/AE_data_twitter.txt', user_words=None)
    # input_lang.Caches("../saved/AutoEncoder_Twitter.lang")
    train_AttnAutoEncoder(input_lang, output_lang, word2vec)