import os.path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import numpy as np
import torchnet.meter as meter
import matplotlib.pyplot as plt
from config import Config
from tqdm import tqdm


def get_data():
    """Load the pre-processed Tang-poetry archive.

    Returns a tuple ``(dataloader, word2ix, ix2word)`` where the dataloader
    yields batches of index-encoded poems and the two dicts map between
    characters and vocabulary indices.
    """
    archive = np.load('tang.npz', allow_pickle=True)
    loader = data.DataLoader(
        archive['data'],
        batch_size=Config.batch_size,
        shuffle=True,
        num_workers=1,
    )
    # word2ix / ix2word are stored as 0-d object arrays; .item() unwraps the dict.
    return loader, archive['word2ix'].item(), archive['ix2word'].item()


# Prefer the GPU when one is present; previously "cuda:0" was hard-coded,
# which crashed on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataset, word2ix, ix2word = get_data()
# Special-token indices: sequence start, intra-poem padding, end-of-poem.
START_INDEX, PADDING_INDEX, END_INDEX = word2ix['<START>'], word2ix['</s>'], word2ix['<EOP>']


class PoetryModel(nn.Module):
    """Character-level LSTM language model for poetry generation.

    Embeds token indices, runs them through a stacked LSTM, and projects
    every timestep's hidden state back onto the vocabulary.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_layers=2):
        # vocab_size: total number of words in the vocabulary
        # embedding_dim: size of the learned token embeddings
        # hidden_dim: LSTM hidden-state size
        # num_layers: stacked LSTM layers; default 2 keeps the original
        #             behavior (was hard-coded both here and in forward())
        super(PoetryModel, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, num_layers=num_layers)
        self.linear = nn.Linear(self.hidden_dim, vocab_size)

    def forward(self, input_data, hidden_data=None):
        # input_data: LongTensor of shape (seq_len, batch_size)
        # hidden_data: optional (h_0, c_0) pair carried over from a previous call
        # Returns (logits of shape (seq_len*batch_size, vocab_size), (h_n, c_n)).
        seq_len, batch_size = input_data.size()
        embeds = self.embeddings(input_data)
        if hidden_data is None:
            # First step: start from zeroed hidden/cell states, allocated on the
            # input's device (replaces the dated `.data.new(...).fill_(0)` idiom).
            h_0 = input_data.new_zeros(
                self.num_layers, batch_size, self.hidden_dim, dtype=torch.float)
            c_0 = input_data.new_zeros(
                self.num_layers, batch_size, self.hidden_dim, dtype=torch.float)
        else:
            # Reuse carried-over state; move it to the input's device rather
            # than relying on the module-level `device` global.
            h_0, c_0 = hidden_data
            h_0, c_0 = h_0.to(input_data.device), c_0.to(input_data.device)
        output, hidden = self.lstm(embeds, (h_0, c_0))
        # Flatten timesteps and batch so CrossEntropyLoss can consume it directly.
        output = self.linear(output).reshape(batch_size * seq_len, -1)
        return output, hidden


def initialize_model():
    """Build the PoetryModel, restoring previously trained weights if present.

    Returns the model already moved to the global ``device``.
    """
    model = PoetryModel(
        vocab_size=len(word2ix),
        embedding_dim=Config.embedding_dim,
        hidden_dim=Config.hidden_dim
    )
    # map_location keeps loading working even when the checkpoint was saved
    # on a different device (e.g. trained on GPU, run on CPU).
    if os.path.exists(Config.model_param_path):
        model.load_state_dict(torch.load(Config.model_param_path, map_location=device))
    model.to(device)
    return model


def train(dataloader, model):
    """Train ``model`` on ``dataloader``, checkpointing after every epoch.

    Resumes the per-epoch loss history from disk when it exists, so repeated
    runs accumulate one continuous loss curve.
    """
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=Config.learning_rate)
    criterion = nn.CrossEntropyLoss()
    loss_meter = meter.AverageValueMeter()
    loss_list = []
    if os.path.exists(Config.train_loss_path):
        loss_list = torch.load(Config.train_loss_path)

    # begin training
    for epoch in range(Config.epoch):
        loss_meter.reset()

        # fetch a batch size of data
        loop = tqdm(enumerate(dataloader), total=len(dataloader), leave=True)
        for batch_index, batch_data in loop:
            # batch_data arrives as [poems in batch][words per poem]; the
            # model expects (seq_len, batch), hence the transpose.
            batch_data = batch_data.long().transpose(1, 0).contiguous().to(device)

            optimizer.zero_grad()
            # Next-token objective: input is every word but the last, target
            # is the same sequence shifted left by one.
            input_seq, target_seq = batch_data[:-1, :], batch_data[1:, :]
            output, _ = model(input_seq)
            loss = criterion(output, target_seq.view(-1))
            loss.backward()
            optimizer.step()
            loss_meter.add(loss.item())

            # set progress bar
            loop.set_description(f'TRAIN Epoch [{epoch}/{Config.epoch}]')
            loop.set_postfix(loss=loss_meter.mean)
        loss_list.append(loss_meter.mean)
        # Checkpoint both weights and loss history each epoch so an
        # interrupted run loses at most one epoch of progress (previously the
        # loss curve was only written after ALL epochs finished).
        torch.save(model.state_dict(), Config.model_param_path)
        torch.save(loss_list, Config.train_loss_path)


def generate(model, start_words, length=None):
    """Continue ``start_words`` into a poem of at most ``length`` characters.

    The prefix is teacher-forced into the model; after that, characters are
    chosen greedily until '<EOP>' or the length budget is hit.
    """
    if length is None:
        length = Config.max_poetry_length
    results = list(start_words)
    prefix_len = len(start_words)
    current = torch.Tensor([START_INDEX]).view(1, 1).long().to(device)
    hidden = None
    for step in range(length):
        output, hidden = model(current, hidden)
        if step < prefix_len:
            # Still feeding the user-supplied prefix; ignore the prediction.
            w = results[step]
            next_index = word2ix[w]
        else:
            # Greedy decoding: pick the single most likely next character.
            next_index = output.data[0].topk(1)[1][0].item()
            w = ix2word[next_index]
            results.append(w)
        current = current.data.new([next_index]).view(1, 1)
        if w == '<EOP>':
            # Drop the end-of-poem marker itself from the result.
            del results[-1]
            break
    return results


def generate_head_specified(model, head_words):
    """Generate an acrostic poem whose lines start with ``head_words``.

    At the beginning of each line (right after sentence-ending punctuation,
    or at the very start) the next character of ``head_words`` is forced in;
    everything else is greedily decoded. Stops when the head characters are
    exhausted or a terminator token is produced.
    """
    input_word = torch.Tensor([START_INDEX]).view(1, 1).long().to(device)
    last_word, head_index, hidden, results = '<START>', 0, None, []
    for i in range(Config.max_poetry_length):
        # (removed leftover debug print of input/hidden shapes at i == 2)
        output, hidden = model(input_word, hidden)
        top_index = output.data[0].topk(1)[1][0].item()
        w = ix2word[top_index]
        if last_word in {'。', '？', '！', '<START>'}:
            # Start of a new line: force the next head character instead of
            # the model's own prediction.
            if head_index == len(head_words):
                break
            w = head_words[head_index]
            head_index += 1
            input_word = input_word.data.new([word2ix[w]]).view(1, 1)
        else:
            input_word = input_word.data.new([top_index]).view(1, 1)
        if w == '<EOP>' or w == '<START>' or w == '</s>':
            # Terminator produced: drop the previous (dangling) character.
            del results[-1]
            break
        last_word = w
        results.append(w)
    return results


def print_poetry(p):
    """Pretty-print a poem: break the line after each sentence-ending mark."""
    line_end_marks = {'。', '？', '！'}
    for word in p:
        print(word, end='\n' if word in line_end_marks else '')


if __name__ == '__main__':
    mod = initialize_model()
    # Uncomment to resume training before generating:
    # for _ in range(5):
    #     train(dataset, mod)
    print_poetry(generate(mod, '江南', 84))
    # Other usage examples:
    # print_poetry(generate(mod, '湖光秋月两相和'))
    # print_poetry(generate_head_specified(mod, '人工智能'))
    # print_poetry(generate_head_specified(mod, '酒色财气'))
