import os
import time
import torch
import torch.nn as nn

from Seq2Seq import Seq2Seq
from load_data_translate import load_data_translate, truncate_pad
from utils import *


def train_epoch(model, optimizer, data_loader, tgt_vocab, device):
    """Train the model for one epoch.

    Args:
        model: Seq2Seq model exposing a ``loss(Y_hat, Y, Y_valid_len)`` method.
        optimizer: torch optimizer over ``model.parameters()``.
        data_loader: iterable yielding (X, X_valid_len, Y, Y_valid_len) batches.
        tgt_vocab: target-language vocabulary; used to look up the <bos> index.
        device: device the batch tensors are moved to.

    Returns:
        (mean loss over batches, elapsed seconds, optimizer)
    """
    model.train()
    epoch_loss = 0.0
    num_batches = 0  # avoid shadowing the builtin `iter`
    start = time.time()

    for batch in data_loader:
        # X, Y: (batch_size, num_steps); X_valid_len, Y_valid_len: (batch_size,)
        X, X_valid_len, Y, Y_valid_len = [t.to(device) for t in batch]
        # Teacher forcing: prepend <bos> to the target and drop its last token.
        # bos: (batch_size, 1), filled with the <bos> vocabulary index.
        bos = torch.full((Y.shape[0], 1), tgt_vocab['<bos>'],
                         dtype=Y.dtype, device=device)
        # dec_input: (batch_size, num_steps)
        dec_input = torch.cat([bos, Y[:, :-1]], dim=1)
        # Call the module (not .forward) so nn.Module hooks run.
        # Y_hat: (batch_size, num_steps, vocab_size)
        Y_hat, _ = model(X, dec_input)
        loss = model.loss(Y_hat, Y, Y_valid_len)
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients to stabilize RNN training.
        grad_clipping(model, max_norm=1)
        optimizer.step()
        # .item() already synchronizes; no explicit .cpu() needed.
        epoch_loss += loss.detach().item()
        num_batches += 1

    # Guard against an empty data_loader (matches original behavior: loss 0).
    epoch_loss /= max(num_batches, 1)
    epoch_time = time.time() - start
    return epoch_loss, epoch_time, optimizer


def predict_seq2seq(model, src_sentence, src_vocab, tgt_vocab, num_steps, device, save_attention_weights=False):
    """Greedy-decode a translation of ``src_sentence`` with a trained seq2seq model.

    Args:
        model: Seq2Seq model with ``encoder`` and ``decoder`` submodules.
        src_sentence: source sentence; lowercased and split on single spaces.
        src_vocab: source vocabulary (token -> index lookup).
        tgt_vocab: target vocabulary (supports index lookup and ``to_tokens``).
        num_steps: maximum decoding length; also the padded source length.
        device: device for the input tensors.
        save_attention_weights: if True, collect ``model.decoder.attention_weights``
            after every step (only meaningful for attention-based decoders).

    Returns:
        (translated sentence as a space-joined token string,
         list of collected attention weights — empty unless requested)
    """
    model.eval()
    # Map tokens to vocabulary indices and append <eos>.
    src_tokens = src_vocab[src_sentence.lower().split(' ')] + [src_vocab['<eos>']]
    # Pad (or truncate) the source to exactly num_steps tokens with <pad>.
    # NOTE: the original also built an unused `enc_valid_len` tensor; removed.
    src_tokens = truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
    # Add a batch axis: (num_steps,) -> (1, num_steps)
    enc_X = torch.unsqueeze(torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)
    outputs, attention_weight_seq = [], []

    # Inference only — disable autograd tracking.
    with torch.no_grad():
        enc_outputs = model.encoder(enc_X)
        dec_state = model.decoder.init_state(enc_outputs)
        # First decoder input is <bos>; shape (1, 1).
        dec_X = torch.unsqueeze(torch.tensor([tgt_vocab['<bos>']], dtype=torch.long, device=device), dim=0)

        for _ in range(num_steps):  # generate one token per step
            Y, dec_state = model.decoder(dec_X, dec_state)
            # Feed the highest-probability token back as the next input.
            dec_X = Y.argmax(dim=2)
            # pred is the predicted vocabulary index for this step.
            pred = dec_X.squeeze(dim=0).type(torch.int32).item()
            if save_attention_weights:
                attention_weight_seq.append(model.decoder.attention_weights)
            # Stop as soon as the end-of-sequence token is predicted.
            if pred == tgt_vocab['<eos>']:
                break
            outputs.append(pred)

    return ' '.join(tgt_vocab.to_tokens(outputs)), attention_weight_seq


class Config(object):
    """Bundles dataset paths, training hyper-parameters and model settings."""

    def __init__(self):
        # Dataset location
        self.file_path = "../DataSet/en-zh.txt"
        # Training hyper-parameters
        self.num_epochs = 500
        self.batch_size = 64
        self.learning_rate = 0.005
        self.num_examples = 600
        self.num_steps = 10
        self.method = 'char'  # tokenization method
        # Model architecture
        self.embed_size = 32
        self.hidden_size = 32
        self.num_layers = 2
        self.dropout = 0.1
        # Runtime device (GPU 0 when available)
        self.device = gpu_setup(True, 0)


def train_small():
    """Train a Seq2Seq model on the small char-level en-zh corpus, then print sample translations with BLEU scores."""
    cfg = Config()
    loader, src_vocab, tgt_vocab = load_data_translate(
        cfg.file_path, cfg.batch_size, cfg.num_steps, cfg.num_examples, cfg.method)
    print("源语言字典大小:", len(src_vocab))
    print("目标语言字典大小:", len(tgt_vocab))

    net = Seq2Seq(encoder_vocab_size=len(src_vocab),
                  decoder_vocab_size=len(tgt_vocab),
                  embed_size=cfg.embed_size,
                  hidden_size=cfg.hidden_size,
                  num_layers=cfg.num_layers,
                  dropout=cfg.dropout)
    # Xavier-initialize all weights, then move the model to the target device.
    net.apply(xavier_init_weights)
    net = net.to(cfg.device)
    opt = torch.optim.Adam(net.parameters(), lr=cfg.learning_rate)

    # Main training loop.
    for epoch in range(cfg.num_epochs):
        loss_val, elapsed, opt = train_epoch(net, opt, loader, tgt_vocab, cfg.device)
        print("epoch:{:d} | loss:{:.3f} | time:{:.3f}s".format(epoch + 1, loss_val, elapsed))

    # Sanity-check a few translations against reference sentences.
    english_seqs = ['i quit', 'he ran', 'really', 'thanks']
    chinese_seqs = ['我不干了', '他跑了', '你确定', '谢谢']

    for english, chinese in zip(english_seqs, chinese_seqs):
        translation, attention_weight_seq = predict_seq2seq(
            net, english, src_vocab, tgt_vocab, cfg.num_steps, cfg.device)
        print(f'{english} => {translation}, bleu: {bleu(translation, chinese, k=2, method=cfg.method):.3f}')


def train_large():
    """Train a Seq2Seq model on the larger word-level en-zh corpus, then print sample translations with BLEU scores."""
    config = Config()
    # Override the defaults for the word-level dataset.
    config.file_path = "../DataSet/en-zh-word.txt"
    config.num_epochs = 500
    config.num_examples = 1000
    config.method = 'word'  # tokenization method
    data_loader, src_vocab, tgt_vocab = load_data_translate(config.file_path,
                                                            config.batch_size,
                                                            config.num_steps,
                                                            config.num_examples,
                                                            config.method)
    print("源语言字典大小:", len(src_vocab))
    print("目标语言字典大小:", len(tgt_vocab))
    model = Seq2Seq(encoder_vocab_size=len(src_vocab),
                    decoder_vocab_size=len(tgt_vocab),
                    embed_size=config.embed_size,
                    hidden_size=config.hidden_size,
                    num_layers=config.num_layers,
                    dropout=config.dropout)
    # Xavier-initialize all network weights.
    model.apply(xavier_init_weights)
    model = model.to(config.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    # Train the seq2seq model.
    for epoch in range(config.num_epochs):
        epoch_loss, epoch_time, optimizer = train_epoch(model, optimizer, data_loader, tgt_vocab, config.device)
        print("epoch:{:d} | loss:{:.3f} | time:{:.3f}s".format(epoch + 1, epoch_loss, epoch_time))

    # Evaluate a few translations against reference sentences.
    english_seqs = ['i had a lot of fun',
                    'i had a good time this evening',
                    'you might want to discuss it with Tom',
                    'tom, I want to have a chat with you',
                    'do you have something to say']
    # FIX: the original list had only 4 references for 5 source sentences, so
    # zip() silently dropped the last English sentence. Added the missing 4th
    # reference. TODO(review): confirm the reference translation's wording.
    chinese_seqs = ['我玩得很開心',
                    '今天晚上我玩得很開心',
                    '你或许想和汤姆讨论一下',
                    '汤姆我想和你聊聊',
                    '你有什么事要说吗']

    for english, chinese in zip(english_seqs, chinese_seqs):
        translation, attention_weight_seq = predict_seq2seq(model,
                                                            english,
                                                            src_vocab,
                                                            tgt_vocab,
                                                            config.num_steps,
                                                            config.device)
        print(f'{english} => {translation}, bleu: {bleu(translation, chinese, k=2, method=config.method):.3f}')


if __name__ == '__main__':
    # Entry point: trains on the small char-level dataset by default;
    # uncomment train_large() to use the larger word-level corpus instead.
    train_small()
    # train_large()
