import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import math
from sklearn.model_selection import train_test_split

class PositionalEncoding(nn.Module):
    """Add sinusoidal positional encodings to batch-first token embeddings.

    Args:
        d_model: embedding dimension of the model.
        dropout: dropout probability applied after adding the encoding.
        max_len: maximum sequence length the table supports.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the positional-encoding table once.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions

        # BUG FIX: the original stored pe as [max_len, 1, d_model] and sliced it
        # with x.size(0) (the BATCH size) in forward(), so with batch-first
        # inputs every token of sample b got the encoding of position b.
        # Keep a leading batch axis instead: [1, max_len, d_model] broadcasts
        # correctly over [batch, seq, d_model].
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)  # saved with the model, never trained

    def forward(self, x):
        """
        x: [batch_size, seq_len, d_model]
        """
        # Slice by sequence length (dim 1), broadcast over the batch (dim 0).
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)

class TranslatorModel(nn.Module):
    """Seq2seq translator: src/tgt embeddings + positional encoding + nn.Transformer.

    NOTE: reads the module-level globals `src_vocab_size` and `tgt_vocab_size`,
    which must be defined before instantiation (they are set in __main__).
    """

    def __init__(self, d_model=128, n_layers=6, n_heads=8):
        super(TranslatorModel, self).__init__()
        self.embedding_src = nn.Embedding(src_vocab_size, d_model)
        self.embedding_tgt = nn.Embedding(tgt_vocab_size, d_model)
        # Positional encoder (dropout disabled for this small demo)
        self.positional_encoding = PositionalEncoding(d_model, dropout=0)
        # Transformer expects [batch, seq, d_model] because batch_first=True
        self.transformer = nn.Transformer(d_model=d_model, nhead=n_heads, num_encoder_layers=n_layers,
                                    num_decoder_layers=n_layers, dim_feedforward=2048, batch_first=True
        )
        # Final projection to vocabulary logits. No Softmax here: the training
        # loop uses CrossEntropyLoss, which applies log-softmax internally.
        self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False)

    def forward(self, src, tgt):
        """Run a teacher-forced forward pass.

        src, tgt: [batch_size, seq_len] LongTensors of token indices.
        Returns logits of shape [batch_size, tgt_len, tgt_vocab_size].
        """
        # Causal mask: decoder position i may only attend to positions <= i.
        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size()[-1])
        src_key_padding_mask = TranslatorModel.get_key_padding_mask(src)
        tgt_key_padding_mask = TranslatorModel.get_key_padding_mask(tgt)

        # Embed tokens, then add position information.
        src = self.embedding_src(src)
        tgt = self.embedding_tgt(tgt)
        src = self.positional_encoding(src)
        tgt = self.positional_encoding(tgt)

        out = self.transformer(src, tgt,
                               tgt_mask=tgt_mask,
                               src_key_padding_mask=src_key_padding_mask,
                               tgt_key_padding_mask=tgt_key_padding_mask)
        # dec_logits: [batch_size, tgt_len, tgt_vocab_size]
        # (previous comment mislabelled this as [batch, src_vocab_size, tgt_vocab_size])
        dec_logits = self.projection(out)
        return dec_logits

    @staticmethod
    def get_key_padding_mask(tokens):
        """Boolean key_padding_mask: True where the token is <PAD> (index 0).

        BUG FIX: the original built a new float tensor filled with -inf, which
        always landed on CPU/float32 regardless of `tokens`' device, and float
        -inf padding masks can produce NaNs / dtype-mismatch warnings in newer
        PyTorch. nn.Transformer documents a bool mask where True positions are
        ignored by attention; `tokens == 0` also preserves device and shape.
        """
        return tokens == 0
    
def make_batch(sentences):
    """Turn [encoder-in, decoder-in, decoder-target] triples into LongTensor batches.

    Relies on the module-level `src_vocab` / `tgt_vocab` dictionaries for the
    token-to-index lookup. Every sentence in a column must tokenize to the
    same length, since the rows are stacked into one rectangular tensor.
    """
    def to_ids(text, vocab):
        # Look up each whitespace-separated token in the given vocabulary.
        return [vocab[token] for token in text.split()]

    enc_inputs = [to_ids(triple[0], src_vocab) for triple in sentences]
    dec_inputs = [to_ids(triple[1], tgt_vocab) for triple in sentences]
    dec_targets = [to_ids(triple[2], tgt_vocab) for triple in sentences]
    return (torch.LongTensor(enc_inputs),
            torch.LongTensor(dec_inputs),
            torch.LongTensor(dec_targets))

def translate_predictions(predictions):
    """Convert batches of predicted target-token indices back into sentences.

    predictions: iterable of index tensors; uses the module-level `tgt_vocab`.
    Returns a list with one space-joined sentence per prediction row.
    """
    # Invert the target vocabulary: index -> word.
    idx_to_word = {index: word for word, index in tgt_vocab.items()}
    return [' '.join(idx_to_word[token.item()] for token in row)
            for row in predictions]

if __name__ == '__main__':

    # Toy parallel corpus: each entry is [encoder input, decoder input, decoder target].
    # <CLS> marks the start of a sentence, <PAD> is the padding token, <END> marks the end.
    sentences = [
        ['ich mochte ein bier <PAD>', '<CLS> i want a beer', 'i want a beer <END>'],
        ['du trinkst einen kaffee <PAD>', '<CLS> you drink a coffee', 'you drink a coffee <END>'],
        ['er isst einen apfel <PAD>', '<CLS> he eats an apple', 'he eats an apple <END>'],
        ['sie liest ein buch <PAD>', '<CLS> she reads a book', 'she reads a book <END>'],
        ['wir sehen einen film <PAD>', '<CLS> we watch a movie', 'we watch a movie <END>'],
        ['ihr spielt ein spiel <PAD>', '<CLS> you play a game', 'you play a game <END>'],
        ['sie schreiben einen brief <PAD>', '<CLS> they write a letter', 'they write a letter <END>'],
        ['ich fahre ein auto <PAD>', '<CLS> i drive a car', 'i drive a car <END>'],
        ['du kaufst ein haus <PAD>', '<CLS> you buy a house', 'you buy a house <END>'],
        
        ['er liest ein buch <PAD>', '<CLS> he reads a book', 'he reads a book <END>'],
    ]



    # Build the source (German) vocabulary; index 0 is reserved for <PAD>.
    # These are module-level globals read by make_batch() and TranslatorModel.
    src_vocab = {
        '<PAD>': 0, 'ich': 1, 'mochte': 2, 'ein': 3, 'bier': 4, 'du': 5, 'trinkst': 6, 'einen': 7, 'kaffee': 8,
        'er': 9, 'isst': 10, 'apfel': 11, 'sie': 12, 'liest': 13, 'buch': 14, 'wir': 15, 'sehen': 16, 'film': 17,
        'ihr': 18, 'spielt': 19, 'spiel': 20, 'schreiben': 21, 'brief': 22, 'fahre': 23, 'auto': 24, 'kaufst': 25,
        'haus': 26, 'malt': 27, 'bild': 28, 'esse': 29, 'trinkt': 30, 'wasser': 31, 'klavier': 32,
    }
    src_vocab_size = len(src_vocab)

    # Target (English) vocabulary, also with <PAD> at index 0.
    tgt_vocab = {
        '<PAD>': 0, 'i': 1, 'want': 2, 'a': 3, 'beer': 4, '<CLS>': 5, '<END>': 6, 'you': 7, 'drink': 8, 'coffee': 9,
        'he': 10, 'eats': 11, 'an': 12, 'apple': 13, 'she': 14, 'reads': 15, 'book': 16, 'we': 17, 'watch': 18, 'movie': 19,
        'play': 20, 'game': 21, 'they': 22, 'write': 23, 'letter': 24, 'drive': 25, 'car': 26, 'buy': 27, 'house': 28,
        'paints': 29, 'picture': 30, 'eat': 31, 'water': 32, 'piano': 33, 'read': 34, 'drinks': 35, 'plays': 36,
    }
    tgt_vocab_size = len(tgt_vocab)
    
    # Split into train/test; shuffle=False keeps the last sentence (10% of 10 = 1)
    # as the test example.
    train_sentences, test_sentences = train_test_split(sentences, test_size=0.1, shuffle=False)
    
    # Tokenize both splits into LongTensor batches.
    train_enc_inputs, train_dec_inputs, train_target_batch = make_batch(train_sentences)
    test_enc_inputs, test_dec_inputs, test_target_batch = make_batch(test_sentences)
    
    # Model hyperparameters
    d_model = 64  # Embedding Size
    n_layers = 6  # number of Encoder of Decoder Layer
    n_heads = 8  # number of heads in Multi-Head Attention

    model = TranslatorModel(d_model=d_model, n_layers=n_layers, n_heads=n_heads)
    criterion = nn.CrossEntropyLoss()  # applies log-softmax internally
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    # Loss per epoch, collected for the plot at the end
    loss_values = []
    
    # Full-batch training loop (the whole training set fits in one batch).
    num_epochs = 50
    for epoch in range(num_epochs):
        optimizer.zero_grad()
        # Teacher-forced forward pass: logits are [batch, tgt_len, tgt_vocab_size]
        outputs_dec_logits = model(train_enc_inputs, train_dec_inputs)
        # Flatten to [batch*tgt_len, tgt_vocab_size] vs. [batch*tgt_len] for CrossEntropyLoss
        outputs = outputs_dec_logits.contiguous().view(-1, outputs_dec_logits.size(-1))
        targets = train_target_batch.contiguous().view(-1)
        # Compute the loss
        loss = criterion(outputs, targets)
        loss_values.append(loss.item())
        print('Epoch:', '%04d' % (epoch + 1), 'loss =', '{:.6f}'.format(loss))
        # Backpropagate and update parameters
        loss.backward()
        optimizer.step()

    # Evaluation. NOTE(review): this feeds the ground-truth decoder inputs
    # (teacher forcing) rather than decoding autoregressively, so it measures
    # next-token prediction, not free-running translation quality.
    model = model.eval()
    with torch.no_grad():
        outputs = model(test_enc_inputs, test_dec_inputs)
        prediction = torch.argmax(outputs, dim=-1)
        # print(prediction)
        # Before translation
        print("Before translation:", test_sentences)
        translated_sentence = translate_predictions(prediction)
        # With such a tiny corpus the output may be ungrammatical.
        print("Translated sentence:", translated_sentence)

    # Plot and save the training-loss curve.
    plt.plot(range(1, num_epochs + 1), loss_values, label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss Curve')
    plt.legend()
    plt.savefig('Training_loss_curve.pdf')
    # plt.show()