import re  # regular expressions, used for text normalization
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import time
import random
import matplotlib.pyplot as plt
from tqdm import tqdm

device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # prefer GPU when available
print(device)
# index of the start-of-sentence token
SOS_token = 0
# index of the end-of-sentence token
EOS_token = 1
# maximum sentence length in tokens (punctuation included)
MAX_LENGTH = 10
# path to the tab-separated English/French corpus
data_path = './00-data/eng-fra-v2.txt'


# Text-cleaning helper applied to both sides of every sentence pair.
def normal_str(s):
    """Normalize a raw sentence.

    Lowercase and strip *s*, put a space before each '.', '?' or '!',
    then collapse every run of characters outside [a-zA-Z.?!] into a
    single space. Returns the cleaned string.
    """
    lowered = s.lower().strip()
    # Isolate sentence-ending punctuation as its own token.
    padded = re.sub(r'([.?!])', r' \1', lowered)
    # Everything that is not a letter or kept punctuation becomes one space.
    return re.sub(r'[^a-zA-Z.?!]+', ' ', padded)

# Load the corpus and build the English and French vocabularies.
def my_getdata():
    """Read the tab-separated corpus and build both vocabularies.

    Returns a 7-tuple:
        english_word2index, english_index2word, english_word_n,
        french_word2index, french_index2word, french_word_n, my_pairs
    where *_word_n is the vocabulary size (including SOS/EOS) and
    my_pairs is a list of [english_sentence, french_sentence] pairs.
    """
    # Use a context manager so the file handle is always closed
    # (the original left the file open).
    with open(data_path, encoding='utf-8') as f:
        my_lines = f.read().strip().split('\n')
    # Each line is "english<TAB>french"; normalize both sides.
    my_pairs = [[normal_str(s) for s in line.split('\t')] for line in my_lines]

    # word -> index maps, pre-seeded with the special tokens.
    english_word2index = {"SOS": SOS_token, "EOS": EOS_token}
    english_word_n = 2
    french_word2index = {"SOS": SOS_token, "EOS": EOS_token}
    french_word_n = 2
    for pair in my_pairs:
        for word in pair[0].split(' '):
            if word not in english_word2index:
                english_word2index[word] = english_word_n
                english_word_n += 1
        for word in pair[1].split(' '):
            if word not in french_word2index:
                french_word2index[word] = french_word_n
                french_word_n += 1
    # index -> word reverse maps (used to decode predictions back to text).
    english_index2word = {v: k for k, v in english_word2index.items()}
    french_index2word = {v: k for k, v in french_word2index.items()}

    return english_word2index, english_index2word, english_word_n, french_word2index, french_index2word, french_word_n, my_pairs

# Build the vocabularies once at import time; the classes and functions
# below read these module-level globals.
(english_word2index, english_index2word, english_word_n,
 french_word2index, french_index2word, french_word_n, my_pairs) = my_getdata()

# Dataset wrapper over the normalized sentence pairs.
class MyDataset(Dataset):
    """Yields (english_tensor, french_tensor) id pairs, each EOS-terminated."""

    def __init__(self, my_pairs):
        self.my_pairs = my_pairs
        self.sample_len = len(my_pairs)

    def __getitem__(self, index):
        # Clamp out-of-range indices into [0, sample_len - 1] instead of raising.
        if index < 0:
            index = 0
        elif index > self.sample_len - 1:
            index = self.sample_len - 1

        pair = self.my_pairs[index]
        english_x = pair[0]
        french_y = pair[1]

        # Map words to ids via the module-level vocabularies and append EOS.
        ids_x = [english_word2index[word] for word in english_x.split(' ')] + [EOS_token]
        tensor_x = torch.tensor(ids_x, dtype=torch.long, device=device)

        ids_y = [french_word2index[word] for word in french_y.split(' ')] + [EOS_token]
        tensor_y = torch.tensor(ids_y, dtype=torch.long, device=device)

        return tensor_x, tensor_y

    def __len__(self):
        return self.sample_len

# GRU encoder: embeds source token ids and runs them through one GRU layer.
class EncoderGRU(nn.Module):
    def __init__(self, vocab_size, hidden_size):
        """vocab_size: number of distinct source (English) tokens.
        hidden_size: embedding size, which is also the GRU hidden size."""
        super(EncoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
        self.GRU = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, batch_first=True)

    def forward(self, input, hidden):
        # [1, seq] token ids -> [1, seq, hidden] embeddings -> GRU outputs + hidden.
        embedded = self.embedding(input)
        return self.GRU(embedded, hidden)

    def init_hidden(self):
        # Zero initial hidden state: [num_layers=1, batch=1, hidden].
        return torch.zeros(1, 1, self.hidden_size, device=device)

# Plain (attention-free) GRU decoder.
class DecoderGRU(nn.Module):
    def __init__(self, vocab_size, hidden_size):
        """vocab_size: number of distinct target (French) tokens.
        hidden_size: embedding size, which is also the GRU hidden size."""
        super(DecoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
        self.GRU = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, batch_first=True)
        self.out = nn.Linear(in_features=hidden_size, out_features=vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input, hidden):
        # input holds one token id shaped [1, 1].
        embedded = F.relu(self.embedding(input))
        gru_out, hidden = self.GRU(embedded, hidden)
        # gru_out[0]: [1, hidden] -> vocab logits -> log-probabilities [1, vocab].
        return self.softmax(self.out(gru_out[0])), hidden

# GRU decoder with attention over the (padded) encoder outputs.
class AttentionDecoder(nn.Module):
    def __init__(self, vocab_size, hidden_size, dropout_p=0.1, max_length=MAX_LENGTH):
        """vocab_size: target vocabulary size; hidden_size: embedding/GRU size;
        dropout_p: dropout applied to the embedded input; max_length: attention span."""
        super(AttentionDecoder, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
        self.dropout = nn.Dropout(p=dropout_p)

        # Attention step 1: score each encoder position from the concat [Q ; K].
        self.attn = nn.Linear(in_features=hidden_size * 2, out_features=max_length)
        # Attention step 2: fuse [Q ; attended V] back down to hidden_size.
        self.attn_combin = nn.Linear(in_features=hidden_size * 2, out_features=hidden_size)
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, batch_first=True)
        self.out = nn.Linear(in_features=hidden_size, out_features=vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, Q, K, V):
        # Q: previous predicted token id, shape [1, 1]
        # K: previous decoder hidden state, shape [1, 1, hidden]
        # V: stacked encoder outputs, shape [max_length, hidden]
        embedded = self.dropout(self.embedding(Q))  # [1, 1, hidden]

        # Attention weights over encoder positions: [1, max_length].
        scores = self.attn(torch.cat((embedded[0], K[0]), dim=-1))
        attn_weight = F.softmax(scores, dim=-1)

        # Weighted sum of encoder outputs (batched matmul): [1, 1, hidden].
        attn_applied = torch.bmm(attn_weight.unsqueeze(dim=0), V.unsqueeze(dim=0))

        # Re-combine the query with the attended context:
        # [1, hidden*2] -> linear -> [1, 1, hidden] -> relu.
        fused = torch.cat((embedded[0], attn_applied[0]), dim=-1)
        gru_input = F.relu(self.attn_combin(fused).unsqueeze(dim=0))

        # One GRU step seeded with the previous hidden state K.
        output, hidden = self.gru(gru_input, K)

        # Log-probabilities over the target vocabulary: [1, vocab].
        return self.softmax(self.out(output[0])), hidden, attn_weight

    def init_hidden(self):
        # Zero initial hidden state: [num_layers=1, batch=1, hidden].
        return torch.zeros(1, 1, self.hidden_size, device=device)
# Training hyper-parameters.
mylr = 1e-4  # learning rate for both Adam optimizers
epochs = 2  # number of passes over the corpus
teacher_forcing_ratio = 0.5  # probability of feeding the ground-truth token
print_interval_num = 1000  # iterations between console loss reports
plot_interval_num = 100  # iterations between recorded plot points
# Model training entry point.
def train_seq2seq():
    """Train the encoder + attention decoder for `epochs` passes, checkpoint
    after every epoch, and save/show a plot of the running loss."""
    dataset = MyDataset(my_pairs)
    dataloader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)

    encoder = EncoderGRU(vocab_size=english_word_n, hidden_size=256)
    encoder.to(device=device)
    decoder = AttentionDecoder(vocab_size=french_word_n, hidden_size=256)
    decoder.to(device=device)

    # One Adam optimizer per sub-model.
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=mylr)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=mylr)

    # NLLLoss pairs with the decoder's LogSoftmax output.
    cross_entropy = nn.NLLLoss()

    # Averages sampled here are plotted at the end of training.
    plot_loss_list = []

    for epoch_idx in range(1, epochs + 1):
        print_loss_total = 0.0
        plot_loss_total = 0.0
        start_time = time.time()

        for item, (x, y) in enumerate(tqdm(dataloader), start=1):
            my_loss = Train_Iters(x=x, y=y, my_encoder=encoder, my_attnDecoder=decoder,
                                  encoder_optimizer=encoder_optimizer,
                                  decoder_optimizer=decoder_optimizer,
                                  cross_entropy=cross_entropy)

            print_loss_total += my_loss
            plot_loss_total += my_loss

            # Periodic console report of the average loss since the last report.
            if item % print_interval_num == 0:
                print_loss_avg = print_loss_total / print_interval_num
                print_loss_total = 0.0
                print(f'轮次{epoch_idx}, 损失{print_loss_avg}， 时间{time.time() - start_time}')

            # Periodic sample for the loss curve.
            if item % plot_interval_num == 0:
                plot_loss_list.append(plot_loss_total / plot_interval_num)
                plot_loss_total = 0.0

        # Checkpoint both sub-models after every epoch.
        torch.save(encoder.state_dict(), f'./Encoder_{epoch_idx}.pt')
        torch.save(decoder.state_dict(), f'./Decoder_{epoch_idx}.pt')

    plt.figure()
    plt.plot(plot_loss_list)
    plt.savefig('./00-data/s2sq_loss.png')
    plt.show()

# Single optimization step on one (x, y) sentence pair.
def Train_Iters(x, y, my_encoder, my_attnDecoder, encoder_optimizer, decoder_optimizer, cross_entropy):
    """Run one forward/backward pass over a single sentence pair.

    x, y: LongTensors of shape [1, seq] (source / target token ids,
    EOS-terminated by MyDataset.__getitem__).
    Returns the loss averaged over the target length (float), for reporting.
    """
    # Encode the source sentence.
    encoder_hidden = my_encoder.init_hidden()
    encoder_output, encoder_hidden = my_encoder(input=x, hidden=encoder_hidden)

    # Fixed-size container for the encoder outputs (the attention V):
    # [MAX_LENGTH, hidden], zero-padded past the source length.
    encoder_output_c = torch.zeros(MAX_LENGTH, my_encoder.hidden_size, device=device)

    # Copy the per-step encoder outputs. Clamp to MAX_LENGTH: a source
    # sentence of MAX_LENGTH tokens gains an EOS in __getitem__, so an
    # unclamped loop would index past the buffer and raise IndexError.
    for i in range(min(x.shape[1], MAX_LENGTH)):
        encoder_output_c[i] = encoder_output[0, i]

    # K: the decoder starts from the encoder's final hidden state.
    decoder_hidden = encoder_hidden

    # Q: the first decoder input is the SOS token.
    input_y = torch.tensor([[SOS_token]], device=device)

    my_loss = 0.0
    y_len = y.shape[1]

    # Teacher forcing: with probability teacher_forcing_ratio, feed the
    # ground-truth token as the next decoder input instead of the prediction.
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        for idx in range(y_len):
            output_y, decoder_hidden, atte_weight = my_attnDecoder(input_y, decoder_hidden, encoder_output_c)
            # target_y is the ground-truth token at this step.
            target_y = y[0][idx].view(1)
            my_loss = my_loss + cross_entropy(output_y, target_y)
            # Feed the ground truth as the next decoder input.
            input_y = y[0][idx].view(1, -1)
    else:
        for idx in range(y_len):
            output_y, decoder_hidden, atte_weight = my_attnDecoder(input_y, decoder_hidden, encoder_output_c)
            target_y = y[0][idx].view(1)
            my_loss = my_loss + cross_entropy(output_y, target_y)

            # Feed the model's own best guess; stop once it predicts EOS.
            topv, topi = torch.topk(input=output_y, k=1)
            if topi.item() == EOS_token:
                break
            # detach(): do not back-propagate through the sampled token.
            input_y = topi.detach()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    my_loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    # Average over the full target length (matches the reference tutorial,
    # even when the non-teacher-forcing branch stops early at EOS).
    return my_loss.item() / y_len

def test_model():
    """Load the epoch-2 checkpoints and translate a few sample sentences,
    printing the source, the model's translation, and the reference."""
    english_vocab_size = english_word_n
    hidden_size = 256
    # Re-create the architecture before loading the weights.
    encoder_gru = EncoderGRU(english_vocab_size, hidden_size=hidden_size)
    # map_location=device lets GPU-trained checkpoints load on a CPU-only
    # machine (torch.load would otherwise fail deserializing CUDA tensors).
    encoder_gru.load_state_dict(torch.load('./Encoder_2.pt', map_location=device))
    encoder_gru.to(device=device)

    french_vocab_size = french_word_n
    atte_decoder_gru = AttentionDecoder(french_vocab_size, hidden_size=hidden_size)
    atte_decoder_gru.load_state_dict(torch.load('./Decoder_2.pt', map_location=device))
    atte_decoder_gru.to(device=device)

    my_sample_pairs = [
        ['you are really brave .', 'vous êtes vraiment courageux .'],
        ['he look very handsome just like his father .', 'il est beau comme son père .'],
        ['time is money .', 'le temps est .']
    ]
    for index, pair in enumerate(my_sample_pairs):
        x = pair[0]
        y = pair[1]
        # Convert the English sentence to an EOS-terminated id tensor of shape [1, seq].
        x_list = [english_word2index[word] for word in x.split(' ')]
        x_list.append(EOS_token)
        tensor_x = torch.tensor(x_list, dtype=torch.long, device=device).view(1, -1)
        decoder_list, decoder_atten_weights = Seq2Seq_Evaluate(tensor_x, encoder_gru, atte_decoder_gru)
        french_str = ' '.join(decoder_list)
        print(f'原始输入文本x--》{x}')
        print(f'模型翻译后的文本--》{french_str}')
        # Typo fixed in the label below: 发文 -> 法文 ("French text").
        print(f'原始的真实法文文本--》{y}')
        print('*'*80)


def Seq2Seq_Evaluate(x, encoder_rnn, my_attn_decoder_rnn):
    """Greedy-decode one source tensor x ([1, seq]) into French words.

    Returns (decoder_list, attention_weights): decoder_list is the predicted
    word list (ending in "<EOS>" when EOS is produced within MAX_LENGTH
    steps) and attention_weights holds one row per decoding step taken.
    """
    with torch.no_grad():
        encoder_output, encoder_hidden = encoder_rnn(x, encoder_rnn.init_hidden())
        # Fixed-size container for the encoder outputs (the attention V):
        # [MAX_LENGTH, hidden], zero-padded past the source length.
        encoder_output_c = torch.zeros(MAX_LENGTH, encoder_rnn.hidden_size, device=device)
        # Clamp the copy so an EOS-extended source of MAX_LENGTH+1 ids
        # cannot index past the buffer (IndexError in the original).
        for i in range(min(x.shape[1], MAX_LENGTH)):
            encoder_output_c[i] = encoder_output[0, i]
        # K: start decoding from the encoder's final hidden state.
        decoder_hidden = encoder_hidden

        # Q: the first decoder input is the SOS token.
        input_y = torch.tensor([[SOS_token]], device=device)

        decoder_list = []
        decoder_atten_weigth = torch.zeros(MAX_LENGTH, MAX_LENGTH, device=device)

        for i in range(MAX_LENGTH):
            output_y, decoder_hidden, attn_weight = my_attn_decoder_rnn(input_y, decoder_hidden, encoder_output_c)
            # Record the attention weights BEFORE the EOS check; the original
            # broke out first, silently dropping the final step's weights.
            decoder_atten_weigth[i] = attn_weight
            topv, topi = torch.topk(output_y, 1)

            if topi.item() == EOS_token:
                decoder_list.append("<EOS>")
                break
            french_word = french_index2word[topi.item()]
            decoder_list.append(french_word)
            input_y = topi.detach()
        return decoder_list, decoder_atten_weigth[:i+1]

if __name__ == '__main__':
    # Training is disabled here; run inference against the saved checkpoints.
    # train_seq2seq()
    test_model()
