import json
import random
import re
import time

import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
from tqdm import tqdm

# Device selection: use the GPU when available, otherwise fall back to CPU.
# Works the same on Windows and Linux.
if torch.cuda.is_available():
    print('当前正在使用GPU')
else:
    print('当前正在使用CPU')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")  # uncomment to force CPU execution
print('device:', device)

# Special token indices, shared by both the English and French vocabularies.
SOS_token = 0  # start-of-sequence marker
EOS_token = 1  # end-of-sequence marker
# Path to the tab-separated English<TAB>French sentence-pair corpus.
datapath = './data/eng-fra-v2.txt'

# Maximum sentence length (in tokens) supported by the attention mechanism.
MAX_LENGTH = 10

# Learning rate used by both Adam optimizers.
my_lr = 1e-4

# Number of training passes over the full dataset.
epochs = 2

# Probability of feeding the ground-truth token (teacher forcing) at each
# decoding step during training.
teacher_forcing_ratio = 0.5

def normal_str(s):
    """Normalize a sentence for tokenization.

    Lowercases, pads sentence-ending punctuation (. ! ?) with a leading
    space so it becomes its own token, and collapses every run of
    non-letter / non-punctuation characters to a single space.

    BUG FIX: the substitutions can introduce leading/trailing spaces
    (e.g. 'abc5' -> 'abc '); without the final strip(), the later
    split(' ') would yield empty-string tokens that pollute the vocab.
    """
    s = s.lower().strip()
    # Separate ., ! and ? from the preceding word.
    s = re.sub(r'([.!?])', r' \1', s)
    # Replace anything that is not a letter or sentence punctuation.
    s = re.sub(r'[^a-zA-Z.!?]+', r' ', s)
    return s.strip()


# Build the English and French vocabularies from the parallel corpus.
def get_data():
    """Read the tab-separated corpus and build word<->index mappings.

    Returns a 7-tuple:
        (eng_word2idx, eng_idx2word, eng_vocab_size,
         fra_word2idx, fra_idx2word, fra_vocab_size, sentence_pairs)
    """
    with open(datapath, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    # Each line holds "english<TAB>french"; normalize both sides.
    my_pairs = [[normal_str(part) for part in line.strip().split('\t')]
                for line in lines]

    # Indices 0/1 are reserved for the SOS/EOS special tokens.
    english_word2index = {'SOS': 0, 'EOS': 1}
    french_word2index = {'SOS': 0, 'EOS': 1}

    # Assign each unseen word the next free index.
    for pair in my_pairs:
        for token in pair[0].split(' '):
            english_word2index.setdefault(token, len(english_word2index))
        for token in pair[1].split(' '):
            french_word2index.setdefault(token, len(french_word2index))

    # Inverse mappings, used to turn predicted indices back into words.
    english_index2word = {idx: word for word, idx in english_word2index.items()}
    french_index2word = {idx: word for word, idx in french_word2index.items()}

    return english_word2index, english_index2word, len(english_word2index), \
        french_word2index, french_index2word, len(french_word2index), my_pairs


# Dataset wrapping the normalized (english, french) sentence pairs.
class MyPairDataset(Dataset):
    """Map-style dataset yielding (english_indices, french_indices) tensors.

    Each sentence is converted to word indices via the module-level
    vocabularies and terminated with EOS_token.
    """

    def __init__(self, my_pairs):
        super(MyPairDataset, self).__init__()
        self.my_pairs = my_pairs
        self.sample_len = len(my_pairs)

    def __len__(self):
        return self.sample_len

    def __getitem__(self, index):
        # Clamp out-of-range indices instead of raising.
        index = min(max(0, index), self.sample_len - 1)

        # BUG FIX: read from self.my_pairs, not the module-level `my_pairs`
        # global, so the dataset works with whatever pairs it was given.
        english_x = self.my_pairs[index][0]
        french_y = self.my_pairs[index][1]

        # BUG FIX: sequences must end with EOS_token (1), not SOS_token (0);
        # both seq2seq_evaluate() and model_predict() treat EOS as the
        # terminator, so training targets must match.
        x = [english_word2index[word] for word in english_x.split(' ')]
        x.append(EOS_token)
        torch_x = torch.tensor(x, dtype=torch.long)

        y = [french_word2index[word] for word in french_y.split(' ')]
        y.append(EOS_token)
        torch_y = torch.tensor(y, dtype=torch.long)

        return torch_x, torch_y


# GRU-based encoder
class MyEncoderGRU(nn.Module):
    """Encode a batch of token indices into GRU hidden states.

    Embedding and GRU share the same dimensionality, so a [1, T] index
    tensor becomes a [1, T, hidden_size] output plus a [1, 1, hidden_size]
    final hidden state.
    """

    def __init__(self, vocab_size, hidden_size):
        super(MyEncoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # Token-index -> dense-vector lookup table.
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
        # Single-layer GRU operating on batch-first sequences.
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size,
                          num_layers=1, batch_first=True)

    def forward(self, x, hidden):
        # [1, T] -> embed -> [1, T, hidden_size], then run the whole
        # sequence through the GRU in one call.
        return self.gru(self.embedding(x), hidden)

    def init_hidden(self):
        # Zero initial state shaped [num_layers, batch, hidden_size].
        return torch.zeros(1, 1, self.hidden_size)

# GRU-based decoder (no attention)
class MyDecoderGRU(nn.Module):
    """Decode one target token per call, returning log-probabilities."""

    def __init__(self, vocab_size, hidden_size):
        super(MyDecoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # Target-token embedding, same width as the GRU input.
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size,
                          num_layers=1, batch_first=True)
        # Projects the hidden state to per-word scores.
        self.linear = nn.Linear(in_features=hidden_size, out_features=vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input, hidden):
        gru_out, hidden = self.gru(self.embedding(input), hidden)
        # gru_out[-1] drops the batch dimension (batch size is assumed 1),
        # leaving [T, hidden_size] for the projection.
        scores = self.linear(gru_out[-1])
        return self.softmax(scores), hidden


# Smoke-test the plain GRU encoder/decoder pair on a single batch.
def demo_MyDecoderGRU_test():
    loader = DataLoader(dataset=MyPairDataset(my_pairs), batch_size=1, shuffle=True)

    for x, y in loader:
        print('特征x和目标y的形状:', x.size(), y.size())
        # Encode the whole source sentence in one call.
        encoder = MyEncoderGRU(vocab_size=english_word_n, hidden_size=10)
        print('编码器模型结构:', encoder)
        hidden = encoder.init_hidden()
        output, hidden = encoder(x, hidden)
        print('编码器输出的形状:', output.shape)
        print('编码器输出的结果:', output)
        print('编码器hidden的形状:', hidden.shape)
        print('编码器hidden的结果:', hidden)
        # Decode the target one token at a time, seeded with the encoder state.
        decoder = MyDecoderGRU(vocab_size=french_word_n, hidden_size=10)
        print('解码器模型结构:', decoder)
        for step in range(y.shape[1]):
            print(f"========解码第{step + 1}次========")
            step_input = y[0][step].view(1, -1)
            output, hidden = decoder(step_input, hidden)
            print(f'解码器输出的形状：-->{output.shape}')
            print(f'解码器输出的结果：-->{output}')
            print(f'解码器hidden的形状：-->{hidden.shape}')
            print(f'解码器hidden的结果：-->{hidden}')
        break

# GRU decoder with attention over the encoder outputs.
class AttentionDecoderGRU(nn.Module):
    """One-step attention decoder.

    forward(Q, K, V):
        Q -- current input token indices, shape [1, 1]
        K -- previous decoder hidden state, shape [1, 1, hidden_size]
        V -- padded encoder outputs, shape [max_len, hidden_size]
    Returns (log_probs [1, vocab_size], hidden [1, 1, hidden_size],
    attention_weights [1, max_len]).
    """

    def __init__(self, vocab_size, hidden_size, dropout_p=0.1, max_len=MAX_LENGTH):
        super(AttentionDecoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.dropout_p = dropout_p
        self.max_len = max_len

        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=self.hidden_size)
        self.dropout = nn.Dropout(p=self.dropout_p)
        # Scores each of the max_len encoder positions from [query; hidden].
        self.attention = nn.Linear(self.hidden_size * 2, max_len)
        # Fuses the query embedding with the attention context vector.
        self.attention_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.gru = nn.GRU(input_size=self.hidden_size, hidden_size=self.hidden_size,
                          num_layers=1, batch_first=True)
        self.linear = nn.Linear(in_features=self.hidden_size, out_features=vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, Q, K, V):
        # Embed (and regularize) the current input token: [1,1] -> [1,1,H].
        query = self.dropout(self.embedding(Q))

        # Attention distribution over encoder positions: [1, max_len].
        scores = self.attention(torch.cat((query[-1], K[-1]), dim=-1))
        attention_weights = torch.softmax(scores, dim=-1)

        # Weighted sum of encoder outputs (the context vector): [1, 1, H].
        attention_applied = torch.bmm(attention_weights.unsqueeze(0), V.unsqueeze(0))

        # Combine query and context, then run one GRU step seeded with K.
        fused = torch.cat((query[0], attention_applied[0]), dim=-1)
        gru_input = torch.relu(self.attention_combine(fused).unsqueeze(0))
        output, hidden = self.gru(gru_input, K)

        # Project the GRU output to vocabulary log-probabilities.
        return self.softmax(self.linear(output[-1])), hidden, attention_weights

def demo_AttentionDecoderGRU_test():
    """Smoke-test the encoder + attention decoder on a single batch."""
    loader = DataLoader(dataset=MyPairDataset(my_pairs), batch_size=1, shuffle=True)

    # Build one encoder and one attention decoder to exercise together.
    encoder = MyEncoderGRU(vocab_size=english_word_n, hidden_size=10)
    print('编码器模型结构:', encoder)

    attn_decoder = AttentionDecoderGRU(vocab_size=french_word_n, hidden_size=10,
                                       dropout_p=0.1, max_len=MAX_LENGTH)
    print('解码器模型结构:', attn_decoder)

    for x, y in loader:
        # 1. Encode the whole source sentence in a single pass.
        print('特征x和目标y的形状:', x.size(), y.size())
        encode_output, encoder_hidden = encoder(x, encoder.init_hidden())
        print('编码器输出的形状:', encode_output.shape)
        print('编码器输出的结果:', encode_output)
        print('编码器encoder_hidden的形状:', encoder_hidden.shape)
        print('编码器encoder_hidden的结果:', encoder_hidden)

        # 2. Decode token by token. Prepare Q (current input token),
        #    K (decoder hidden state) and V (fixed-size encoder memory).
        input_y = torch.tensor([[SOS_token]])
        decoder_hidden = encoder_hidden
        # Pad/truncate the encoder outputs to a [MAX_LENGTH, hidden] bank.
        encoder_output_c = torch.zeros(MAX_LENGTH, encoder.hidden_size)
        for pos in range(min(encode_output.shape[1], MAX_LENGTH)):
            encoder_output_c[pos] = encode_output[0, pos]
        for step in range(y.shape[1]):
            print(f"========解码第{step + 1}次========")
            output, decoder_hidden, attention_weights = attn_decoder(
                input_y, decoder_hidden, encoder_output_c)
            print(f'解码器输出的形状：-->{output.shape}')
            print(f'解码器输出的结果：-->{output}')
            print(f'解码器decoder_hidden的形状：-->{decoder_hidden.shape}')
            print(f'解码器decoder_hidden的结果：-->{decoder_hidden}')

            # Teacher-style stepping: feed the ground-truth token next.
            input_y = y[0][step].view(1, -1)
        break

def train_iter(x, y, my_encoder_gru, my_decoder_gru, encoder_adam, decoder_adam, criterion):
    """Train on a single (x, y) pair and return the average per-token loss.

    x -- source indices, shape [1, src_len]
    y -- target indices, shape [1, tgt_len]
    The sequence loss is accumulated over all decoding steps, then both
    optimizers are stepped once.

    Refactor: the teacher-forcing and free-running branches previously
    duplicated ~20 identical lines; they are merged into one loop that
    differs only in how the next decoder input is chosen. Behavior is
    unchanged.
    """
    # 1. Encode the whole source sentence in one pass.
    #    encoder_output: [1, src_len, H]; encoder_hidden: [1, 1, H]
    h0 = my_encoder_gru.init_hidden().to(device)
    encoder_output, encoder_hidden = my_encoder_gru(x, h0)

    # 2. Prepare decoder inputs: Q = SOS token, K = encoder final hidden,
    #    V = encoder outputs padded/truncated to MAX_LENGTH rows.
    input_y = torch.tensor([[SOS_token]]).to(device)
    decoder_hidden = encoder_hidden
    encoder_output_c = torch.zeros(MAX_LENGTH, my_encoder_gru.hidden_size).to(device)
    for i in range(min(encoder_output.shape[1], MAX_LENGTH)):
        encoder_output_c[i] = encoder_output[0, i]

    my_loss = 0.0
    # Decide once per sample whether to teacher-force this sequence.
    use_teacher_forcing = random.random() < teacher_forcing_ratio

    # 3. Decode one token per step, accumulating NLL loss.
    for idx in range(y.shape[1]):
        # output_y: [1, vocab]; attention_weights: [1, MAX_LENGTH]
        output_y, decoder_hidden, attention_weights = my_decoder_gru(
            input_y, decoder_hidden, encoder_output_c)

        # Gold token for this step as a length-1 tensor for NLLLoss.
        target_y = y[0][idx].view(1)
        my_loss += criterion(output_y, target_y)

        if use_teacher_forcing:
            # Feed the ground-truth token as the next input.
            input_y = y[0][idx].view(1, -1)
        else:
            # Feed the model's own best guess; stop early on EOS.
            topv, topi = torch.topk(output_y, k=1)
            if topi.item() == EOS_token:
                break
            input_y = topi.detach()

    # 4. One optimizer step over the accumulated sequence loss.
    encoder_adam.zero_grad()
    decoder_adam.zero_grad()
    my_loss.backward()
    encoder_adam.step()
    decoder_adam.step()

    # NOTE: normalized by the full target length even when decoding stopped
    # early at EOS, matching the original behavior.
    return my_loss.item() / y.shape[1]
def train_model():
    """Full training loop.

    Builds the dataloader, models and optimizers, trains for `epochs`
    passes over the corpus, checkpoints both models after every epoch,
    and dumps the loss-curve data to ./result/result.json for plotting.
    """
    my_dataset = MyPairDataset(my_pairs)

    # batch_size must stay 1: the decoder assumes a single-sample batch.
    dataloader = DataLoader(dataset=my_dataset, batch_size=1, shuffle=True)

    # Instantiate the encoder and move it to the training device.
    my_encoder_gru = MyEncoderGRU(vocab_size=english_word_n, hidden_size=256)
    my_encoder_gru = my_encoder_gru.to(device)

    # Instantiate the attention decoder.
    my_attention_decoder = AttentionDecoderGRU(vocab_size=french_word_n, hidden_size=256, dropout_p=0.1,
                                               max_len=MAX_LENGTH)
    my_attention_decoder = my_attention_decoder.to(device)

    # NLL loss pairs with the decoder's LogSoftmax output.
    criterion = nn.NLLLoss()

    # Separate Adam optimizers for encoder and decoder parameters.
    encoder_optim = optim.Adam(params=my_encoder_gru.parameters(), lr=my_lr)
    decoder_optim = optim.Adam(params=my_attention_decoder.parameters(), lr=my_lr)

    total_loss = 0.0          # running loss over the entire run
    total_item = 0            # number of samples processed so far
    avg_plot_loss_list = []   # 100-step average losses, saved for plotting
    start_time = time.time()

    for epoch in range(1, epochs + 1):
        print_loss_total = 0.0   # accumulator for the 1000-step log line
        plot_loss_total = 0.0    # accumulator for the 100-step plot point
        epoch_time = time.time()
        for item, (x, y) in enumerate(tqdm(dataloader), start=1):
            x = x.to(device)
            y = y.to(device)
            my_loss = train_iter(x, y, my_encoder_gru, my_attention_decoder, encoder_optim, decoder_optim, criterion)
            total_loss += my_loss
            print_loss_total += my_loss
            plot_loss_total += my_loss
            total_item += 1
            # Log progress every 1000 steps.
            if item % 1000 == 0:
                avg_print_loss = print_loss_total / 1000
                print_loss_total = 0.0
                use_time = time.time() - epoch_time
                avg_loss = total_loss / total_item
                print("当前训练的轮次: %d, 平均总损失:%.4f, 每1000步平均损失:%.4f, 用时:%ds" % (
                    epoch, avg_loss, avg_print_loss, use_time))

            # Record a plot point every 100 steps.
            if item % 100 == 0:
                avg_plot_loss = plot_loss_total / 100
                avg_plot_loss_list.append(avg_plot_loss)
                plot_loss_total = 0.0

        # Checkpoint both models every epoch (`epoch % 1 == 0` is always true).
        if epoch % 1 == 0:
            torch.save(my_encoder_gru.state_dict(), './model/encoder_gru_%d.bin' % (epoch))
            torch.save(my_attention_decoder.state_dict(), './model/decoder_gru_%d.bin' % (epoch))

    # Persist the plotting data so show_results() can run later.
    total_time = int(time.time() - start_time)
    result = {
        'avg_plot_loss_list': avg_plot_loss_list,
        'total_time': total_time,
    }
    with open('./result/result.json', 'w') as f:
        f.write(json.dumps(result))

def read_json(data_path):
    """Load the training-result JSON and return (loss_list, total_time)."""
    with open(data_path, 'r') as f:
        result = json.load(f)
    return result["avg_plot_loss_list"], result["total_time"]


def show_results():
    """Print total training time and plot the saved loss curve."""
    losses, elapsed = read_json('./result/result.json')
    print("总共训练时间:%ds" % (elapsed))
    # Draw the 100-step average loss curve and save it before showing.
    plt.plot(losses)
    plt.savefig('./img/eng2french_loss.png')
    plt.show()

def seq2seq_evaluate(tensor_x, my_encoder_gru, my_decoder_gru):
    """Greedy-decode one source sentence.

    tensor_x -- source indices, shape [1, src_len]
    Returns (predicted_french_words, attention_weights) where the attention
    tensor holds one row per decoded step.
    """
    my_encoder_gru.eval()
    my_decoder_gru.eval()
    with torch.no_grad():
        # 1. Encode the whole source sentence in one pass.
        #    encoder_output: [1, src_len, H]; encoder_hidden: [1, 1, H]
        encoder_output, encoder_hidden = my_encoder_gru(tensor_x, my_encoder_gru.init_hidden().to(device))

        # 2. Prepare Q/K/V for step-by-step decoding:
        #    Q = SOS token, K = encoder final hidden,
        #    V = encoder outputs padded/truncated to MAX_LENGTH rows.
        input_y = torch.tensor([[SOS_token]], device=device)
        decoder_hidden = encoder_hidden
        encoder_output_c = torch.zeros(MAX_LENGTH, my_encoder_gru.hidden_size, device=device)
        for i in range(min(encoder_output.shape[1], MAX_LENGTH)):
            encoder_output_c[i] = encoder_output[0, i]

        # Predicted words, and one attention row per decoding step
        # (kept for visualization).
        decoder_word_list = []
        decoder_attention_weights = torch.zeros(MAX_LENGTH, MAX_LENGTH, device=device)

        # 3. Greedy decoding, at most MAX_LENGTH steps.
        for idx in range(MAX_LENGTH):
            output_y, decoder_hidden, attention_weights = my_decoder_gru(input_y, decoder_hidden, encoder_output_c)
            decoder_attention_weights[idx] = attention_weights

            topv, topi = torch.topk(output_y, k=1)
            if topi.item() == EOS_token:
                decoder_word_list.append('<EOS>')
                break
            else:
                # Map the predicted index back to a French word.
                decoder_word_list.append(french_index2word[topi.item()])

            # Feed the prediction as the next input.
            input_y = topi.detach()

        # BUG FIX: slice by the decode-step counter `idx`, not the leftover
        # encoder-copy index `i` — the old code truncated the attention map
        # by source length instead of by the number of decoded steps.
        return decoder_word_list, decoder_attention_weights[:idx + 1]


def model_predict():
    """Load the epoch-2 checkpoints and translate a few sample sentences."""
    # Load the trained models from disk.
    # Instantiate the encoder with the same sizes used during training.
    english_vocab_size = english_word_n  # 2803
    hidden_size1 = 256
    my_encoder_gru = MyEncoderGRU(vocab_size=english_vocab_size, hidden_size=hidden_size1)
    my_encoder_gru.load_state_dict(torch.load('./model/encoder_gru_2.bin', weights_only=True), strict=False)
    # If the checkpoint was trained on GPU, map_location='cpu' remaps it to CPU.
    # strict=False ignores mismatched parameter names when the structure matches.
    # my_encoder_gru.load_state_dict(torch.load('./model/encoder_gru_2.bin', map_location='cpu'), strict=False)
    my_encoder_gru = my_encoder_gru.to(device)
    print('my_encoder_gru:', my_encoder_gru)

    # Instantiate the attention decoder with the training-time hyperparameters.
    french_vocab_size = french_word_n  # 4345
    hidden_size2 = 256
    dropout_p = 0.1
    max_len = MAX_LENGTH
    my_decoder_gru = AttentionDecoderGRU(vocab_size=french_vocab_size, hidden_size=hidden_size2,
                                         dropout_p=dropout_p, max_len=max_len)
    my_decoder_gru.load_state_dict(torch.load('./model/decoder_gru_2.bin', weights_only=True), strict=False)
    my_decoder_gru = my_decoder_gru.to(device)
    print('my_decoder_gru:', my_decoder_gru)

    # Sample (english, french) pairs to translate.
    sample_pairs = [
        ['i m impressed with your french .', 'je suis impressionne par votre francais .'],
        ['i m more than a friend .', 'je suis plus qu une amie .'],
        ['she is beautiful like her mother .', 'elle est belle comme sa mere .']
    ]
    print('len(sample_pairs):', len(sample_pairs))

    # Predict one sample at a time.
    for item, (x, y) in enumerate(sample_pairs):
        # x  'i m impressed with your french .'
        # y  'je suis impressionne par votre francais .'
        # Convert the text to indices, terminate with EOS, then tensorize.
        x_list = [english_word2index[i] for i in x.split(' ')]
        x_list.append(EOS_token)
        # [src_len] --> [1, src_len]
        tensor_x = torch.tensor(x_list, dtype=torch.long, device=device).view(1, -1)

        decoder_list, decoder_attention_weights = seq2seq_evaluate(tensor_x, my_encoder_gru, my_decoder_gru)
        print('*' * 70)
        french_str = ' '.join(decoder_list)
        print('英文：', x)
        print('预测法文：', french_str)
        print('真实法文：', y)

if __name__ == '__main__':
    # Quick normalization check:
    # s = '   You are5555 my best friend.  '
    # r = normal_str(s)
    # print(r)

    # Build the vocabularies and sentence pairs once; the functions below
    # all rely on these module-level names.
    english_word2index, english_index2word, english_word_n, \
        french_word2index, french_index2word, french_word_n, my_pairs = get_data()
    #
    # dataset = MyPairDataset(my_pairs)
    # print(len(dataset))
    # print(dataset[0])

    # Uncomment the stage you want to run:
    # demo_MyDecoderGRU_test()
    # demo_AttentionDecoderGRU_test()
    # train_model()
    # show_results()
    model_predict()