import torch

from until import device, MAX_LENGTH, EOS_token
from encoder import MyEncoderGRU
from attentiondecoder import AttentionDecoderGRU
from test_model import seq2seq_evaluate
import matplotlib.pyplot as plt
from get_dict import get_data

# Load vocab mappings (word<->index), vocab sizes, and the sentence pairs
# for both languages from the project's data-loading helper.
english_word2index, english_index2word, english_word_n, \
    french_word2index, french_index2word, french_word_n, my_pairs = get_data()


def show_Attention():
    """Translate one sample English sentence with the trained seq2seq model
    and visualize the decoder attention weights as a heat map.

    Loads the encoder/decoder state dicts from ./model/, runs
    ``seq2seq_evaluate`` on a hard-coded sentence, prints the prediction,
    and saves the attention matrix plot to ./img/attention.png.
    """
    # --- Load the trained encoder ---
    english_vocab_size = english_word_n  # 2803
    hidden_size1 = 256
    my_encoder_gru = MyEncoderGRU(vocab_size=english_vocab_size, hidden_size=hidden_size1)
    my_encoder_gru.load_state_dict(torch.load("./model/encoder_gru_2.bin", weights_only=True))
    # If the model was trained on GPU, map_location='cpu' remaps tensors to CPU.
    # If variable names differ but the structure matches, strict=False ignores the mismatch:
    # my_encoder_gru.load_state_dict(torch.load('./model/encoder_gru_2.bin', map_location='cpu'), strict=False)
    my_encoder_gru = my_encoder_gru.to(device)
    print('my_encoder_gru:', my_encoder_gru)

    # --- Load the trained attention decoder ---
    french_vocab_size = french_word_n  # 4345
    hidden_size2 = 256
    dropout_p = 0.1
    max_len = MAX_LENGTH
    my_decoder_gru = AttentionDecoderGRU(vocab_size=french_vocab_size, hidden_size=hidden_size2,
                                         dropout_p=dropout_p, max_len=max_len)
    my_decoder_gru.load_state_dict(torch.load("./model/decoder_gru_2.bin", weights_only=True))
    my_decoder_gru = my_decoder_gru.to(device)
    print('my_decoder_gru:', my_decoder_gru)

    sentence = "we re both teachers ."
    target_sentence = "nous sommes tous deux enseignants ."
    # Source sentence -> token indices (plus EOS) -> long tensor of shape [1, seq_len].
    tmp_x = [english_word2index[word] for word in sentence.split(' ')]
    tmp_x.append(EOS_token)
    tensor_x = torch.tensor(tmp_x, dtype=torch.long, device=device).view(1, -1)
    # tensor_x.shape  [1,6]

    decoder_list, decoder_attention_weights = seq2seq_evaluate(tensor_x, my_encoder_gru, my_decoder_gru)
    # Fixed: the original printed decoder_list twice (a bare print followed by a labeled one).
    print('decoder_list:', decoder_list)
    french_str = ' '.join(decoder_list)
    print('英文:', sentence)
    print('预测法文:', french_str)
    print('实际法文:', target_sentence)

    # Idiomatic replacement for the original manual append loops.
    src_words = sentence.split(' ')
    tgt_words = french_str.split(' ')

    # Show the attention weights as a matrix image.
    # NOTE(review): the tick labels assume the matrix is
    # (len(tgt_words), len(src_words)); the input tensor also carries an
    # appended EOS with no matching x-tick, and the decoder may attend over
    # MAX_LENGTH positions — confirm against seq2seq_evaluate's output shape.
    plt.matshow(decoder_attention_weights.cpu().numpy())
    plt.xticks(range(len(src_words)), src_words, rotation=45)
    plt.yticks(range(len(tgt_words)), tgt_words)
    plt.savefig("./img/attention.png")
    plt.show()

    print(decoder_attention_weights.cpu().numpy())
    print(decoder_attention_weights.shape)

# Script entry point: render the attention heat map for the sample sentence.
if __name__ == '__main__':
    show_Attention()
