import re

import jieba
import csv

import numpy
import torch
import torch.nn as nn
from matplotlib import pyplot as plt, ticker
from src.Main.config import *
from src.Networks.Attn import Attn, LuongAttnDecoderRNN, GreedySearchDecoder
from src.Networks.EncoderRNN import EncoderRNN
from src.Main.train import loadData, embeddingWord
from src.LoadData.dataLoader import Voc
# import tensorflow as tf


def evaluate(encoder, decoder, searcher, voc, indexes_batch, max_length=MAX_LENGTH):
    """Greedy-decode a single index sequence and map the result back to words.

    :param encoder: trained EncoderRNN (unused directly; baked into *searcher*)
    :param decoder: trained LuongAttnDecoderRNN (unused directly; baked into *searcher*)
    :param searcher: GreedySearchDecoder callable: (input, lengths, max_len) -> (tokens, scores)
    :param voc: vocabulary providing index2word
    :param indexes_batch: flat list of word indices for ONE sentence
    :param max_length: decoding step cap
    :return: list of decoded word strings (may include 'EOS'/'PAD' tokens)
    """
    batch = [indexes_batch]
    seq_lengths = torch.tensor([len(seq) for seq in batch])
    # Time-major layout: (seq_len, batch) is what the encoder RNN expects
    input_tensor = torch.LongTensor(batch).transpose(0, 1).to(device)
    # NOTE: lengths stay on CPU — pack_padded_sequence requires CPU lengths
    seq_lengths = seq_lengths.to("cpu")
    tokens, scores = searcher(input_tensor, seq_lengths, max_length)
    # Map token ids back to surface words
    return [voc.index2word[tok.item()] for tok in tokens]


def testModel(test_model_path="../Models/2-2_500/100_checkpoint.tar", max_samples=300):
    """Evaluate a saved checkpoint on the test pairs and write results to CSV.

    :param test_model_path: path to a training checkpoint (.tar) holding the
        encoder/decoder/embedding state dicts and the vocabulary dict
    :param max_samples: cap on the number of test pairs to decode
        (generalized from the previous hard-coded 300)
    """
    voc, pairs = loadData(voc_path=VOC_PATH, pairs_path=TEST_PAIRS_PATH)
    embedding_layer = nn.Embedding(voc.num_words, embedding_dim=embedding_size)

    checkpoint = torch.load(test_model_path, map_location=device)
    encoder_state_dict = checkpoint['encoder']
    decoder_state_dict = checkpoint['decoder']
    embedding_layer.load_state_dict(checkpoint['embedding_layer'])
    # Restore the exact vocabulary the model was trained with
    voc.__dict__ = checkpoint['voc_dict']

    # Encoder
    encoder = EncoderRNN(input_size=voc.num_words,
                         hidden_size=hidden_size,
                         embedding_layer=embedding_layer,
                         n_layer=encoder_n_layers,
                         dropout=dropout)
    # Decoder
    decoder = LuongAttnDecoderRNN(attn_model=attn_model,
                                  embedding=embedding_layer,
                                  hidden_size=hidden_size,
                                  output_size=voc.num_words,
                                  n_layers=decoder_n_layers,
                                  drop_out=dropout)
    encoder.load_state_dict(encoder_state_dict)
    decoder.load_state_dict(decoder_state_dict)
    # Fix: evaluate() moves the input batch to `device`, so the models must
    # live there too or inference crashes on a CUDA device.
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Inference mode (disables dropout)
    encoder.eval()
    decoder.eval()

    searcher = GreedySearchDecoder(encoder=encoder, decoder=decoder)
    all_test_data = embeddingWord(batch_pairs=pairs)

    pad_inp, input_lengths, pad_output, mask, max_target_len = all_test_data
    header_list = ["input", "target_out", "Bot", "score"]
    data_list = []
    # Columns of pad_inp are individual sentences; decode up to max_samples of them
    for i in range(pad_inp.shape[1]):
        if i == max_samples:
            break
        data_list.append(
            out(voc, pad_inp[:, i], pad_output[:, i], encoder, decoder, searcher))

    # Derive the CSV name from the checkpoint's parent folder + file name,
    # e.g. ../Models/2-2_500/100_checkpoint.tar -> 2-2_500_100_checkpoint.csv
    csv_name = (test_model_path.split("/")[-2] + "_"
                + test_model_path.split("/")[-1].replace("tar", "csv"))
    with open("../../Datasets/MidData/" + csv_name,
              mode='w', encoding="utf-8", newline='') as t:
        writer = csv.DictWriter(t, header_list)
        writer.writeheader()
        writer.writerows(data_list)


def out(voc, pad_inp, pad_output, encoder, decoder, searcher):
    chinese_input = [str(voc.index2word[token.item()]) for token in pad_inp]
    chinese_input = " ".join(chinese_input)
    chinese_input = chinese_input[:chinese_input.index('EOS')]
    
    data_dic = {}
    tmp = {"input": str(chinese_input)}
    data_dic.update(tmp)
    
    chinese_target_output = [str(voc.index2word[token.item()]) for token in pad_output]
    chinese_target_output = " ".join(chinese_target_output)
    chinese_target_output = chinese_target_output[:chinese_target_output.index('EOS')]
    tmp = {"target_out": str(chinese_target_output)}
    data_dic.update(tmp)
    
    output_words = evaluate(encoder=encoder,
                            decoder=decoder,
                            searcher=searcher,
                            voc=voc,
                            indexes_batch=pad_inp.tolist())
    # print(output_words)

    output_words = output_words[:output_words.index('EOS')]
    
    output_words_tmp = " ".join(output_words)
    tmp = {"Bot": output_words_tmp}
    data_dic.update(tmp)
    tmp = {"score": ""}  # score
    data_dic.update(tmp)
    output_words[:] = [x for x in output_words if not (x == 'EOS' or x == 'PAD')]
    print(data_dic)
    return data_dic


def testByInput(word, test_model_path="../Models/2-2_500/100_checkpoint.tar"):
    """Answer a single user sentence with the trained chatbot and plot attention.

    :param word: raw input sentence (Chinese text, segmented with jieba)
    :param test_model_path: checkpoint to load the models from
    """
    voc = torch.load(VOC_PATH)
    input_sentence = word
    # Fix: the loop variable previously shadowed the `word` parameter.
    # Keep only in-vocabulary tokens, then terminate with EOS.
    input_seq_index_list = [voc.word2index[tok]
                            for tok in jieba.cut(input_sentence)
                            if tok in voc.word2index]
    input_seq_index_list.append(EOS_token)

    embedding_layer = nn.Embedding(voc.num_words, embedding_dim=embedding_size)
    # Encoder
    encoder = EncoderRNN(input_size=voc.num_words,
                         hidden_size=hidden_size,
                         embedding_layer=embedding_layer,
                         n_layer=encoder_n_layers,
                         dropout=dropout)
    # Decoder
    decoder = LuongAttnDecoderRNN(attn_model=attn_model,
                                  embedding=embedding_layer,
                                  hidden_size=hidden_size,
                                  output_size=voc.num_words,
                                  n_layers=decoder_n_layers,
                                  drop_out=dropout)

    checkpoint = torch.load(test_model_path, map_location=device)
    embedding_layer.load_state_dict(checkpoint['embedding_layer'])
    # Restore the exact vocabulary the model was trained with
    voc.__dict__ = checkpoint['voc_dict']
    encoder.load_state_dict(checkpoint['encoder'])
    decoder.load_state_dict(checkpoint['decoder'])
    # Fix: evaluate() moves the input batch to `device`, so the models must
    # live there too or inference crashes on a CUDA device.
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Inference mode (disables dropout)
    encoder.eval()
    decoder.eval()

    searcher = GreedySearchDecoder(encoder=encoder, decoder=decoder)
    output_words = evaluate(encoder=encoder,
                            decoder=decoder,
                            searcher=searcher,
                            voc=voc,
                            indexes_batch=input_seq_index_list)
    # Fix: guard against the decoder never emitting 'EOS' within max_length;
    # the old unguarded .index('EOS') raised ValueError in that case.
    if 'EOS' in output_words:
        output_words = output_words[:output_words.index('EOS')]
    output_words[:] = [x for x in output_words if x not in ('EOS', 'PAD')]
    print('【Bot:】', ' '.join(output_words))
    print(output_words)
    showAttention(input_sentence, output_words, attentions_list=searcher.att)


def cutInput(intput_sentence):
    """Segment a sentence with jieba and return the tokens joined by spaces."""
    tokens = jieba.cut(intput_sentence)
    return ' '.join(tokens).strip()


def showAttention(input_sentence, output_words, attentions_list):
    """Render the decoder attention weights as a heatmap with labeled axes.

    :param input_sentence: the raw input sentence (re-segmented for x labels)
    :param output_words: decoded output tokens (one row per decode step)
    :param attentions_list: per-step attention tensors; assumes each has
        shape (1, 1, src_len) — TODO confirm against GreedySearchDecoder
    """
    n_steps = len(output_words)
    # Stack the per-step attention vectors into an (n_steps, src_len) matrix
    attn_matrix = torch.ones(n_steps, attentions_list[0].shape[2])
    for step in range(n_steps):
        attn_matrix[step] = attentions_list[step].view(1, -1)
    print(attn_matrix.shape)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    heatmap = ax.matshow(attn_matrix.detach().numpy())  # optional: cmap='bone'
    fig.colorbar(heatmap)

    # SimHei so Chinese tick labels render instead of boxes
    plt.rcParams['font.sans-serif'] = ['SimHei']

    # Axis labels: segmented input words along x, decoded words along y
    segmented_input = cutInput(input_sentence)
    ax.set_xticklabels([''] + segmented_input.split(' ') + ['<EOS>'], rotation=0)
    ax.set_yticklabels([''] + output_words)

    # One tick per word
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()


if __name__ == '__main__':

    # Teach jieba the model vocabulary so user input is segmented into
    # the same tokens the model was trained on.
    vocab = torch.load(VOC_PATH)
    for vocab_word in vocab.word2index:
        jieba.add_word(vocab_word, freq=10000)

    # testModel()

    # Interactive REPL: read a sentence, print the bot's reply (Ctrl-C to quit)
    while True:
        sentence = input()
        testByInput(word=sentence)
