import torch
from utils.DataProcesser import cut_sentence, cut_paragraph
from entity.DataEntity import sentence_to_index, get_dataloader

device = "cuda" if torch.cuda.is_available() else "cpu"


def evaluate(dataset):
    """
    Compute classification accuracy over a test/validation dataset.

    :param dataset: iterable yielding batches of
        (source_w, target_w, source_s, target_s, source_p, target_p, label)
        tensors. Labels are assumed to be one-hot/score rows, so the
        predicted and true classes are both taken via ``argmax``.
    :return: fraction of correctly classified samples
        (0.0 for an empty dataset).
    """
    # map_location keeps this working on CPU-only machines even when the
    # checkpoint was saved from a GPU process.
    smash = torch.load(r"D:\data\sohu\LL_topic\smash_model.pkl", map_location=device)
    smash.eval()  # disable dropout/batch-norm training behavior during eval
    total_acc, total_count = 0, 0
    with torch.no_grad():
        for source_w, target_w, source_s, target_s, source_p, target_p, label in dataset:
            source_w = source_w.to(device)
            target_w = target_w.to(device)
            source_s = source_s.to(device)
            target_s = target_s.to(device)
            source_p = source_p.to(device)
            target_p = target_p.to(device)
            label = label.to(device)
            label_ = smash(source_w, target_w, source_s, target_s, source_p, target_p, 32)
            # Vectorized replacement for the per-sample Python loop:
            # count rows where the predicted class equals the labelled class.
            total_acc += (label_.argmax(dim=1) == label.argmax(dim=1)).sum().item()
            total_count += label.size(0)
    # Guard against ZeroDivisionError on an empty dataset.
    return total_acc / total_count if total_count else 0.0


def prediction(source, target):
    """
    Predict whether two raw (unprocessed) articles match.

    :param source: raw source article text
    :param target: raw target article text
    :return: '匹配' if the model predicts a match, otherwise '不匹配'
    """
    vocab = torch.load(r'D:\data\sohu\LL_topic\vocab')
    # map_location keeps this working on CPU-only machines; eval() disables
    # dropout/batch-norm training behavior for inference.
    model = torch.load(r"D:\data\sohu\LL_topic\smash_model.pkl", map_location=device)
    model.eval()

    # Word level: each article truncated/padded to 500 token indices.
    sw = torch.zeros(size=(1, 500), dtype=torch.long)
    tw = torch.zeros(size=(1, 500), dtype=torch.long)
    sw[0] = torch.tensor(sentence_to_index(vocab, source, 500))
    tw[0] = torch.tensor(sentence_to_index(vocab, target, 500))

    # Sentence level: up to 10 sentences of 20 tokens each.
    ss_tem = torch.zeros(size=(1, 10, 20), dtype=torch.long)
    ts_tem = torch.zeros(size=(1, 10, 20), dtype=torch.long)
    for ss, ts, idx in zip(cut_sentence(source, 10), cut_sentence(target, 10), range(10)):
        ss_tem[0][idx] = torch.tensor(sentence_to_index(vocab, ss, 20))
        ts_tem[0][idx] = torch.tensor(sentence_to_index(vocab, ts, 20))

    # Paragraph level: up to 3 paragraphs x 5 sentences x 20 tokens.
    sp_tem = torch.zeros(size=(1, 3, 5, 20), dtype=torch.long)
    tp_tem = torch.zeros(size=(1, 3, 5, 20), dtype=torch.long)
    for sp, tp, id1 in zip(cut_paragraph(source, 3, 5), cut_paragraph(target, 3, 5), range(3)):
        # BUG FIX: the original allocated a fresh zero tensor per paragraph
        # and assigned it over the whole sp_tem[0]/tp_tem[0], wiping the
        # previously filled paragraphs so only the last one survived (and the
        # temporaries were float, not long). Write directly into the batch
        # tensors instead.
        for sps, tps, id2 in zip(sp, tp, range(5)):
            sp_tem[0][id1][id2] = torch.tensor(sentence_to_index(vocab, sps, 20))
            tp_tem[0][id1][id2] = torch.tensor(sentence_to_index(vocab, tps, 20))

    with torch.no_grad():
        # Move inputs to the same device as the model (consistent with evaluate).
        output = model(sw.to(device), tw.to(device), ss_tem.to(device),
                       ts_tem.to(device), sp_tem.to(device), tp_tem.to(device), 1)
    # Class 0 = no match, otherwise = match.
    if output.argmax().item() == 0:
        return '不匹配'
    else:
        return '匹配'


if __name__ == '__main__':
    import json

    # Run a prediction for every JSON record (one per line) in the train file.
    with open(r"D:\data\sohu\LL_topic\train.txt", mode='r', encoding='utf8') as f:
        for line in f:
            # Parse each line once instead of twice (the original called
            # json.loads(i) separately for "source" and "target").
            record = json.loads(line)
            print(prediction(record["source"], record["target"]))

    # test_dataset = get_dataloader(r'D:\data\sohu\LL_topic\valid.txt')
    # accuracy = evaluate(test_dataset)
    # print("测试集准确率", accuracy)