# Check detection precision for emerging (improvised) euphemisms
from transformers import T5Tokenizer, T5ForConditionalGeneration
from utils.datasets import T5MLM
from torch.utils.data import DataLoader
from utils import emerging_euph
import pickle
from utils import stopwords
import random
import json
import torch
from confs import step2 as conf
from tqdm import tqdm
import pandas as pd

drugjson = json.load(open("./drugeuphs.json", "r", encoding="utf-8"))
# Pair each emerging euphemism with its corresponding drug name (order-based zip).
euph2drug = dict(zip(list(emerging_euph.keys()), list(drugjson.keys())))
tokenizer = T5Tokenizer.from_pretrained(conf.T5)

# First token id of each drug-name seed ------------------------
drug_id = {}
for drug_name in drugjson.keys():
    token_ids = tokenizer(drug_name).input_ids
    # NOTE(review): id 3 is presumably the SentencePiece "▁" prefix piece for
    # this vocab — when present, take the first *content* token instead. Confirm.
    drug_id[drug_name] = token_ids[1] if token_ids[0] == 3 else token_ids[0]
drug_ids = set(drug_id.values())
print(f"drug_ids : {drug_ids}")

# drug_id = {} # 委婉语的首token----------------
# for k in emerging_euph.keys():
#     input_ids = tokenizer(k).input_ids
#     if input_ids[0] != 3:
#         drug_id[k] = input_ids[0]
#     else:
#         drug_id[k] = input_ids[1]
# print(drug_id)
# drug_ids = set(drug_id.values())
# print(f"drug_ids : {drug_ids}")
# -------------------------------------------------


# Runtime configuration: evaluation batch size and inference device.
batch_size = 20
device = "cpu"  # CPU-only inference here; change to "cuda" manually if desired

# Base T5 model; fine-tuned weights are loaded from a checkpoint further below.
model = T5ForConditionalGeneration.from_pretrained(conf.T5).to(device)

## ---------------------------------bint5 classification----------------------------------------
# modelname = "./models/step2train/Epoch16_step2train_01.pt"
# model.load_state_dict(torch.load(modelname, map_location=device))
#
# out = {}
# for pick in emerging_euph.keys():
#     out[pick] = []
#     devset = DataSet4T5Check(conf, tokenizer, pick, euph_corpus, withidx=True)
#     dev_loader = DataLoader(
#         devset,
#         batch_size=batch_size,
#     )
#
#     model.eval()
#     with torch.no_grad():
#         for euphcorp_idx, d in dev_loader:
#             for k in d.keys():
#                 d[k] = d[k].squeeze(1).to(device)
#             outputs = model.generate(**d)
#             cnt = 0
#             for each in range(batch_size):
#                 _ = tokenizer.decode(outputs[each], skip_special_tokens=True)
#                 if "1" in _:
#                     out[pick].append(euphcorp_idx[each].item())
# pickle.dump(out, open(f"./corps/rankdics/target.pt", "wb"))
#
# print("pass")
# # -------------------------------prepare rankdic--------------------------------------------------
# Build rankdic: for every euphemism `pick`, the rank (0-based column position)
# at which a drug-name seed token first appears in the model's sorted output
# scores, one entry per evaluated example.
target = pickle.load(open(f"./corpus/target_corp.pkl", "rb"))
modelname = "Epoch16_step2train_01"
model.load_state_dict(torch.load(f"./models/step2train/{modelname}.pt", map_location=device))
print(modelname)
rankdic = {}
for pick in target.keys():
    dataset = T5MLM(conf, tokenizer, target[pick], type="checktarget")  # evaluate all examples (~440)
    d_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False
    )

    model.eval()
    all_ranks = []  # ranks accumulated across ALL batches for this euphemism
    with torch.no_grad():
        for batch in d_loader:
            for k in batch.keys():
                batch[k] = batch[k].squeeze(1).to(device)

            outputs = model.generate(**batch, return_dict_in_generate=True, output_scores=True)
            # Skip the SentencePiece "▁" piece (id 3) when locating the first
            # content token. NOTE(review): position is decided from the first
            # sequence only but applied to the whole batch — confirm intended.
            first_char_pos = 1 if outputs.sequences[0][2].item() != 3 else 2
            target_score = outputs.scores[first_char_pos]
            _, target_score_rank = torch.sort(target_score, descending=True)
            batch_rank = [False for _ in range(target_score_rank.shape[0])]
            for colidx in range(target_score_rank.shape[1]):
                for rowidx in range(target_score_rank.shape[0]):
                    # BUG FIX: guard so the FIRST (best) matching column wins;
                    # the original overwrote a row's rank whenever any later
                    # column also held a drug token.
                    if batch_rank[rowidx] is False and target_score_rank[rowidx, colidx].item() in drug_ids:
                        batch_rank[rowidx] = colidx
                if False not in batch_rank:
                    break
            # BUG FIX: the original assigned `target_rank` per batch and stored
            # it once after the loop, silently discarding every batch but the
            # last. Accumulate every batch's ranks instead.
            all_ranks.extend(batch_rank)
    rankdic[pick] = all_ranks
for k in rankdic.keys():
    rankdic[k] = sorted(rankdic[k])
pickle.dump(rankdic, open(f"./corpus/rankdics/{modelname}.pkl", "wb"))
# ------------------read rankdic---------------------------------------------------------
# Report, for several cutoffs k, how many examples per euphemism ranked a drug
# token within the top-k model predictions.
rankdic = pickle.load(open(f"./corpus/rankdics/{modelname}.pkl", "rb"))
for topk in [1, 5, 10, 20, 30, 40, 50, 100]:
    # Ranks are 0-based column indices, so "within top-k" means rank < k.
    # BUG FIX: the original tested `each <= topk`, which counts k+1 positions
    # (e.g. top1 counted ranks 0 AND 1).
    res = {k: sum(1 for rank in rankdic[k] if rank < topk) for k in rankdic.keys()}
    print(res)

    print(f"top{topk}: {sum(res.values())}")
print("pass")
