# %% Run the multi-task model to produce results, and also save a result file with logits
import json
import torch
from tqdm import tqdm
from train import OCRFilterNet, EarlyStoppingWithWarmup
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from dataset import InferCollator, POIDataset

# %%
# %%
# Load the preprocessed test split and build the inference pipeline.
with open("./preprocess_data/test.json", "r", encoding="utf-8") as fp:
    test_data = json.load(fp)

tokenizer = BertTokenizer.from_pretrained("./pretrained_models/poi-bert")
# %%
# Text-only inference: image features are not loaded.
load_image = False
testset = POIDataset(test_data, train=False, load_image=load_image)
test_loader = DataLoader(
    testset,
    batch_size=32,
    collate_fn=InferCollator(tokenizer),
)

# %%
# %%
# Checkpoints of the ensemble members (same architecture, trained with
# different seeds / activation functions).
model_paths = [
    "models/PureNLPModel_pairwise_alpha10_eta5e-2_3_prelu_512_layer3_dropout02_linear005_e50-seed1234-full.ckpt",
    "models/PureNLPModel_pairwise_alpha10_eta5e-2_3_relu_512_layer3_dropout02_linear005_e50-seed72-full.ckpt",
    "models/PureNLPModel_pairwise_alpha10_eta5e-2_3_relu_512_layer3_dropout02_linear005_e50-seed12345-full.ckpt",
    "models/PureNLPModel_pairwise_alpha10_eta5e-2_3_relu_512_layer3_dropout02_linear005_e50-seed8888-full.ckpt",
]
models = list(map(OCRFilterNet.load_from_checkpoint, model_paths))


# %%
# %%
# Ensemble inference: average the logits of all models for every batch and
# record per-sample filter logits and ranking scores, keyed by sequence id.
for model in models:
    # Disable dropout / batch-norm updates so fused logits are deterministic.
    model.eval()

result_with_logits = {}
with torch.no_grad():  # gradients are never needed at inference time
    for batch in tqdm(test_loader):
        # Model fusion: average the logits over all ensemble members.
        logits_sum = None
        lengths = None
        for model in models:
            logits, lengths = model(batch)
            # Non-mutating accumulation so the first model's output tensor
            # is not modified in place.
            logits_sum = logits if logits_sum is None else logits_sum + logits
        # NOTE(review): assumes every model returns identical `lengths` for
        # the same batch — TODO confirm in the model implementation.
        logits = logits_sum / len(models)
        key_with_idx = batch[2]
        for i, (k, _, _) in enumerate(key_with_idx):
            text = batch[3][k]
            result_with_logits[k] = {
                "text": text,
                # channel 0: filter (keep/discard) logit per position
                "logit": logits[i, :lengths[i], 0].tolist(),
                # channel 1: ranking score per position
                "rankscore": logits[i, :lengths[i], 1].tolist(),
            }

# %%
# %%
import os

# Persist the raw fused outputs (with logits) for later analysis/ensembling.
os.makedirs("./result", exist_ok=True)  # idempotent, no race-prone exists check
with open(os.path.join("./result", "fusion.json"), "w", encoding="utf-8") as f:
    json.dump(result_with_logits, f, ensure_ascii=False)
# Build the submission file: for every sequence keep the positively-scored
# text fragments and concatenate them ordered by rank score.
submit_result = {}
for seq_id, v in tqdm(result_with_logits.items(), desc="写入提交文件"):
    texts = v["text"]
    filter_score = v["logit"]
    # Positions whose filter logit is positive, i.e. predicted "keep".
    filter_position = [i for i in range(len(texts)) if filter_score[i] > 0]
    if filter_position:
        rankscore = [v["rankscore"][i] for i in filter_position]
        texts = [texts[i] for i in filter_position]
    else:
        # If everything would be filtered out, skip filtering entirely so
        # the submission entry is never empty.
        rankscore = v["rankscore"]
    # Sort fragments by rank score, descending. Ties fall back to comparing
    # the text itself (inherent in the tuple sort) — kept for reproducibility.
    texts = [x for _, x in sorted(zip(rankscore, texts), reverse=True)]
    submit_result[seq_id] = "".join(texts)

os.makedirs("./submit", exist_ok=True)
with open("./submit/fusion.json", "w", encoding="utf-8") as f:
    json.dump(submit_result, f, ensure_ascii=False)
