"""利用训练得到的模型，对于候选的OCR进行过滤
"""
# %%
import json
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import ArgumentParser
from transformers import BertTokenizer
from train import OCRFilterNet
from dataset import InferCollator, POIDataset

# %%
# Reproducibility: fix every RNG pytorch-lightning knows about, and force
# cuDNN into its deterministic, non-benchmarking mode.
SEED = 12345
pl.seed_everything(SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

# Pre-processed test split (JSON produced by the preprocessing step).
with open("./preprocess_data/test.json", "r", encoding="utf-8") as fp:
    data = json.load(fp)

# %%
# The tokenizer carries the expanded vocabulary the model was pre-trained
# with; the collator uses it to turn raw POI/OCR records into model inputs.
tokenizer = BertTokenizer.from_pretrained("./models/roberta-pretrained-with-expand-vocab")
test_loader = DataLoader(
    POIDataset(data, train=False),
    batch_size=32,
    collate_fn=InferCollator(tokenizer),
)
# %%
# Load the trained classifier from its checkpoint and switch to eval mode:
# the checkpoint was trained with dropout (see "dropout0.1" in the name), so
# without .eval() dropout would stay active and make predictions stochastic.
model = OCRFilterNet.load_from_checkpoint("./models/OCRClassifier-roberta-layer3-head4-dropout0.1-relu-constant-warmup-lr1e-3-e200/model-epoch=07-valid_loss=0.207-valid_instance_accuracy=0.674-valid_accuracy=0.917-valid_auc=0.000.ckpt")
model.eval()

# %%
# Run inference over the whole test set; collect per-candidate predictions
# keyed by instance id. no_grad() stops autograd from building graphs for
# every batch — pure inference, so tracking gradients only wastes memory.
result = {}
with torch.no_grad():
    for batch in tqdm(test_loader):
        # logits: per-candidate scores; lengths: number of valid candidates
        # per instance (the rest of each row is padding).
        logits, lengths = model(batch)
        key_with_idx = batch[2]  # assumes (key, _, _) triples — see InferCollator
        for i, (k, _, _) in enumerate(key_with_idx):
            text = batch[3][k]
            valid_logits = logits[i, :lengths[i]]  # drop padded positions
            result[k] = {
                "text": text,
                # positive logit => keep this OCR candidate
                "pred": (valid_logits > 0).tolist(),
                "logit": valid_logits.tolist(),
            }




# %%
import os

# exist_ok=True avoids the check-then-create race of exists()+makedirs().
os.makedirs("./result", exist_ok=True)
# One JSON file per model name; ensure_ascii=False keeps CJK text readable.
with open(f"./result/{model.config['model_name']}.json", "w", encoding="utf-8") as f:
    json.dump(result, f, ensure_ascii=False)
# %%
