"""利用语言模型对过滤后的OCR排布方式进行排序
"""
# %%
import json
import random
import torch
from tqdm import tqdm
from itertools import permutations
from transformers import AutoTokenizer, AutoModelWithLMHead



device = "cuda"
tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-xlnet-base")
model = AutoModelWithLMHead.from_pretrained("hfl/chinese-xlnet-base")
with open("./result/OCRClassifier-roberta-layer3-head4-dropout0.1-relu-constant-warmup-lr1e-3-e200.json", "r", encoding="utf-8") as f:
    testset = json.load(f)
model.to(device)
# Inference only: disable dropout so the LM loss used as a ranking score
# below is deterministic across repeated calls.
model.eval()

keys = list(testset.keys())
# Spot-check one random raw entry.
key = random.choice(keys)
print(testset[key])

# Keep only the OCR text fragments whose classifier prediction is truthy.
filter_testset = {}
for k in tqdm(keys):
    texts = testset[k]["text"]
    preds = testset[k]["pred"]
    filter_testset[k] = [t for t, p in zip(texts, preds) if p]

# Spot-check one random filtered entry.
key = random.choice(keys)
print(filter_testset[key])
def sent_scoring(model_tokenizer, text, cuda):
    """Score a sentence by its language-model likelihood.

    The model's LM loss is the mean negative log-likelihood of the
    tokens, so a *lower* loss means a more probable sentence.  This
    returns the *negated* loss so that a higher score always means
    "more natural", and callers can simply keep the argmax.

    Args:
        model_tokenizer: tuple ``(model, tokenizer)``; the model must
            return ``(loss, logits, ...)`` when called with ``labels``.
        text: the sentence to score.
        cuda: truthy to move the input ids to the GPU.

    Returns:
        float: negated mean token NLL; higher = more probable.
    """
    model, tokenizer = model_tokenizer
    assert model is not None
    assert tokenizer is not None
    input_ids = torch.tensor(tokenizer.encode(text)).unsqueeze(0)  # Batch size 1
    if cuda:
        input_ids = input_ids.to('cuda')
    with torch.no_grad():
        outputs = model(input_ids, labels=input_ids)
    loss = outputs[0]
    # BUG FIX: the original returned the raw loss as "sentence_prob" while the
    # caller maximised it, which selected the *least* probable permutation.
    return -loss.item()


# For every key, try every ordering of its filtered fragments and keep the
# one the language model scores highest.
# NOTE: this enumerates all len(fragments)! permutations — only tractable
# when each key has a handful of fragments.
result = {}
for k in tqdm(filter_testset):
    fragments = filter_testset[k]
    candidates = ["".join(p) for p in permutations(fragments)]
    best_score = -float('inf')
    best_text = None
    for candidate in candidates:
        score = sent_scoring((model, tokenizer), candidate, device)
        if score > best_score:
            best_text = candidate
            best_score = score
    result[k] = best_text

with open("result.json", "w", encoding="utf-8") as f:
    # ensure_ascii=False keeps the Chinese text human-readable in the output
    # file instead of escaping every character to \uXXXX.
    json.dump(result, f, ensure_ascii=False)
# %%
