import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import json
from tqdm import tqdm


if __name__ == '__main__':
    # Run binary sentence-pair classification over the test split and dump
    # per-example predictions as JSON Lines.
    model_checkpoint = "adapter/full/bert-base-chinese/deie"
    data_path = "data/init_data/deie"
    output_path = "output/bert-base-chinese/deie.jsonl"

    # Fall back to CPU so the script still runs on machines without a GPU
    # (the original hard-coded "cuda" and crashed on CPU-only hosts).
    device = "cuda" if torch.cuda.is_available() else "cpu"

    dataset = load_dataset(data_path)
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_checkpoint, num_labels=2
    ).to(device)
    model.eval()  # make sure dropout etc. are disabled for inference

    # Context manager guarantees the file is closed even if inference
    # raises partway through (e.g. CUDA OOM).
    with open(output_path, "w", encoding="utf-8") as f:
        for example in tqdm(dataset['test']):
            # Sentence-pair input; truncate to BERT's 512-token limit.
            inputs = tokenizer(
                example["sentence1"],
                example["sentence2"],
                truncation=True,
                max_length=512,
                return_tensors='pt',
            ).to(device)

            with torch.no_grad():
                logits = model(**inputs).logits
            # Single example per forward pass, so a global argmax picks
            # the predicted class id (0 or 1).
            example['pred'] = logits.argmax().item()
            f.write(json.dumps(example, ensure_ascii=False) + "\n")
            f.flush()  # flush the buffer to disk so progress is visible