from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from tqdm import tqdm

# Path to the tab-separated test set (header row + "qid\ttext" lines — see the
# parsing loop below; verify format against the data file).
test_path = "/home/mbk/rubbish/sen_cls/data/SENTI_ROBUST/test.tsv"
# Directory holding the fine-tuned RoBERTa sentiment checkpoint (tokenizer + model).
model_path = "./fine_tuned_roberta_sentiment_chinese"


# Load tokenizer and classification head once at module level; both are reused
# for every prediction. `from_pretrained` returns the model in eval mode.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)

def predict_sentiment(text):
    """Return the predicted sentiment class index for *text*.

    Encodes the input with the module-level ``tokenizer`` (truncated and
    padded to at most 256 tokens), runs the module-level ``model`` with
    gradient tracking disabled, and returns the argmax over the logits
    as a plain Python int.
    """
    encoded = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=256,
    )

    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        logits = model(**encoded).logits

    return logits.argmax(dim=1).item()


# Collect output lines and join once at the end — repeated `res += ...`
# string concatenation is quadratic over large test sets.
res_lines = ["index\tprediction\n"]

with open(test_path, 'r', encoding='utf-8') as f:
    # Drop the header row. Note: splitting on '\n' leaves a trailing empty
    # string after the final newline, so blank lines must be skipped below
    # (the original crashed on tuple unpacking here).
    data = f.read().split('\n')[1:]
    for line in tqdm(data):
        if not line.strip():
            continue  # skip blank/trailing lines
        # maxsplit=1 keeps any tabs inside the text field intact instead of
        # raising a ValueError on unpacking.
        qid, text = line.split('\t', 1)
        label = predict_sentiment(text)
        res_lines.append(f"{qid}\t{label}\n")

with open('SENTI_ROBUST.tsv', 'w', encoding='utf-8') as f:
    f.writelines(res_lines)
