from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import json

# Load configuration
with open('../config/config.json') as f:
    config = json.load(f)

# Load model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained('../model')
tokenizer = AutoTokenizer.from_pretrained(config['model_name'])
model.eval()  # disable dropout so inference is deterministic

def predict(text):
    # Tokenize, padding/truncating to the model's maximum sequence length
    inputs = tokenizer(text, return_tensors="pt", padding="max_length", truncation=True)
    with torch.no_grad():  # no gradients needed at inference time
        outputs = model(**inputs)
    logits = outputs.logits
    # Predicted class id = index of the highest logit
    prediction = torch.argmax(logits, dim=-1)
    return prediction.item()

# Example usage
text = "Example text for prediction"
prediction = predict(text)
print(f"Prediction: {prediction}")
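
# Optional: a batched variant -- a minimal sketch, not part of the original
# script, assuming the same `model` and `tokenizer` loaded above. Tokenizing
# a list of texts with padding=True pads only to the longest item in the
# batch, which is cheaper than padding every input to max_length.
def predict_batch(texts):
    inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # One class id per input text
    return torch.argmax(logits, dim=-1).tolist()

# The integer ids can be mapped to label names via model.config.id2label,
# e.g. model.config.id2label[prediction].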