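"""Run single-text inference with a Hugging Face sequence-classification model."""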
import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification, AutoTokenizer


def load_model(model_directory):
    # Assuming 'config.json' and 'pytorch_model.bin' are in 'model_directory'
    model = AutoModelForSequenceClassification.from_pretrained(model_directory)
    tokenizer = AutoTokenizer.from_pretrained(model_directory)
    return model, tokenizer


def predict(model, tokenizer, input_text):
    # Tokenize the input text into a batch of size 1
    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)

    # Move the model and the input tensors to the same device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    # Put the model in evaluation mode
    model.eval()

    # Run a forward pass without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Convert logits to per-class probabilities
    probabilities = F.softmax(logits, dim=1)

    # Get the predicted class index and the full probability distribution
    predicted_class = torch.argmax(probabilities, dim=1).cpu().numpy()
    probabilities = probabilities.cpu().numpy()

    return predicted_class, probabilities
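
# Note: `predicted_class` has shape (batch_size,) and `probabilities` has shape
# (batch_size, num_labels). With a single input string the batch size is 1,
# which is why main() below indexes both with [0].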


def main():
    # A local model directory or a Hugging Face Hub repo id
    model_directory = "Kurkur99/modeling"  # e.g., "Kurkur99/Kurkur99/transactionmerchant/model_directory"
    model, tokenizer = load_model(model_directory)

    # Example input text
    input_text = "Example input text for prediction"

    # Get predictions
    predicted_class, probabilities = predict(model, tokenizer, input_text)

    # Output the results
    print(f"Predicted Class: {predicted_class[0]}")
    print(f"Probabilities: {probabilities[0]}")


if __name__ == "__main__":
    main()