|
import os |
|
from dotenv import load_dotenv |
|
import torch |
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer |
|
|
|
|
|
# Load environment variables (e.g. MODEL_PATH, read in main) from a local .env file.
load_dotenv()
|
|
|
def load_model(model_path):
    """Load a sequence-classification model and its tokenizer.

    Parameters
    ----------
    model_path : str
        Hugging Face hub id or local directory accepted by ``from_pretrained``.

    Returns
    -------
    tuple
        ``(model, tokenizer)`` pair loaded from *model_path*.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    classifier = AutoModelForSequenceClassification.from_pretrained(model_path)
    return classifier, tokenizer
|
|
|
def predict(text, model, tokenizer):
    """Run a single inference pass and return the raw model outputs.

    Parameters
    ----------
    text : str
        Input text to classify.
    model : callable
        Model invoked with the tokenized inputs as keyword arguments.
    tokenizer : callable
        Tokenizer called as ``tokenizer(text, return_tensors="pt")``;
        expected to produce PyTorch tensors.

    Returns
    -------
    The model's forward-pass output (e.g. a logits container).
    """
    inputs = tokenizer(text, return_tensors="pt")
    # Inference only: disable autograd so no gradient graph is built,
    # saving memory and compute during the forward pass.
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs
|
|
|
def main():
    """Entry point: load the model named by MODEL_PATH and classify a sample text.

    Reads the model location from the ``MODEL_PATH`` environment variable
    (populated by ``load_dotenv()`` at import time), runs one prediction on
    a hard-coded sample, and prints the raw model output.

    Raises
    ------
    RuntimeError
        If the ``MODEL_PATH`` environment variable is not set or empty.
    """
    model_path = os.getenv('MODEL_PATH')
    if not model_path:
        # Fail fast with a clear message instead of passing None to
        # from_pretrained(), which raises a confusing error downstream.
        raise RuntimeError("MODEL_PATH environment variable is not set")

    model, tokenizer = load_model(model_path)

    text = "Sample input text"
    result = predict(text, model, tokenizer)
    print(result)
|
|
|
# Standard script guard: run main() only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":

    main()