import gradio as gr
from transformers import AutoTokenizer
import torch
from tiny_finbert import TinyFinBERTRegressor, preprocess_texts
import os
import nltk

nltk.download('stopwords')

MODEL_DIR = "./saved_model"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the tokenizer and the trained regressor weights from the saved model directory.
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = TinyFinBERTRegressor().to(DEVICE)
model.load_state_dict(torch.load(os.path.join(MODEL_DIR, "regressor_model.pt"), map_location=DEVICE))
model.eval()
def predict_sentiment(text):
    print(f"[DEBUG] Input text: {text}")
    processed = preprocess_texts([text])[0]
    print(f"[DEBUG] Processed text: {processed}")
    inputs = tokenizer(processed, return_tensors="pt", truncation=True, padding='max_length', max_length=128)
    # Move tensors to the target device; the regressor does not use token_type_ids.
    inputs = {k: v.to(DEVICE) for k, v in inputs.items() if k != "token_type_ids"}
    with torch.no_grad():
        score = model(**inputs)["score"].item()
    print(f"[DEBUG] Score: {score}")
    # Map the continuous score to a coarse sentiment label.
    if score > 0.3:
        interpretation = "positive"
    elif score < -0.3:
        interpretation = "negative"
    else:
        interpretation = "neutral"
    return round(score, 4), interpretation
iface = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(label="Enter financial sentence"),
    outputs=[
        gr.Number(label="Sentiment Score"),
        gr.Textbox(label="Interpretation")
    ],
    title="TinyFinBERT Sentiment Analysis",
    api_name="predict"
)
iface.launch(
    # server_name="0.0.0.0",
    # share=True,
    # max_threads=40,
    # show_api=True
)