Spaces: Runtime error
import os

# Force all Hugging Face / Torch caches into a local, writable folder
# (must be set before importing transformers)
os.environ["TRANSFORMERS_CACHE"] = "./hf_cache"
os.environ["HF_HOME"] = "./hf_cache"
os.environ["XDG_CACHE_HOME"] = "./hf_cache"
os.environ["TORCH_HOME"] = "./hf_cache"
os.environ["HF_DATASETS_CACHE"] = "./hf_cache"
os.environ["SAFE_TENSORS_CACHE"] = "./hf_cache"

from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model once at startup
model_name = "tabularisai/multilingual-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()

app = FastAPI()

# Map class indices to human-readable labels
sentiment_map = {
    0: "Very Negative",
    1: "Negative",
    2: "Neutral",
    3: "Positive",
    4: "Very Positive",
}

class ReviewRequest(BaseModel):
    text: str

@app.post("/predict")  # route decorator was missing in the original; path is assumed
def predict_sentiment(review: ReviewRequest):
    # Tokenize, run the model without gradients, and pick the most probable class
    inputs = tokenizer(review.text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    predicted_label = torch.argmax(probabilities, dim=-1).item()
    sentiment = sentiment_map[predicted_label]
    return {"text": review.text, "sentiment": sentiment}