| | from fastapi import FastAPI |
| | from pydantic import BaseModel |
| | from transformers import AutoTokenizer, AutoModelForSequenceClassification |
| | import torch |
| | from typing import List |
| |
|
# FastAPI application serving Indonesian sentiment predictions.
app = FastAPI(
    title="Indonesian Sentiment API",
    version="1.0"
)

# Hugging Face Hub id of the fine-tuned Indonesian sentiment model.
MODEL_NAME = "taufiqdp/indonesian-sentiment"

# NOTE(review): trust_remote_code=True executes Python shipped inside the model
# repository at load time — acceptable only if the repo is trusted and ideally
# pinned to a revision; confirm before deploying.
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    trust_remote_code=True
)
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME,
    trust_remote_code=True
)
model.eval()  # inference mode: disables dropout / batch-norm updates

# Class index -> human-readable label (Indonesian: negative, neutral, positive).
# Presumably matches the model's id2label ordering — TODO confirm against the
# model config.
labels = ["negatif", "netral", "positif"]
| |
|
| | |
@app.get("/")
def root():
    """Health-check endpoint confirming the service is up and responding."""
    status_payload = {
        "status": "ok",
        "message": "Sentiment API is running",
    }
    return status_payload
| |
|
| | |
class InputBatch(BaseModel):
    """Request body for /predict-batch: a batch of raw texts to classify."""

    # One entry per document; response preserves this order.
    texts: List[str]
| |
|
@app.post("/predict-batch")
def predict_batch(data: InputBatch):
    """Classify a batch of texts and return one sentiment per input.

    All texts are tokenized as one padded batch (truncated to 128 tokens)
    and scored with a single forward pass.

    Args:
        data: request body holding the list of texts to classify.

    Returns:
        {"results": [{"text": str, "sentiment": str, "score": float}, ...]}
        in the same order as the input texts; score is the softmax
        probability of the predicted class, rounded to 4 decimals.
    """
    # Guard the empty batch: the tokenizer/model raise on an empty input
    # list, and the correct response is simply no results.
    if not data.texts:
        return {"results": []}

    inputs = tokenizer(
        data.texts,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=128,
    )

    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model(**inputs)

    # Softmax over the class dimension yields per-class confidences.
    probs = torch.softmax(outputs.logits, dim=-1)
    preds = torch.argmax(probs, dim=-1)

    results = []
    for text, idx, prob in zip(data.texts, preds, probs):
        # Extract the index once as a plain int instead of indexing with
        # a 0-dim tensor twice.
        label_idx = idx.item()
        results.append({
            "text": text,
            "sentiment": labels[label_idx],
            "score": round(prob[label_idx].item(), 4),
        })

    return {"results": results}
| |
|