"""Minimal FastAPI service exposing a binary sentiment classifier.

Serves a static front-end from ``static/`` and a ``/classify_text``
endpoint backed by a DistilBERT SST-2 model.

Run with: uvicorn main:app --host 127.0.0.1 --port 8001 --reload
"""

import torch
import torch.nn.functional as F
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

app = FastAPI()

# Model and tokenizer are loaded once at import time and shared across requests.
model_name = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only; disables dropout etc.

# Resolve which output index means "positive" from the model config instead of
# hard-coding [0, 1]; falls back to the SST-2 convention (index 1 = positive).
_POSITIVE_INDEX = next(
    (i for i, label in getattr(model.config, "id2label", {}).items()
     if str(label).upper() == "POSITIVE"),
    1,
)
_NEGATIVE_INDEX = 1 - _POSITIVE_INDEX

app.mount("/static", StaticFiles(directory="static", html=True), name="static")


@app.get("/", response_class=HTMLResponse)
async def read_index():
    """Serve the static landing page."""
    return FileResponse("static/index.html")


@app.get("/classify_text")
async def classify_text(input: str):
    """Classify ``input`` as positive or negative sentiment.

    Query parameter ``input`` is the raw text to classify.  Returns the
    class probabilities and the most likely sentiment label.
    """
    # truncation=True keeps inputs within the model's max sequence length
    # (512 tokens); without it, long inputs crash the forward pass.
    inputs = tokenizer(input, return_tensors="pt", truncation=True)

    # No gradients are needed for inference; inference_mode avoids building
    # an autograd graph per request.
    with torch.inference_mode():
        outputs = model(**inputs)

    # Convert logits to probabilities over the two classes.
    probabilities = F.softmax(outputs.logits, dim=-1)

    positive_prob = probabilities[:, _POSITIVE_INDEX].item()
    negative_prob = probabilities[:, _NEGATIVE_INDEX].item()

    # Batch size is always 1 here, so comparing the two scalar probabilities
    # is equivalent to (and clearer than) argmax over the flattened tensor.
    sentiment = "positive" if positive_prob >= negative_prob else "negative"

    return {
        "input": input,
        "positive_probability": positive_prob,
        "negative_probability": negative_prob,
        "sentiment": sentiment,
    }

# uvicorn main:app --host 127.0.0.1 --port 8001 --reload