from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from transformers import BertTokenizer, BertForSequenceClassification
import torch
import torch.nn.functional as F

# Path to the locally saved fine-tuned BERT toxicity model. Tokenizer and
# weights are loaded once at import time so every request reuses them.
model_path = "./toxic-bert"
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForSequenceClassification.from_pretrained(model_path)
# Inference only: put the model in eval mode (disables dropout etc.).
model.eval()

app = FastAPI()

@app.post("/predict")
async def predict(request: Request):
    """Classify a text as toxic or non-toxic.

    Expects a JSON body of the form ``{"inputs": "<text>"}``. Returns the
    echoed text, the predicted class index, its softmax confidence (rounded
    to 4 decimals), and a boolean toxicity flag. Validation failures return
    a 400 with the same ``{"error": ...}`` body shape as before (previously
    these were sent with a misleading 200 status, and malformed JSON caused
    an unhandled 500).
    """
    # A malformed body makes request.json() raise; report it as a client
    # error instead of letting it surface as a 500.
    try:
        data = await request.json()
    except Exception:
        return JSONResponse({"error": "Request body must be valid JSON."}, status_code=400)

    # Guard against non-object bodies (e.g. a bare list) before .get().
    text = data.get("inputs", "") if isinstance(data, dict) else ""
    # Reject empty or non-string inputs explicitly; the tokenizer would
    # otherwise raise on e.g. numeric or nested payloads.
    if not isinstance(text, str) or not text:
        return JSONResponse({"error": "No input text provided."}, status_code=400)

    # Truncate so over-long inputs stay within BERT's 512-token limit.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    # NOTE(review): inference is blocking work inside an async endpoint, so
    # it stalls the event loop under load — consider
    # fastapi.concurrency.run_in_threadpool. Left as-is to keep behavior.
    with torch.no_grad():
        logits = model(**inputs).logits
        probs = F.softmax(logits, dim=-1)
        pred = int(torch.argmax(probs, dim=-1).item())
        confidence = probs[0][pred].item()

    return {
        "text": text,
        "label": pred,
        "confidence": round(confidence, 4),
        # Assumes class index 1 = toxic, 0 = non-toxic — TODO confirm
        # against the model's id2label mapping.
        "is_toxic": bool(pred),
    }
