from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification

# Single FastAPI instance; the CORS middleware must be registered on the same
# instance that serves requests (a second `app = FastAPI()` would discard it).
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],            # Allow all origins
    allow_methods=["GET", "POST"],  # Allow only GET and POST methods
    allow_headers=["*"],            # Allow all headers
)

# Load the tokenizer and the fine-tuned classification model
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model_path = "model_ai_detection"
model = RobertaForSequenceClassification.from_pretrained(model_path)

# Run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
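
# Note: the /predict handler below treats class index 1 as the "AI-generated"
# class. That mapping depends on how the checkpoint was trained; if the saved
# config carries label names, the assumption can be checked (a sketch, assuming
# the fine-tuned config populates id2label):
#
#   print(model.config.id2label)  # e.g. {0: "human", 1: "ai"}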

class TextData(BaseModel):
    text: str
@app.post("/predict")
async def predict(data: TextData):
inputs = tokenizer(data.text, return_tensors="pt", padding=True, truncation=True)
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
ai_prob = probs[0][1].item() * 100 # Probability of the text being AI-generated
message = "The text is likely generated by AI." if ai_prob > 50 else "The text is likely generated by a human."
return {
"score": ai_prob,
"message": message
}
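
# Example request against /predict (a sketch; the host and port assume a local
# `uvicorn` run and are not part of this file):
#
#   curl -X POST http://localhost:8000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Some passage to classify."}'
#
# Response shape (score is a percentage), e.g.:
#   {"score": 73.2, "message": "The text is likely generated by AI."}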
@app.get("/")
async def read_root():
return {"message": "Ready to go"}