# Legacy version of this app, kept for reference: it sent text to a
# remote FastAPI backend for scoring instead of running the model locally.
'''
import requests
import streamlit as st

# Replace with the actual URL of your deployed FastAPI backend
API_URL = "http://127.0.0.1:8000/predict"

def main():
    text_input = st.text_input("Enter text to score:")
    if st.button("Score Text"):
        response = requests.post(API_URL, json={"text": text_input})
        data = response.json()
        st.write(f"Score: {data['score']}")
        st.write(f"Message: {data['message']}")

if __name__ == "__main__":
    main()
'''
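# A minimal sketch of the FastAPI backend that the legacy client above
# assumes: a /predict endpoint accepting {"text": ...} and returning
# {"score", "message"}. The endpoint path and payload shape are taken from
# the client code; the app structure and the reuse of predict() below are
# illustrative assumptions, not the original backend. Quoted out, like the
# legacy block, so it does not run as part of this file.
'''
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class ScoreRequest(BaseModel):
    text: str

@app.post("/predict")
def score_text(request: ScoreRequest):
    # predict() here would be the same scoring function defined below.
    return predict(request.text)
'''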
import streamlit as st
import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification

# Load the tokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

# Load the detection model from the local checkpoint directory
model_path = "model_ai_detection"
model = RobertaForSequenceClassification.from_pretrained(model_path)

# Run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
def predict(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    ai_prob = probs[0][1].item() * 100  # Probability (in %) that the text is AI-generated
    message = "The text is likely generated by AI." if ai_prob > 50 else "The text is likely written by a human."
    return {
        "score": ai_prob,
        "message": message
    }
def main():
    st.title("AI Text Detector")
    text_input = st.text_area("Enter text to score:")
    if st.button("Score Text"):
        if text_input:
            result = predict(text_input)
            st.write(f"Score: {result['score']:.2f}%")
            st.write(f"Message: {result['message']}")
        else:
            st.write("Please enter some text to score.")

if __name__ == "__main__":
    main()
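# To try this locally (assuming streamlit, torch, and transformers are
# installed, and the "model_ai_detection" checkpoint sits in the working
# directory), run the file with Streamlit; "app.py" here stands in for
# whatever this file is actually named:
#   streamlit run app.py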