import streamlit as st
from transformers import pipeline
from ModelDriver import *
import numpy as np
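# ModelDriver (imported above) is assumed to provide the RobertaClassifier*Inference
# helpers called below; each is expected to take a text string and return per-class scores.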
# Add a title
st.title('GPT Detection Demo')
st.write("This is a demo for GPT detection. You can use this demo to test the model. There are 3 variations of the model, The model was trained on CHEAT, GPABenchmark, OpenGPT datasets. They are all in the domain of Scientific Abstract. You can choose dataset variation of the model on the sidebar.")
# st.write("Reference on how we built Roberta Sentinel: https://arxiv.org/abs/2305.07969")
# Sidebar options: one model, three dataset variations
ModelOption = st.sidebar.selectbox(
    'Which Model do you want to use?',
    ('RobertaClassifier',),
)
DatasetOption = st.sidebar.selectbox(
    'Which dataset was the model trained on?',
    ('OpenGPT', 'GPABenchmark', 'CHEAT'),
)
text = st.text_area('Enter text here (max 512 words)', '')
if st.button('Generate'):
    # if ModelOption == 'RobertaSentinel':
    #     if DatasetOption == 'OpenGPT':
    #         result = RobertaSentinelOpenGPTInference(text)
    #         st.write("Model: RobertaSentinelOpenGPT")
    #     elif DatasetOption == 'CSAbstract':
    #         result = RobertaSentinelCSAbstractInference(text)
    #         st.write("Model: RobertaSentinelCSAbstract")

    if ModelOption == 'RobertaClassifier':
        if DatasetOption == 'OpenGPT':
            result = RobertaClassifierOpenGPTInference(text)
            st.write("Model: RobertaClassifierOpenGPT")
        elif DatasetOption == 'GPABenchmark':
            result = RobertaClassifierGPABenchmarkInference(text)
            st.write("Model: RobertaClassifierGPABenchmark")
        elif DatasetOption == 'CHEAT':
            result = RobertaClassifierCHEATInference(text)
            st.write("Model: RobertaClassifierCHEAT")
Prediction = "Human Written" if not np.argmax(result) else "Machine Generated"
st.write(f"Prediction: {Prediction} ")
st.write(f"Probabilty:", max(result))