import streamlit as st

# Load the tokenizer and detector model from the Hugging Face Hub
from transformers import (AutoTokenizer,
                          AutoModelForSequenceClassification,
                          TextClassificationPipeline)

tokenizer = AutoTokenizer.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta")
model = AutoModelForSequenceClassification.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta")

# Build a text-classification pipeline that returns the score for every label
# (return_all_scores is deprecated in recent transformers releases in favour of
# top_k=None, but is kept here for compatibility).
pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True)
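# Note: Streamlit re-runs this whole script on every interaction, so the model
# above is reloaded each time. A minimal caching sketch (assuming a Streamlit
# version that provides st.cache_resource, i.e. 1.18+) would be:
#
#     @st.cache_resource
#     def load_pipeline():
#         tok = AutoTokenizer.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta")
#         mdl = AutoModelForSequenceClassification.from_pretrained("Hello-SimpleAI/chatgpt-detector-roberta")
#         return TextClassificationPipeline(model=mdl, tokenizer=tok, return_all_scores=True)
#
#     pipe = load_pipeline()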

def score_and_visualize(text):
    """Classify the text and return the winning label with its confidence in percent."""
    prediction = pipe([text])
    # prediction holds one entry per input; each entry is a list with one
    # {'label': ..., 'score': ...} dict per class.
    label_0 = prediction[0][0]['label']
    score_0 = prediction[0][0]['score']
    label_1 = prediction[0][1]['label']
    score_1 = prediction[0][1]['score']
    # Keep the higher-scoring class and convert its probability to a percentage.
    if score_0 > score_1:
        f_score = round(score_0 * 100, 2)
        f_label = label_0
    else:
        f_score = round(score_1 * 100, 2)
        f_label = label_1
    return f_score, f_label

def main():
    st.title("Human vs ChatGPT Classification Model")

    # Create an input text box
    input_text = st.text_area("Enter your text", "")

    # Create a button to trigger model inference
    if st.button("Analyze"):
        # Perform inference using the loaded model
        score, label = score_and_visualize(input_text)
        st.write(f"The input text is {label}-based with a confidence of {score}%.")

if __name__ == "__main__":
    main()
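
# Usage note: assuming this file is saved as app.py, start the app with
# `streamlit run app.py`. RoBERTa models accept at most 512 tokens, so very long
# inputs can fail; a minimal sketch for truncating at inference time is to
# forward tokenizer keyword arguments through the pipeline call, e.g.:
#
#     prediction = pipe([text], truncation=True, max_length=512)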