Update app.py
app.py CHANGED
@@ -1,19 +1,48 @@
 from flask import Flask, request
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import RobertaConfig
 from transformers import RobertaForSequenceClassification, RobertaTokenizer, RobertaConfig
 import torch
+from torch import cuda
 import gradio as gr
 import os
-
+import re
 app = Flask(__name__)

 ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
-config = RobertaConfig.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN)
-model = RobertaForSequenceClassification.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN, config = config)
+# config = RobertaConfig.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN)
+# model = RobertaForSequenceClassification.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN, config = config)
+
+device = 'cuda' if cuda.is_available() else 'cpu'
+tokenizer = AutoTokenizer.from_pretrained("PirateXX/AI-Content-Detector", use_auth_token= ACCESS_TOKEN)
+model = AutoModelForSequenceClassification.from_pretrained("PirateXX/AI-Content-Detector", use_auth_token= ACCESS_TOKEN)
+model.to(device)
+
+# model_name = "roberta-base"
+# tokenizer = RobertaTokenizer.from_pretrained(model_name, map_location=torch.device('cpu'))

-
-
+def text_to_sentences(text):
+    clean_text = text.replace('\n', ' ')
+    return re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', clean_text)

-
+# function to concatenate sentences into chunks of size 900 or less
+def chunks_of_900(text, chunk_size = 900):
+    sentences = text_to_sentences(text)
+    chunks = []
+    current_chunk = ""
+    for sentence in sentences:
+        if len(current_chunk + sentence) <= chunk_size:
+            if len(current_chunk)!=0:
+                current_chunk += " "+sentence
+            else:
+                current_chunk += sentence
+        else:
+            chunks.append(current_chunk)
+            current_chunk = sentence
+    chunks.append(current_chunk)
+    return chunks
+
+def predict(query):
     tokens = tokenizer.encode(query)
     all_tokens = len(tokens)
     tokens = tokens[:tokenizer.model_max_length - 2]
@@ -26,14 +55,29 @@ def predict(query, device="cpu"):
     probs = logits.softmax(dim=-1)

     fake, real = probs.detach().cpu().flatten().numpy().tolist()
-    return
-
-
-
-
-
-
-
-
-
+    return real
+
+def findRealProb(text):
+    chunksOfText = (chunks_of_900(text))
+    results = []
+    for chunk in chunksOfText:
+        output = predict(chunk)
+        results.append([output, len(chunk)])
+
+    ans = 0
+    cnt = 0
+    for prob, length in results:
+        cnt += length
+        ans = ans + prob*length
+    realProb = ans/cnt
+    return {"Real": realProb, "Fake": 1-realProb}, results
+
+demo = gr.Interface(
+    fn=findRealProb,
+    inputs=gr.Textbox(placeholder="Copy and paste here..."),
+    article = "Visit <a href = \"https://ai-content-detector.online/\">AI Content Detector</a> for better user experience!",
+    outputs=gr.outputs.JSON(),
+    interpretation="default",
+    examples=["Cristiano Ronaldo is a Portuguese professional soccer player who currently plays as a forward for Manchester United and the Portugal national team. He is widely considered one of the greatest soccer players of all time, having won numerous awards and accolades throughout his career. Ronaldo began his professional career with Sporting CP in Portugal before moving to Manchester United in 2003. He spent six seasons with the club, winning three Premier League titles and one UEFA Champions League title. In 2009, he transferred to Real Madrid for a then-world record transfer fee of $131 million. He spent nine seasons with the club, winning four UEFA Champions League titles, two La Liga titles, and two Copa del Rey titles. In 2018, he transferred to Juventus, where he spent three seasons before returning to Manchester United in 2021. He has also had a successful international career with the Portugal national team, having won the UEFA European Championship in 2016 and the UEFA Nations League in 2019.", "One rule of thumb which applies to everything that we do - professionally and personally : Know what the customer want and deliver. In this case, it is important to know what the organisation what from employee. Connect the same to the KRA. Are you part of a delivery which directly ties to the larger organisational objective. If yes, then the next question is success rate of one’s delivery. If the KRAs are achieved or exceeded, then the employee is entitled for a decent hike."])
+
+demo.launch(show_api=False)
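
For orientation, here is a small self-contained sketch of the two pieces this commit adds: the sentence/chunk splitting and the length-weighted averaging performed by findRealProb. The model call is replaced by fixed toy scores, so the snippet runs without the PirateXX/AI-Content-Detector weights or an ACCESS_TOKEN; the sample text and probabilities are illustrative only.

import re

# Same sentence splitter as in the commit: split at spaces that sit between
# a sentence ending in '.' or '?' and a following capital letter.
def text_to_sentences(text):
    clean_text = text.replace('\n', ' ')
    return re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', clean_text)

print(text_to_sentences("First sentence. Second one? Third."))
# ['First sentence.', 'Second one?', 'Third.']

# Toy stand-in for the per-chunk results collected by findRealProb:
# each entry is [probability_the_chunk_is_real, chunk_length_in_characters].
results = [[0.90, 850], [0.60, 300]]

# Length-weighted average, as in findRealProb.
cnt = sum(length for _, length in results)
real_prob = sum(prob * length for prob, length in results) / cnt
print({"Real": real_prob, "Fake": 1 - real_prob})
# Real ≈ 0.822, Fake ≈ 0.178

Weighting each chunk's score by its character count keeps a short trailing fragment from counting as much as a full 900-character chunk when the per-chunk probabilities are combined into the final result.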