Spaces:
Sleeping
Sleeping
HakshaySundar
committed on
Commit
•
2610aed
1
Parent(s):
58eb5eb
Adding files for API integration
Browse files- app.py +11 -17
- arte_score.py +21 -0
- requirements.txt +2 -1
- text_converter.py +31 -4
app.py
CHANGED
@@ -1,7 +1,5 @@
|
|
1 |
import gradio as gr
|
2 |
-
from text_converter import generate_similar_sentence
|
3 |
-
from text_converter import user_input_readability_level
|
4 |
-
from text_converter import reading_levels
|
5 |
|
6 |
APP_DESCRIPTION = '''# Reading Level Converter
|
7 |
<div id="content_align">Convert any text to a specified reading level while retaining the core text meaning</div>'''
|
@@ -10,19 +8,6 @@ MIN_ENTAILMENT = 0.5
|
|
10 |
MAX_ITER = 5
|
11 |
SYSTEM_PROMPT = "You are a writing assistant. You help convert complex texts to simpler texts while maintaining the core meaning of the text."
|
12 |
|
13 |
-
# Dictionary mapping grade levels to reading ease scores
|
14 |
-
"""
|
15 |
-
reading_levels = {
|
16 |
-
"5th Grade (90-100)": (90, 100),
|
17 |
-
"6th Grade (80-90)": (80, 90),
|
18 |
-
"7th Grade (70-80)": (70, 80),
|
19 |
-
"8th - 9th Grade (60-70)": (60, 70),
|
20 |
-
"10th - 12th Grade (50-60)": (50, 60),
|
21 |
-
"College (30-50)": (30, 50),
|
22 |
-
"College Graduate + Professionals (0-30)": (0, 30)
|
23 |
-
}
|
24 |
-
"""
|
25 |
-
|
26 |
def convert_text(input_text, grade_level, input_reading_score):
|
27 |
min_level, max_level = reading_levels[grade_level]
|
28 |
output_text, similarity, reading_level, message = generate_similar_sentence(input_text, min_level, max_level, MIN_ENTAILMENT, SYSTEM_PROMPT, MAX_ITER, float(input_reading_score))
|
@@ -32,6 +17,15 @@ with gr.Blocks(css='styles.css') as app:
|
|
32 |
gr.Markdown(APP_DESCRIPTION)
|
33 |
|
34 |
with gr.Tab("Reading Level Calculator"):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
input_text = gr.Textbox(label="Input Text", placeholder="Type here...", lines=4)
|
36 |
|
37 |
fetch_score_and_lvl_btn = gr.Button("Fetch Score and Level")
|
@@ -45,7 +39,7 @@ with gr.Blocks(css='styles.css') as app:
|
|
45 |
outputs=[output_input_reading_score, output_input_reading_level]
|
46 |
)
|
47 |
|
48 |
-
grade_level = gr.Radio(choices=list(reading_levels.keys()), label="Target Reading Level", value=list(reading_levels.keys())[0])
|
49 |
|
50 |
output_reading_level = gr.Textbox(label="Output Reading Level", placeholder="Output Reading Level...", lines=1)
|
51 |
output_similarity = gr.Textbox(label="Similarity", placeholder="Similarity Score...", lines=1)
|
|
|
1 |
import gradio as gr
|
2 |
+
from text_converter import reading_levels, model_types, generate_similar_sentence, user_input_readability_level, set_reading_levels
|
|
|
|
|
3 |
|
4 |
APP_DESCRIPTION = '''# Reading Level Converter
|
5 |
<div id="content_align">Convert any text to a specified reading level while retaining the core text meaning</div>'''
|
|
|
8 |
MAX_ITER = 5
|
9 |
SYSTEM_PROMPT = "You are a writing assistant. You help convert complex texts to simpler texts while maintaining the core meaning of the text."
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
def convert_text(input_text, grade_level, input_reading_score):
|
12 |
min_level, max_level = reading_levels[grade_level]
|
13 |
output_text, similarity, reading_level, message = generate_similar_sentence(input_text, min_level, max_level, MIN_ENTAILMENT, SYSTEM_PROMPT, MAX_ITER, float(input_reading_score))
|
|
|
17 |
gr.Markdown(APP_DESCRIPTION)
|
18 |
|
19 |
with gr.Tab("Reading Level Calculator"):
|
20 |
+
with gr.Row():
|
21 |
+
model_select = gr.Radio(choices=model_types, label="Readability Score Model", value=model_types[1], interactive=True, scale=2)
|
22 |
+
model_select_btn = gr.Button("Select Readability Score Model", scale=1)
|
23 |
+
|
24 |
+
model_select_btn.click(
|
25 |
+
fn = set_reading_levels,
|
26 |
+
inputs=[model_select]
|
27 |
+
)
|
28 |
+
|
29 |
input_text = gr.Textbox(label="Input Text", placeholder="Type here...", lines=4)
|
30 |
|
31 |
fetch_score_and_lvl_btn = gr.Button("Fetch Score and Level")
|
|
|
39 |
outputs=[output_input_reading_score, output_input_reading_level]
|
40 |
)
|
41 |
|
42 |
+
grade_level = gr.Radio(choices=list(reading_levels.keys()), label="Target Reading Level", value=list(reading_levels.keys())[0], interactive=True)
|
43 |
|
44 |
output_reading_level = gr.Textbox(label="Output Reading Level", placeholder="Output Reading Level...", lines=1)
|
45 |
output_similarity = gr.Textbox(label="Similarity", placeholder="Similarity Score...", lines=1)
|
arte_score.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
import json
import os

# Credentials and endpoint come from the environment so no secrets live in code.
ARTE_API_KEY = os.environ.get("ARTE_API_KEY")
ARTE_BASE_URL = os.environ.get("ARTE_BASE_URL")
arte_api_endpoint = "ping_api"


def ping_api(text, model_type="SBERT", timeout=30):
    """Request a readability score for *text* from the ARTE API.

    Args:
        text: The text to score.
        model_type: ARTE model to query (e.g. "FRE" or "SBERT").
        timeout: Seconds to wait for the HTTP response before raising
            (the original call had no timeout and could hang forever).

    Returns:
        The score reported under the model's "sc" key, or None when the
        response contains no entry for *model_type*.

    Raises:
        requests.RequestException: On network failure or timeout.
        requests.HTTPError: If the API responds with an error status.
    """
    payload = {
        "APIkey": ARTE_API_KEY,  # supplied via the ARTE_API_KEY env var
        "selection": [model_type],
        "text": text
    }
    json_payload = json.dumps(payload)
    headers = {
        'Content-Type': 'application/json'
    }
    response = requests.post(
        ARTE_BASE_URL + arte_api_endpoint,
        data=json_payload,
        headers=headers,
        timeout=timeout,
    )
    # Surface HTTP errors instead of trying to json-parse an error page.
    response.raise_for_status()
    data = json.loads(response.text)
    # Guard against a missing model entry: the original
    # data.get(model_type).get("sc") raised AttributeError on None.
    model_result = data.get(model_type) or {}
    return model_result.get("sc")
|
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ keras==2.15.0
|
|
3 |
torch
|
4 |
transformers
|
5 |
textstat
|
6 |
-
openai
|
|
|
|
3 |
torch
|
4 |
transformers
|
5 |
textstat
|
6 |
+
openai
|
7 |
+
requests
|
text_converter.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
from bert_similarity import get_similarity
|
2 |
from text_generator import get_gpt_response
|
|
|
3 |
from textstat import flesch_reading_ease
|
4 |
|
5 |
def generate_user_prompt(prompt_type, base_text):
|
@@ -24,7 +25,10 @@ def generate_user_prompt(prompt_type, base_text):
|
|
24 |
|
25 |
return prompts[prompt_type].format(base_text=base_text)
|
26 |
|
27 |
-
|
|
|
|
|
|
|
28 |
"5th Grade (90-100)": (90, 100),
|
29 |
"6th Grade (80-90)": (80, 90),
|
30 |
"7th Grade (70-80)": (70, 80),
|
@@ -35,11 +39,33 @@ reading_levels = {
|
|
35 |
"Research + Nobel laureate ((-infinity)-(-1))": (-float('inf'), -1)
|
36 |
}
|
37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
inverse_reading_levels = {v: k for k, v in reading_levels.items()}
|
39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
def user_input_readability_level(input_text):
|
41 |
-
current_score = flesch_reading_ease(input_text)
|
42 |
-
print(f'Reading score for user input is: {current_score}')
|
43 |
current_level = ''
|
44 |
for (min, max), level in inverse_reading_levels.items():
|
45 |
if min <= current_score <= max:
|
@@ -85,7 +111,7 @@ def generate_similar_sentence(input_text, min_reading_level, max_reading_level,
|
|
85 |
generated_texts.append(response)
|
86 |
|
87 |
similarity = get_similarity(response, input_text)
|
88 |
-
reading_level = flesch_reading_ease(response)
|
89 |
|
90 |
# We add the generated text's reading level to the list.
|
91 |
generated_text_scores.append((similarity, reading_level))
|
@@ -108,6 +134,7 @@ def generate_similar_sentence(input_text, min_reading_level, max_reading_level,
|
|
108 |
i += 1
|
109 |
|
110 |
# Printing all generated texts to the console.
|
|
|
111 |
for index, text in enumerate(generated_texts):
|
112 |
print(f"=============== Iteration {index} ===============")
|
113 |
print(f"Generated text: {text}\nSimilarity: {generated_text_scores[index][0]}\nReadability Score: {generated_text_scores[index][1]}\n")
|
|
|
1 |
from bert_similarity import get_similarity
|
2 |
from text_generator import get_gpt_response
|
3 |
+
from arte_score import ping_api
|
4 |
from textstat import flesch_reading_ease
|
5 |
|
6 |
def generate_user_prompt(prompt_type, base_text):
|
|
|
25 |
|
26 |
return prompts[prompt_type].format(base_text=base_text)
|
27 |
|
28 |
+
model_types = ["FRE", "SBERT"]
|
29 |
+
model_type = model_types[1]
|
30 |
+
|
31 |
+
fre_levels = {
|
32 |
"5th Grade (90-100)": (90, 100),
|
33 |
"6th Grade (80-90)": (80, 90),
|
34 |
"7th Grade (70-80)": (70, 80),
|
|
|
39 |
"Research + Nobel laureate ((-infinity)-(-1))": (-float('inf'), -1)
|
40 |
}
|
41 |
|
42 |
+
sbert_levels = {
|
43 |
+
"Difficult (-100 to -1.88)": (-100, -1.88),
|
44 |
+
"Somewhat Difficult (-1.87 to -1.21)": (-1.87, -1.21),
|
45 |
+
"Intermediate (-1.20 to -0.65)": (-1.20, -0.65),
|
46 |
+
"Somewhat Easy (-0.64 to -0.05)": (-0.64, -0.05),
|
47 |
+
"Easy (> -0.04)": (-0.04, 100)
|
48 |
+
}
|
49 |
+
|
50 |
+
# Setting SBert to be default to avoid any issues.
|
51 |
+
reading_levels = sbert_levels
|
52 |
+
|
53 |
inverse_reading_levels = {v: k for k, v in reading_levels.items()}
|
54 |
|
55 |
+
def set_reading_levels(level_type):
    """Switch the module-level reading-level tables to the given model.

    Args:
        level_type: "FRE" or "SBERT". Any other value is ignored so the
            previously selected tables stay active.

    Side effects:
        Rebinds the module globals ``reading_levels``,
        ``inverse_reading_levels`` and ``model_type`` together, keeping
        them consistent with each other.
    """
    global reading_levels
    global inverse_reading_levels
    global model_type
    if level_type == "FRE":
        reading_levels = fre_levels
    elif level_type == "SBERT":
        reading_levels = sbert_levels
    else:
        # The original still overwrote model_type here, leaving it out of
        # sync with reading_levels; bail out on an unknown selection instead.
        return
    inverse_reading_levels = {v: k for k, v in reading_levels.items()}
    model_type = level_type
|
65 |
+
|
66 |
def user_input_readability_level(input_text):
|
67 |
+
current_score = ping_api(input_text, model_type) #flesch_reading_ease(input_text)
|
68 |
+
print(f'Reading score for user input is: {current_score} for model type: {model_type}')
|
69 |
current_level = ''
|
70 |
for (min, max), level in inverse_reading_levels.items():
|
71 |
if min <= current_score <= max:
|
|
|
111 |
generated_texts.append(response)
|
112 |
|
113 |
similarity = get_similarity(response, input_text)
|
114 |
+
reading_level = ping_api(response, model_type) #flesch_reading_ease(response)
|
115 |
|
116 |
# We add the generated text's reading level to the list.
|
117 |
generated_text_scores.append((similarity, reading_level))
|
|
|
134 |
i += 1
|
135 |
|
136 |
# Printing all generated texts to the console.
|
137 |
+
print(f"=============== Model Type: {model_type} ===============\n")
|
138 |
for index, text in enumerate(generated_texts):
|
139 |
print(f"=============== Iteration {index} ===============")
|
140 |
print(f"Generated text: {text}\nSimilarity: {generated_text_scores[index][0]}\nReadability Score: {generated_text_scores[index][1]}\n")
|