# readinglevelconverter / text_converter.py
from bert_similarity import get_similarity
from text_generator import get_gpt_response
from arte_score import ping_api
def generate_user_prompt(prompt_type, base_text):
    # Build the GPT user prompt for either raising or lowering the reading level of base_text.
    prompts = {
        "too_simple": f"""
        Convert this text to a higher reading level than the original text.
        The higher reading level text should have more syllables per word and more words per sentence.
        It should retain the core meaning of the original text.
        The output text should also have a similar number of total words as the input text.
        Here is the input text:
        {base_text}
        """,
        "too_complex": f"""
        Convert this text to a simpler version of the original text.
        The simpler version should have simpler words, fewer syllables per word, and fewer words per sentence.
        It should retain the core meaning of the original text.
        The output text should also have a similar number of total words as the input text.
        Here is the input text:
        {base_text}
        """
    }
    # The f-strings above already interpolate base_text, so no further .format() call is needed.
    return prompts[prompt_type]

__model_types = ["FRE", "SBERT"]
fre_levels = {
    "5th Grade (90-100)": (90, 100),
    "6th Grade (80-90)": (80, 90),
    "7th Grade (70-80)": (70, 80),
    "8th - 9th Grade (60-70)": (60, 70),
    "10th - 12th Grade (50-60)": (50, 60),
    "College (30-50)": (30, 50),
    "College Graduate + Professionals (0-30)": (0, 30),
    "Research + Nobel laureate ((-infinity)-(-1))": (-float('inf'), -1)
}

sbert_levels = {
    "Easy (> -0.04)": (-0.04, 100),
    "Somewhat Easy (-0.64 to -0.05)": (-0.64, -0.05),
    "Intermediate (-1.20 to -0.65)": (-1.20, -0.65),
    "Somewhat Difficult (-1.87 to -1.21)": (-1.87, -1.21),
    "Difficult (-100 to -1.88)": (-100, -1.88)
}
# Set FRE as the default reading-level model.
__reading_levels = fre_levels
__inverse_reading_levels = {v: k for k, v in __reading_levels.items()}

def __set_reading_levels(level_type):
    global __reading_levels
    global __inverse_reading_levels
    if level_type == "FRE":
        __reading_levels = fre_levels
    elif level_type == "SBERT":
        __reading_levels = sbert_levels
    __inverse_reading_levels = {v: k for k, v in __reading_levels.items()}
    return level_type

def fetch_readability_score_fre(input_text):
    __set_reading_levels(__model_types[0])
    print("=============== Setting Reading Levels to FRE ===============")
    print(f"Reading Level Dictionary: {__reading_levels}\n")
    print(f"Inverse Reading Level Dictionary: {__inverse_reading_levels}")
    print("=============== Finished Setting Reading Levels to FRE ===============")
    return __user_input_readability_level(input_text, True)

def fetch_readability_score_sbert(input_text):
    __set_reading_levels(__model_types[1])
    print("=============== Setting Reading Levels to SBERT ===============")
    print(f"Reading Level Dictionary: {__reading_levels}\n")
    print(f"Inverse Reading Level Dictionary: {__inverse_reading_levels}")
    print("=============== Finished Setting Reading Levels to SBERT ===============")
    return __user_input_readability_level(input_text, False)

def __user_input_readability_level(input_text, is_fre):
    model_type = __model_types[0] if is_fre else __model_types[1]
    current_score = ping_api(input_text, model_type)
    print(f'Reading score for user input is: {current_score} for model type: {model_type}')
    current_level = ''
    # Find the reading-level label whose (low, high) range contains the score.
    for (low, high), level in __inverse_reading_levels.items():
        if low <= current_score <= high:
            print(f'Reading level for user input is: {level}')
            current_level = level
            break
    return current_score, current_level

def generate_sim_fre(input_text, min_reading_level, max_reading_level, min_entailment, system_prompt, max_iter, curr_reading_level):
    return __generate_similar_sentence(input_text, min_reading_level, max_reading_level, min_entailment, system_prompt, max_iter, curr_reading_level, True)

def generate_sim_sbert(input_text, min_reading_level, max_reading_level, min_entailment, system_prompt, max_iter, curr_reading_level):
    return __generate_similar_sentence(input_text, min_reading_level, max_reading_level, min_entailment, system_prompt, max_iter, curr_reading_level, False)

def __generate_similar_sentence(input_text, min_reading_level, max_reading_level, min_entailment, system_prompt, max_iter, curr_reading_level, is_fre):
    i = 0
    completed = False
    user_prompt = ""
    curr_text = input_text
    similarity = 0
    reading_level = 0
    generated_texts = []
    generated_text_scores = []
    result_index = -1
    closeness = float('inf')
    if min_reading_level < curr_reading_level < max_reading_level:
        return input_text, 1, curr_reading_level, "Input text was already within the target reading level!"
    while i < max_iter and not completed:
        if curr_reading_level > max_reading_level:
            print(f"Too simple, current reading level is {curr_reading_level}")
            user_prompt = generate_user_prompt("too_simple", curr_text)
        elif curr_reading_level < min_reading_level:
            print(f"Too complex, current reading level is {curr_reading_level}")
            user_prompt = generate_user_prompt("too_complex", curr_text)
        elif similarity < min_entailment:
            print(f"Entailment level is too low: {similarity}")
            user_prompt = f"Can you convert this text '{input_text}' to a grade level more similar to this text '{curr_text}'"
        else:
            print(f"Reading level is within target range: {curr_reading_level}")
            completed = True
            break
        response = get_gpt_response(user_prompt, system_prompt)
        # Add the generated text to the list of generated texts.
        generated_texts.append(response)
        similarity = get_similarity(response, input_text)
        reading_level = ping_api(response, __model_types[0] if is_fre else __model_types[1])
        # Add the generated text's similarity and reading level to the list.
        generated_text_scores.append((similarity, reading_level))
        # Determine how close the reading level is to the target range and keep the best candidate so far.
        if similarity >= min_entailment and min_reading_level <= reading_level <= max_reading_level:
            result_index = i
            completed = True
            break
        elif result_index == -1:
            result_index = i
        elif ((reading_level < min_reading_level and abs(reading_level - min_reading_level) < closeness)
              or (reading_level > max_reading_level and abs(reading_level - max_reading_level) < closeness)) \
                and (similarity > generated_text_scores[result_index][0] or similarity > min_entailment):
            closeness = abs(reading_level - min_reading_level) if reading_level < min_reading_level else abs(reading_level - max_reading_level)
            result_index = i
        curr_text = response
        curr_reading_level = reading_level
        i += 1
    # Print all generated texts to the console.
    print(f"=============== Model Type: {__model_types[0] if is_fre else __model_types[1]} ===============\n")
    for index, text in enumerate(generated_texts):
        print(f"=============== Iteration {index + 1} ===============")
        print(f"Generated text: {text}\nSimilarity: {generated_text_scores[index][0]}\nReadability Score: {generated_text_scores[index][1]}\n")
    # Print the selected iteration and text.
    print("=============== Start Printing Selected Information ===============")
    print(f"Selected Iteration: {result_index + 1}")
    print(f"Selected Result: {generated_texts[result_index]}")
    print(f"Selected Result's Similarity score: {generated_text_scores[result_index][0]}")
    print(f"Selected Result's Readability score: {generated_text_scores[result_index][1]}")
    print("=============== End Printing Selected Information ===============")
    # Return the final result.
    if completed:
        return generated_texts[result_index], generated_text_scores[result_index][0], generated_text_scores[result_index][1], "Success! Please see the converted text at your target reading level."
    return generated_texts[result_index], generated_text_scores[result_index][0], generated_text_scores[result_index][1], "Failed. We could not reach the target reading level while maintaining the text meaning."
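
# Minimal usage sketch. The sample text, target FRE band, entailment threshold, system
# prompt, and iteration cap below are illustrative assumptions, not values taken from
# the app itself; they only show how the functions above fit together.
if __name__ == "__main__":
    sample_text = "Photosynthesis lets plants turn sunlight into food."  # assumed sample input
    score, level = fetch_readability_score_fre(sample_text)
    print(f"Initial FRE score: {score} ({level})")

    # Assumed targets: the 8th - 9th Grade FRE band (60-70), a minimum entailment of 0.7,
    # a generic system prompt, and at most 5 rewrite attempts.
    converted_text, similarity, new_score, message = generate_sim_fre(
        sample_text,
        min_reading_level=60,
        max_reading_level=70,
        min_entailment=0.7,
        system_prompt="You rewrite text to a requested reading level.",
        max_iter=5,
        curr_reading_level=score,
    )
    print(message)
    print(f"Converted text: {converted_text}\nSimilarity: {similarity}\nFRE score: {new_score}")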