Update app.py
app.py CHANGED
@@ -3,26 +3,22 @@ from transformers import pipeline
 import random
 import logging
 
-#
+# Enable logging
 logging.basicConfig(level=logging.INFO)
 
-# Load
+# Load Hugging Face models
 paraphraser = pipeline("text2text-generation", model="t5-base", max_length=512)
-
-# Load grammar correction model
 grammar_corrector = pipeline("text2text-generation", model="vennify/t5-base-grammar-correction", max_length=512)
 
-# Function to rephrase content
 def rephrase_content(text: str) -> str:
     try:
-        prompt = f"paraphrase: {text}
-        result = paraphraser(prompt, do_sample=True, top_k=50, top_p=0.95)[0]
-        return result
+        prompt = f"paraphrase: {text}"
+        result = paraphraser(prompt, do_sample=True, top_k=50, top_p=0.95)[0]['generated_text']
+        return result.strip()
     except Exception as e:
-        logging.error(f"
+        logging.error(f"Error in rephrasing: {e}")
         return text
 
-# Function to inject critical thinking phrase
 def inject_critical_thinking(text: str) -> str:
     critical_phrases = [
         "However, this perspective may be limited due to...",
@@ -33,33 +29,30 @@ def inject_critical_thinking(text: str) -> str:
     ]
     return f"{text} {random.choice(critical_phrases)}"
 
-# Function to correct grammar
 def correct_grammar(text: str) -> str:
     try:
         prompt = f"grammar: {text}"
-        result = grammar_corrector(prompt)[0]
-        return result
+        result = grammar_corrector(prompt)[0]['generated_text']
+        return result.strip()
     except Exception as e:
-        logging.error(f"
+        logging.error(f"Error in grammar correction: {e}")
         return text
 
-# Main processing function
 def process_text(text: str) -> str:
-    logging.info("
+    logging.info("Starting academic text enhancement...")
     rephrased = rephrase_content(text)
     injected = inject_critical_thinking(rephrased)
     corrected = correct_grammar(injected)
-
+    logging.info("Final output generated.")
+    return corrected
 
-# Gradio
+# Gradio UI
 iface = gr.Interface(
     fn=process_text,
-    inputs=gr.Textbox(lines=15, label="
-    outputs=gr.Textbox(lines=15, label="
-    title="Academic Enhancer
-    description="
+    inputs=gr.Textbox(lines=15, label="Paste your academic content"),
+    outputs=gr.Textbox(lines=15, label="Enhanced Content"),
+    title="Academic Writing Enhancer",
+    description="Rephrase, enhance and correct academic content for clarity and originality."
 )
 
-iface.launch(
-    server_port=7860,
-    show_api=True)
+iface.launch()
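
For reference, the [0]['generated_text'] indexing introduced above matches how a transformers text2text-generation pipeline returns its output: a list with one dict per generated sequence, not a plain string. A minimal sketch of that return shape, using the same model as app.py (the sample sentence is only illustrative):

from transformers import pipeline

# Same model and sampling settings as app.py; the input sentence is just an example.
paraphraser = pipeline("text2text-generation", model="t5-base", max_length=512)

outputs = paraphraser("paraphrase: The results of the study was significant.",
                      do_sample=True, top_k=50, top_p=0.95)
print(outputs)                       # e.g. [{'generated_text': '...'}]
print(outputs[0]["generated_text"])  # the plain string process_text needs

Returning result[0] directly, as the previous version did, would have placed the raw dict repr in the Gradio output textbox, which is why both rephrase_content and correct_grammar now unwrap and strip the generated string.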