# SimplifyPro / app.py
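"""SimplifyPro: a small Gradio app that rewrites input text for a chosen audience
(children, general public, or professionals) by streaming chat completions from a
hosted model through the Hugging Face Inference API."""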
import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the Hugging Face Inference Client
client = InferenceClient()
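# NOTE: with no arguments, InferenceClient falls back to the locally saved Hugging Face
# token (e.g. from `huggingface-cli login`) or the HF_TOKEN environment variable, if set.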
# Function to stream simplified text as it is generated
def simplify_text(input_text, audience):
prompt = f"Transform the following text into a clear and concise version suitable for a '{audience}' audience. Use language, tone, and style appropriate to their understanding level, while retaining the core meaning and key details:\n\n{input_text}"
messages = [
{"role": "user", "content": prompt}
]
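    # Any chat-capable model id served through the Inference API could be substituted for the one below.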
    # Create a stream to receive generated content
    stream = client.chat.completions.create(
        model="Qwen/QwQ-32B-Preview",
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True,
    )
    # Stream content as it is generated
    simplified_output = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # the final chunk may carry no content
            simplified_output += delta
            yield simplified_output  # Yield incremental content to display immediately
# Build the Gradio interface: inputs on the left, streamed output on the right
with gr.Blocks() as app:
    gr.Markdown("## Contextual Text Simplifier")
    gr.Markdown("Simplify complex text into context-appropriate language for specific audiences using AI.")

    with gr.Row():
        # First column for input components
        with gr.Column():
            text_input = gr.Textbox(
                lines=6,
                label="Input Text",
                placeholder="Paste the text you want to simplify here.",
                elem_id="full_width"
            )
            audience = gr.Dropdown(
                choices=["Children", "General Public", "Professionals"],
                label="Target Audience",
                value="General Public"
            )
            simplify_button = gr.Button("Simplify Text")

        # Second column for output
        with gr.Column():
            gr.Markdown("### Simplified Text")  # Acts as the label for the output
            output_markdown = gr.Markdown()

    # Link button to function with inputs and outputs
    simplify_button.click(fn=simplify_text, inputs=[text_input, audience], outputs=output_markdown)
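    # Because simplify_text is a generator (it yields partial strings), Gradio streams
    # each yielded value into the Markdown output as the model produces it.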
# Run the Gradio app
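# debug=True surfaces errors and tracebacks in the console, which is helpful while developing.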
app.launch(debug=True)