|
import gradio as gr |
|
from transformers import pipeline |
|
import time |
|
|
|
|
|
# Load the paraphrasing pipeline once at startup so every request reuses it.
print("Loading model...")

# perf_counter() is monotonic and intended for elapsed-time measurement;
# time.time() can jump backwards/forwards if the wall clock is adjusted mid-load.
start_time = time.perf_counter()

try:
    # T5-based seq2seq model fine-tuned for paraphrasing, pinned to CPU.
    model = pipeline(
        "text2text-generation",
        model="humarin/chatgpt_paraphraser_on_T5_base",
        device="cpu",
    )
    load_time = time.perf_counter() - start_time
    print(f"Model loaded successfully in {load_time:.2f} seconds")
except Exception as e:
    # Surface the failure and abort: the app is useless without the model.
    print(f"Model loading failed: {str(e)}")
    raise
|
|
|
def paraphrase(text):
    """Paraphrase *text* with the T5 model.

    Returns the generated paraphrase, or the input unchanged when the
    input is empty/whitespace or generation fails, so the UI always
    shows something sensible.
    """
    # Empty or whitespace-only input: skip the (expensive) model call.
    if not text or not text.strip():
        return text

    try:
        output = model(
            # The model expects the task prefix "paraphrase: " on its input.
            f"paraphrase: {text}",
            max_length=128,
            # Beam search is used for generation. Note: the original code also
            # passed temperature=0.7, but temperature only applies when
            # do_sample=True — with plain beam search it is a no-op and makes
            # transformers emit a warning, so it has been removed.
            num_beams=5,
        )
        return output[0]["generated_text"]
    except Exception as e:
        # Best-effort fallback: log the error and echo the original text
        # rather than crashing the request.
        print(f"Generation error: {str(e)}")
        return text
|
|
|
|
|
# --- Gradio UI --------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## T5 Paraphraser")

    # Input on the left, paraphrased output on the right.
    with gr.Row():
        input_text = gr.Textbox(label="Input Text", lines=3)
        output_text = gr.Textbox(label="Paraphrased Text", lines=3)

    submit_btn = gr.Button("Paraphrase")
    submit_btn.click(
        fn=paraphrase,
        inputs=input_text,
        outputs=output_text,
        # Stable name so the endpoint is reachable as /api/predict.
        api_name="predict",
    )

    # One-click example input for first-time users.
    gr.Examples(
        examples=["The quick brown fox jumps over the lazy dog."],
        inputs=input_text,
    )

# Guard the launch so importing this module (e.g. from tests or another app)
# does not start the server; running the script directly behaves as before.
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (container/Space use)
        server_port=7860,
        show_api=True,
    )