import gradio as gr
from transformers import pipeline
import time

# Show loading status
print("Loading model...")
start_time = time.time()

# Initialize model with error handling
try:
    model = pipeline(
        "text2text-generation",
        model="humarin/chatgpt_paraphraser_on_T5_base",
        device="cpu"
    )
    load_time = time.time() - start_time
    print(f"Model loaded successfully in {load_time:.2f} seconds")
except Exception as e:
    print(f"Model loading failed: {str(e)}")
    raise
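
# Note: device="cpu" pins inference to the CPU; the pipeline also accepts
# device=0 or "cuda:0" here to run on a GPU when one is available.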

def paraphrase(text):
    """Paraphrase the input text, returning it unchanged if generation fails."""
    try:
        output = model(
            f"paraphrase: {text}",
            max_length=128,
            num_beams=5,
            do_sample=True,  # temperature only takes effect when sampling is enabled
            temperature=0.7,
        )
        return output[0]["generated_text"]
    except Exception as e:
        print(f"Generation error: {str(e)}")
        return text
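
# Quick sanity check (a sketch; uncomment to run once at startup, the exact
# output will vary with the model version):
# print(paraphrase("The quick brown fox jumps over the lazy dog."))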

# Create interface with modern Gradio API
with gr.Blocks() as demo:
    gr.Markdown("## T5 Paraphraser")
    with gr.Row():
        input_text = gr.Textbox(label="Input Text", lines=3)
        output_text = gr.Textbox(label="Paraphrased Text", lines=3)
    submit_btn = gr.Button("Paraphrase")
    submit_btn.click(
        fn=paraphrase,
        inputs=input_text,
        outputs=output_text,
        api_name="predict"  # Standard endpoint name
    )
    gr.Examples(
        examples=["The quick brown fox jumps over the lazy dog."],
        inputs=input_text
    )
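
    # Note: as configured, the examples only pre-fill the input box; passing
    # fn=paraphrase, outputs=output_text, and cache_examples=True to
    # gr.Examples would precompute their outputs when the app starts.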

# Launch configuration for current Gradio
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_api=True
)
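
# Example of calling the endpoint once the app is running (a sketch, assuming
# the gradio_client package is installed and the app is served locally):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict(
#       "The quick brown fox jumps over the lazy dog.",
#       api_name="/predict",
#   )
#   print(result)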