Update app.py
app.py CHANGED

@@ -55,9 +55,9 @@ def gradio_generate_text(prompt, max_length=100, num_return_sequences=1, top_p=0
     generated_text = generate_text(tokenizer, model, device, prompt, max_length, num_return_sequences, top_p, temperature)
     return generated_text
 
-def gradio_generate_text(prompt, max_length, num_sequences, top_p, temperature):
-    # Placeholder for your text generation logic
-    return f"Generated text based on: {prompt}"
+# def gradio_generate_text(prompt, max_length, num_sequences, top_p, temperature):
+#     # Placeholder for your text generation logic
+#     return f"Generated text based on: {prompt}"
 
 # Ensure the models directory exists
 if not os.path.exists('models'):
@@ -68,10 +68,8 @@ if not os.path.exists('models/vi-medical-t5-finetune-qa'):
     run_shell_command('cd models && git clone https://huggingface.co/danhtran2mind/vi-medical-t5-finetune-qa && cd ..')
 
 # Load the trained model and tokenizer
-print('dqwdqqqqqqqqqqqqqqqqq')
 model_path = "models/vi-medical-t5-finetune-qa"
 tokenizer, model, device = load_model_and_tokenizer(model_path)
-print('dqwdqqqqqqqqqqqqqqqqq_2')
 # Create Gradio interface
 
 
@@ -80,8 +78,8 @@ iface = gr.Interface(
     fn=gradio_generate_text,
     inputs=[
         gr.Textbox(lines=5, label="Input Prompt"),
-        gr.Slider(minimum=10, maximum=
-        gr.Slider(minimum=1, maximum=
+        gr.Slider(minimum=10, maximum=768, value=32, label="Max Length"),
+        gr.Slider(minimum=1, maximum=5, value=1, label="Number of Sequences"),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p Sampling"),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
     ],
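For context, a minimal sketch of what the two helpers this diff calls, load_model_and_tokenizer and generate_text, might look like. Their real implementations are not part of this commit; the use of AutoTokenizer/AutoModelForSeq2SeqLM, the device selection, and the joined-string return value below are assumptions that only mirror the call sites and the vi-medical-t5-finetune-qa checkpoint being cloned.

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

def load_model_and_tokenizer(model_path):
    # Assumption: pick GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to(device)
    return tokenizer, model, device

def generate_text(tokenizer, model, device, prompt, max_length,
                  num_return_sequences, top_p, temperature):
    # Tokenize the prompt and sample with the slider-controlled parameters
    # exposed by the Gradio interface above.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=num_return_sequences,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
    )
    texts = [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]
    # Join multiple sequences so the result fits a single Textbox output;
    # the repository's actual helper may return a list instead.
    return "\n\n".join(texts)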