Spaces: Running
salomonsky committed
Commit ac00586 • 1 Parent(s): f0f180b
Update app.py

app.py CHANGED
@@ -71,7 +71,7 @@ async def improve_prompt(prompt):
     try:
         instruction = ("With this idea, describe in English a detailed img2vid prompt in a single paragraph of up to 200 characters maximun, developing atmosphere, characters, lighting, and cameras.")
         formatted_prompt = f"{prompt}: {instruction}"
-        response = llm_client.text_generation(formatted_prompt, max_new_tokens=
+        response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
         improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()

         return improved_text
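For readability, here is how the updated improve_prompt coroutine would read in full after this hunk. This is only a sketch: it assumes llm_client is a huggingface_hub.InferenceClient, and the model id and except branch are placeholders that do not appear in the commit.

# Sketch of improve_prompt after this change (assumption: llm_client is a
# huggingface_hub.InferenceClient; the model id and except branch are placeholders).
from huggingface_hub import InferenceClient

llm_client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")  # placeholder model id

async def improve_prompt(prompt):
    try:
        instruction = ("With this idea, describe in English a detailed img2vid prompt in a "
                       "single paragraph of up to 200 characters maximun, developing "
                       "atmosphere, characters, lighting, and cameras.")
        formatted_prompt = f"{prompt}: {instruction}"
        # max_new_tokens=200 is the value introduced by this commit.
        response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
        # By default text_generation returns a plain str, so the membership test is a
        # substring check that normally fails and the else branch is taken.
        improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
        return improved_text
    except Exception as e:
        return f"Error: {e}"  # placeholder error handling; the real except clause is outside this hunk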
@@ -99,27 +99,23 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
         choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"],
         value="XLabs-AI/flux-RealismLora"
     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        fn=gen,
-        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
-        outputs=output_res
-    )
+    with gr.Row():
+        process_lora = gr.Checkbox(label="Procesar LORA")
+        process_upscale = gr.Checkbox(label="Procesar Escalador")
+    improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
+    improve_btn = gr.Button("Mejora mi prompt")
+    improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)
+    with gr.Accordion(label="Opciones Avanzadas", open=False):
+        width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
+        height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
+        upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
+        scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
+        steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
+        seed = gr.Number(label="Semilla", value=-1)
+    btn = gr.Button("Generar")
+    btn.click(
+        fn=gen,
+        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
+        outputs=output_res
+    )
 demo.launch()
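The second hunk places the new controls directly inside the existing gr.Blocks layout. The sketch below shows that wiring as a self-contained script; gen, improve_prompt, prompt, basemodel_choice, lora_model_choice, and output_res are defined elsewhere in app.py, so the stubs and placeholder values here are assumptions made only to keep the example runnable.

# Self-contained sketch of the UI wiring added in the second hunk.
# The stubs and placeholder values stand in for code defined elsewhere in app.py.
import gradio as gr

def improve_prompt(prompt):
    # Placeholder for the LLM-backed prompt improver.
    return f"improved: {prompt}"

def gen(prompt, basemodel, width, height, scales, steps, seed,
        upscale_factor, process_upscale, lora_model, process_lora):
    # Placeholder for the image generation pipeline.
    return f"would generate '{prompt}' at {width}x{height}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    basemodel_choice = gr.Textbox(label="Modelo Base", value="black-forest-labs/FLUX.1-dev")  # placeholder value, not from the diff
    lora_model_choice = gr.Dropdown(
        label="LoRA",
        choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"],
        value="XLabs-AI/flux-RealismLora",
    )
    output_res = gr.Textbox(label="Resultado")  # stand-in for the real output component

    # Lines added by the commit:
    with gr.Row():
        process_lora = gr.Checkbox(label="Procesar LORA")
        process_upscale = gr.Checkbox(label="Procesar Escalador")
    improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
    improve_btn = gr.Button("Mejora mi prompt")
    improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)
    with gr.Accordion(label="Opciones Avanzadas", open=False):
        width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
        height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
        upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
        scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
        steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
        seed = gr.Number(label="Semilla", value=-1)
    btn = gr.Button("Generar")
    btn.click(
        fn=gen,
        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed,
                upscale_factor, process_upscale, lora_model_choice, process_lora],
        outputs=output_res,
    )

demo.launch()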