salomonsky committed on
Commit
077426a
1 Parent(s): c9f21b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -73
app.py CHANGED
@@ -7,6 +7,7 @@ import streamlit as st
7
  from huggingface_hub import InferenceClient, AsyncInferenceClient
8
  from gradio_client import Client, handle_file
9
  import asyncio
 
10
 
11
  MAX_SEED = np.iinfo(np.int32).max
12
  HF_TOKEN = os.environ.get("HF_TOKEN")
@@ -16,6 +17,13 @@ llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
16
  DATA_PATH = Path("./data")
17
  DATA_PATH.mkdir(exist_ok=True)
18
 
 
 
 
 
 
 
 
19
  def enable_lora(lora_add, basemodel):
20
  return lora_add if lora_add else basemodel
21
 
@@ -52,10 +60,13 @@ def save_prompt(prompt_text, seed):
52
  st.error(f"Error al guardar el prompt: {e}")
53
  return None
54
 
55
- async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
56
  model = enable_lora(lora_model, basemodel) if process_lora else basemodel
57
- improved_prompt = await improve_prompt(prompt)
58
- combined_prompt = f"{prompt} {improved_prompt}"
 
 
 
59
 
60
  if seed == -1:
61
  seed = random.randint(0, MAX_SEED)
@@ -86,9 +97,13 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
86
  progress_bar.progress(100)
87
  return [str(image_path), str(prompt_file_path)]
88
 
89
- async def improve_prompt(prompt):
90
  try:
91
- instruction = ("With this idea, describe in English a detailed txt2img prompt in 300 characters at most...")
 
 
 
 
92
  formatted_prompt = f"{prompt}: {instruction}"
93
  response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
94
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
@@ -96,51 +111,61 @@ async def improve_prompt(prompt):
96
  except Exception as e:
97
  return f"Error mejorando el prompt: {e}"
98
 
99
def save_image(image, seed):
    """Persist a generated image under DATA_PATH as image_<seed>.jpg.

    Returns the saved file's Path, or None when saving fails (the error is
    surfaced in the Streamlit UI instead of raising).
    """
    try:
        destination = DATA_PATH / f"image_{seed}.jpg"
        image.save(destination, format="JPEG")
    except Exception as e:
        st.error(f"Error al guardar la imagen: {e}")
        return None
    return destination
107
 
108
def get_storage():
    """Return saved JPEG paths (newest first) plus a total-usage label in GB."""
    stored = sorted(
        (f for f in DATA_PATH.glob("*.jpg") if f.is_file()),
        key=lambda f: f.stat().st_mtime,
        reverse=True,
    )
    total_bytes = sum(f.stat().st_size for f in stored)
    return [str(f.resolve()) for f in stored], f"Uso total: {total_bytes/(1024.0 ** 3):.3f}GB"
 
 
 
 
 
 
113
 
 
 
 
114
 
115
def get_prompts():
    """Map each saved prompt's seed (the stem minus the 'prompt_' prefix) to its file."""
    return {
        f.stem.replace("prompt_", ""): f
        for f in DATA_PATH.glob("*.txt")
        if f.is_file()
    }
118
 
119
def delete_image(image_path):
    """Remove the image file at image_path, reporting the outcome in the Streamlit UI."""
    try:
        target = Path(image_path)
        if not target.exists():
            st.error("El archivo de imagen no existe.")
        else:
            target.unlink()
            st.success(f"Imagen {image_path} borrada.")
    except Exception as e:
        st.error(f"Error al borrar la imagen: {e}")
 
 
 
128
 
129
  def main():
130
  st.set_page_config(layout="wide")
131
- st.title("FLUX with enhancer upscaler and LORA realism training")
132
- prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=500)
133
-
134
- with st.sidebar.expander("Opciones avanzadas", expanded=False):
135
- basemodel = st.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
136
- lora_model = st.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
137
- format_option = st.selectbox("Formato", ["9:16", "16:9"])
138
- process_lora = st.checkbox("Procesar LORA")
139
- process_upscale = st.checkbox("Procesar Escalador")
140
- upscale_factor = st.selectbox("Factor de Escala", [2, 4, 8], index=0)
141
- scales = st.slider("Escalado", 1, 20, 10)
142
- steps = st.slider("Pasos", 1, 100, 20)
143
- seed = st.number_input("Semilla", value=-1)
 
144
 
145
  if format_option == "9:16":
146
  width = 720
@@ -151,11 +176,7 @@ def main():
151
 
152
  if st.sidebar.button("Generar Imagen"):
153
  with st.spinner("Mejorando y generando imagen..."):
154
- improved_prompt = asyncio.run(improve_prompt(prompt))
155
- st.session_state.improved_prompt = improved_prompt
156
- prompt_to_use = st.session_state.get('improved_prompt', prompt)
157
- result = asyncio.run(gen(prompt_to_use, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
158
-
159
  image_paths = result[0]
160
  prompt_file = result[1]
161
 
@@ -172,29 +193,5 @@ def main():
172
  st.write(f"Prompt utilizado: {prompt_text}")
173
  else:
174
  st.write("El archivo del prompt no está disponible.")
175
-
176
- files, usage = get_storage()
177
- st.text(usage)
178
- cols = st.columns(6)
179
- prompts = get_prompts()
180
-
181
- for idx, file in enumerate(files):
182
- with cols[idx % 6]:
183
- image = Image.open(file)
184
- prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
185
- prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
186
-
187
- st.image(image, caption=f"Imagen {idx+1}")
188
- st.write(f"Prompt: {prompt_text}")
189
-
190
- if st.button(f"Borrar Imagen {idx+1}", key=f"delete_{idx}"):
191
- try:
192
- os.remove(file)
193
- if prompt_file:
194
- os.remove(prompt_file)
195
- st.success(f"Imagen {idx+1} y su prompt fueron borrados.")
196
- except Exception as e:
197
- st.error(f"Error al borrar la imagen o prompt: {e}")
198
-
199
- if __name__ == "__main__":
200
- main()
 
7
  from huggingface_hub import InferenceClient, AsyncInferenceClient
8
  from gradio_client import Client, handle_file
9
  import asyncio
10
+ from concurrent.futures import ThreadPoolExecutor
11
 
12
  MAX_SEED = np.iinfo(np.int32).max
13
  HF_TOKEN = os.environ.get("HF_TOKEN")
 
17
  DATA_PATH = Path("./data")
18
  DATA_PATH.mkdir(exist_ok=True)
19
 
20
def run_async(func):
    """Run a zero-argument callable on a worker thread and block until it finishes.

    A fresh event loop and a single-worker thread pool are created per call.
    The original version leaked both: the ThreadPoolExecutor was never shut
    down and the event loop was never closed. Both are now released
    deterministically.

    Args:
        func: Zero-argument callable to execute.

    Returns:
        Whatever ``func`` returns.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        # Context manager guarantees the worker thread is joined on exit.
        with ThreadPoolExecutor(max_workers=1) as executor:
            return loop.run_until_complete(loop.run_in_executor(executor, func))
    finally:
        loop.close()
26
+
27
def enable_lora(lora_add, basemodel):
    """Pick the LoRA model id when one is given; otherwise fall back to the base model."""
    if lora_add:
        return lora_add
    return basemodel
29
 
 
60
  st.error(f"Error al guardar el prompt: {e}")
61
  return None
62
 
63
+ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, process_enhancer):
64
  model = enable_lora(lora_model, basemodel) if process_lora else basemodel
65
+ combined_prompt = prompt
66
+
67
+ if process_enhancer:
68
+ improved_prompt = await improve_prompt(prompt)
69
+ combined_prompt = f"{prompt} {improved_prompt}"
70
 
71
  if seed == -1:
72
  seed = random.randint(0, MAX_SEED)
 
97
  progress_bar.progress(100)
98
  return [str(image_path), str(prompt_file_path)]
99
 
100
+ async def improve_prompt(prompt, language):
101
  try:
102
+ if language == "English":
103
+ instruction = ("With this idea, describe in English a detailed txt2img prompt in 500 characters at most, add illumination, atmosphere, cinematic elements, and characters...")
104
+ else: # Spanish
105
+ instruction = ("Con esta idea, describe en español un prompt detallado de txt2img en un máximo de 500 caracteres, añadiendo iluminación, atmósfera, elementos cinematográficos y personajes...")
106
+
107
  formatted_prompt = f"{prompt}: {instruction}"
108
  response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
109
  improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
 
111
  except Exception as e:
112
  return f"Error mejorando el prompt: {e}"
113
 
114
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, process_enhancer, prompt_language):
    """Generate (and optionally enhance/upscale) an image, driving a Streamlit progress bar.

    Returns a 2-element list [image_path, prompt_file_path] on success, or a
    3-element list [error_message, None, combined_prompt] when image generation
    fails. NOTE(review): callers must tolerate both shapes — confirm this
    inconsistency is intentional.
    """
    # Switch to the LoRA model only when LoRA processing is requested.
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    combined_prompt = prompt  # Use the original prompt by default.

    # Enhance the prompt when the checkbox is on, in the selected language;
    # the improved text is appended after the original prompt.
    if process_enhancer:
        improved_prompt = await improve_prompt(prompt, prompt_language)
        combined_prompt = f"{prompt} {improved_prompt}"

    # -1 means "random seed"; MAX_SEED is the int32 maximum defined at module level.
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    progress_bar = st.progress(0)
    image, seed = await generate_image(combined_prompt, model, width, height, scales, steps, seed)
    progress_bar.progress(50)

    # generate_image signals failure by returning an "Error..." string instead of an image.
    if isinstance(image, str) and image.startswith("Error"):
        progress_bar.empty()
        return [image, None, combined_prompt]

    image_path = save_image(image, seed)
    prompt_file_path = save_prompt(combined_prompt, seed)

    if process_upscale:
        upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
        if upscale_image_path:
            # Re-save the upscaled result as JPEG and discard the original render.
            upscale_image = Image.open(upscale_image_path)
            upscale_image.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
            progress_bar.progress(100)
            image_path.unlink()
            return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
        else:
            # Upscaling failed: fall back to the non-upscaled image.
            progress_bar.empty()
            return [str(image_path), str(prompt_file_path)]
    else:
        progress_bar.progress(100)
        return [str(image_path), str(prompt_file_path)]
151
+
152
 
153
  def main():
154
  st.set_page_config(layout="wide")
155
+ st.title("FLUX with enhancer and upscaler with LORA model training")
156
+
157
+ prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=200)
158
+ process_enhancer = st.sidebar.checkbox("Mejorar Prompt", value=True)
159
+ prompt_language = st.sidebar.selectbox("Idioma para mejorar el prompt", ["English", "Spanish"]) # Selector de idioma
160
+ basemodel = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
161
+ lora_model = st.sidebar.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
162
+ format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9"])
163
+ process_lora = st.sidebar.checkbox("Procesar LORA", value=True)
164
+ process_upscale = st.sidebar.checkbox("Procesar Escalador", value=True)
165
+ upscale_factor = st.sidebar.selectbox("Factor de Escala", [2, 4, 8], index=0)
166
+ scales = st.sidebar.slider("Escalado", 1, 20, 10)
167
+ steps = st.sidebar.slider("Pasos", 1, 100, 20)
168
+ seed = st.sidebar.number_input("Semilla", value=-1)
169
 
170
  if format_option == "9:16":
171
  width = 720
 
176
 
177
  if st.sidebar.button("Generar Imagen"):
178
  with st.spinner("Mejorando y generando imagen..."):
179
+ result = asyncio.run(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, process_enhancer, prompt_language))
 
 
 
 
180
  image_paths = result[0]
181
  prompt_file = result[1]
182
 
 
193
  st.write(f"Prompt utilizado: {prompt_text}")
194
  else:
195
  st.write("El archivo del prompt no está disponible.")
196
+ else:
197
+ st.error("No se pudo generar la imagen.")