salomonsky committed on
Commit dcb68b8
1 Parent(s): 0d49f14

Update app.py

Files changed (1)
  1. app.py +24 -24
app.py CHANGED
@@ -54,19 +54,18 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
     if seed == -1:
         seed = random.randint(0, MAX_SEED)
     seed = int(seed)
-
     progress_bar = st.progress(0)
-    progress_bar.progress(10)
     image, seed = await generate_image(combined_prompt, model, width, height, scales, steps, seed)
-    progress_bar.progress(50)
-
+    progress_bar.progress(50)
+
     if isinstance(image, str) and image.startswith("Error"):
         progress_bar.empty()
         return [image, None, combined_prompt]
 
     image_path = DATA_PATH / f"image_{seed}.jpg"
     image.save(image_path, format="JPEG")
-
+
+    # Save the prompt used in a text file
     prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
     with open(prompt_file_path, "w") as prompt_file:
         prompt_file.write(combined_prompt)
@@ -83,9 +82,9 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
         progress_bar.empty()
         return [str(image_path), str(prompt_file_path)]
     else:
-        progress_bar.progress(100)
+        progress_bar.progress(100)
         return [str(image_path), str(prompt_file_path)]
-
+
 async def improve_prompt(prompt):
     try:
         instruction = ("With this idea, describe in English a detailed txt2img prompt in a single paragraph of up to 200 characters maximum, developing atmosphere, characters, lighting, and cameras.")
@@ -97,21 +96,25 @@ async def improve_prompt(prompt):
         return f"Error mejorando el prompt: {e}"
 
 def get_storage():
-    files = list(DATA_PATH.glob("*.jpg"))
-    usage = sum(file.stat().st_size for file in files)
-    return files, f"Uso total: {usage/(1024.0 ** 3):.3f}GB"
+    files = [{"name": str(file.resolve()), "size": file.stat().st_size,}
+             for file in DATA_PATH.glob("*.jpg")
+             if file.is_file()]
+    usage = sum([f['size'] for f in files])
+    return [file["name"] for file in files], f"Uso total: {usage/(1024.0 ** 3):.3f}GB"
 
 def get_prompts():
-    prompt_files = list(DATA_PATH.glob("*.txt"))
+    prompt_files = [file for file in DATA_PATH.glob("*.txt") if file.is_file()]
     return {file.stem.replace("prompt_", ""): file for file in prompt_files}
 
-def run_gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
+def run_gen():
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
-    return loop.run_until_complete(gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
+    prompt_to_use = st.session_state.get('improved_prompt', prompt)
+    return loop.run_until_complete(gen(prompt_to_use, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora))
 
 st.set_page_config(layout="wide")
-st.title("Generador de Imágenes FLUX")
+st.title("Generador de Imágenes FLUX y Escalador con IA")
+
 prompt = st.sidebar.text_input("Descripción de la imagen")
 basemodel = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"])
 lora_model = st.sidebar.selectbox("LORA Realismo", ["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
@@ -138,12 +141,12 @@ if st.sidebar.button("Mejorar prompt"):
 
 if st.sidebar.button("Generar Imagen"):
     with st.spinner("Generando imagen..."):
-        image_paths, prompt_file = run_gen(st.session_state.get('improved_prompt', prompt), basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora)
+        image_paths, prompt_file = run_gen()
 
-        if image_paths and isinstance(image_paths[0], str) and Path(image_paths[0]).exists():
-            st.image(image_paths[0], caption="Imagen Generada")
-            prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
-            st.write(f"Prompt utilizado: {prompt_text}")
+        if image_paths:
+            st.image(image_paths[0], caption="Imagen Generada")
+            prompt_text = Path(prompt_file).read_text()
+            st.write(f"Prompt utilizado: {prompt_text}")
 
 files, usage = get_storage()
 st.text(usage)
@@ -153,10 +156,7 @@ prompts = get_prompts()
 for idx, file in enumerate(files):
     with cols[idx % 6]:
         image = Image.open(file)
-        prompt_file = prompts.get(file.stem.replace("image_", ""), None)
+        prompt_file = prompts.get(Path(file).stem.replace("image_", ""), None)
         prompt_text = Path(prompt_file).read_text() if prompt_file else "No disponible"
         st.image(image, caption=f"Imagen {idx+1}")
-
-        if st.button(f"Eliminar Imagen {idx+1}", key=f"delete_{idx}"):
-            file.unlink()
-            st.experimental_rerun()
+        st.write(f"Prompt: {prompt_text}")