Update app.py
app.py CHANGED
@@ -5,13 +5,19 @@ import os, random, numpy as np, yaml, time
 from dataclasses import dataclass
 from typing import List
 from huggingface_hub import InferenceClient
+import google.generativeai as genai
 
 st.set_page_config(layout="wide")
 HF_TOKEN = os.getenv("HF_TOKEN")
+GEMINI_TOKEN = os.getenv("GEMINI_TOKEN")
 
 if not HF_TOKEN:
-    st.error("Error en el token! 'HF_TOKEN'.")
-    st.stop()
+    st.error("Error en el token de Hugging Face! 'HF_TOKEN'.")
+    st.stop()
+
+if not GEMINI_TOKEN:
+    st.error("Error en el token de Gemini! 'GEMINI_TOKEN'.")
+    st.stop()
 
 try:
     with open("config.yaml", "r") as file:
@@ -29,10 +35,15 @@ MAX_SEED = AppConfig.MAX_SEED
 DATA_PATH = Path("./data")
 DATA_PATH.mkdir(exist_ok=True)
 
-def 
+def get_hf_client():
     return InferenceClient(token=HF_TOKEN)
 
-
+def get_gemini_client():
+    genai.configure(api_key=GEMINI_TOKEN)
+    return genai.GenerativeModel('gemini-1.5-flash-8b')
+
+hf_client = get_hf_client()
+gemini_client = get_gemini_client()
 
 def authenticate_user(username, password):
     return username == credentials["username"] and password == credentials["password"]
@@ -40,7 +51,7 @@ def authenticate_user(username, password):
 def list_saved_images():
     return sorted(DATA_PATH.glob("*.jpg"), key=lambda x: x.stat().st_mtime, reverse=True)
 
-def enhance_prompt(text, client=client, use_enhancement=True):
+def enhance_prompt(text, client=None, use_enhancement=True):
     if not use_enhancement:
         return text[:200]
 
@@ -48,15 +59,21 @@ def enhance_prompt(text, client=client, use_enhancement=True):
         return text[:200]
 
     try:
-
-
-
-
+        enhancement_instruction = "Enhance this text description to be more suitable for text-to-image generation. Focus on vivid, descriptive language that will help an AI generate a photorealistic image. Be specific about colors, composition, lighting, and key details."
+
+        response = client.generate_content(f"{enhancement_instruction}\n\nOriginal prompt: {text}")
+        enhanced_prompt = response.text.strip()
 
+        return enhanced_prompt[:200]
+
     except Exception as e:
         st.warning(f"Prompt enhancement error: {e}")
         return text[:200]
 
+def save_prompt(image_name, enhanced_prompt):
+    with open(DATA_PATH / "prompts.txt", "a") as f:
+        f.write(f"{image_name}: {enhanced_prompt}\n")
+
 def generate_variations(prompt, num_variants=8, use_enhanced=True):
     instructions = [
         "Photorealistic description for txt2img prompt: ",
@@ -69,12 +86,12 @@ def generate_variations(prompt, num_variants=8, use_enhanced=True):
         "Lifelike txt2img, focusing on photorealistic depth: "
     ]
     if use_enhanced:
-        prompts = [enhance_prompt(f"{instructions[i % len(instructions)]}{prompt}") for i in range(num_variants)]
+        prompts = [enhance_prompt(f"{instructions[i % len(instructions)]}{prompt}", client=gemini_client, use_enhancement=True) for i in range(num_variants)]
     else:
-        prompts = [prompt
+        prompts = [enhance_prompt(prompt, use_enhancement=False) for i in range(num_variants)]
     return prompts
 
-def generate_image(prompt, width, height, seed, model_name, client=client):
+def generate_image(prompt, width, height, seed, model_name, client=None):
     if not client:
         st.error("No Hugging Face client available")
         return None, seed, None
@@ -82,7 +99,7 @@ def generate_image(prompt, width, height, seed, model_name, client=client):
     try:
         with st.spinner("Generando imagen..."):
            seed = int(seed) if seed != -1 else random.randint(0, AppConfig.MAX_SEED)
-            enhanced_prompt = enhance_prompt(prompt)
+            enhanced_prompt = enhance_prompt(prompt, client=gemini_client)
             image = client.text_to_image(
                 prompt=enhanced_prompt,
                 height=height,
@@ -106,7 +123,7 @@ def gen(prompts, width, height, model_name, num_variants=8):
     for i in range(num_variants):
         current_prompt = prompts[i] if len(prompts) > i else prompts[-1]
         with st.spinner(f"Generando imagen {i+1}/{num_variants}"):
-            image, used_seed, enhanced_prompt = generate_image(current_prompt, width, height, seeds[i], model_name)
+            image, used_seed, enhanced_prompt = generate_image(current_prompt, width, height, seeds[i], model_name, client=hf_client)
             if image:
                 image_path = DATA_PATH / f"generated_image_{used_seed}.jpg"
                 image.save(image_path)
@@ -173,8 +190,8 @@ def main():
 
     st.title("Flux +Upscale +Prompt Enhancer")
 
-    if not 
-        st.error("No se pudo establecer conexión con 
+    if not hf_client or not gemini_client:
+        st.error("No se pudo establecer conexión con los servicios. Verifique sus tokens.")
         return
 
     prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
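Note: the prompt-enhancement path this commit introduces can be exercised on its own roughly as sketched below. This is a minimal, illustrative sketch based on the code above, not part of the commit; it assumes the google-generativeai and huggingface_hub packages are installed, that GEMINI_TOKEN and HF_TOKEN are set in the environment, and the example prompt and output filename are placeholders. It caps the enhanced prompt at 200 characters the same way enhance_prompt() does.

    import os
    import google.generativeai as genai
    from huggingface_hub import InferenceClient

    # Gemini client used for prompt enhancement (mirrors get_gemini_client()).
    genai.configure(api_key=os.getenv("GEMINI_TOKEN"))
    gemini = genai.GenerativeModel("gemini-1.5-flash-8b")

    # Ask Gemini to rewrite a short prompt into a richer text-to-image description.
    instruction = "Enhance this text description to be more suitable for text-to-image generation."
    response = gemini.generate_content(f"{instruction}\n\nOriginal prompt: a lighthouse at dusk")
    enhanced = response.text.strip()[:200]  # enhance_prompt() truncates to 200 characters

    # Generate one image from the enhanced prompt (mirrors generate_image()).
    hf = InferenceClient(token=os.getenv("HF_TOKEN"))
    image = hf.text_to_image(prompt=enhanced, width=512, height=512)
    image.save("generated_image_example.jpg")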