Synced repo using 'sync_with_huggingface' Github Action
- app.py: +3 -12
- deepfloydif.py: +134 -84
app.py CHANGED
@@ -4,7 +4,7 @@ import threading
 import discord
 import gradio as gr
 from audioldm2 import audioldm2_create
-from deepfloydif import
+from deepfloydif import deepfloydif_generate64
 from discord import app_commands
 from discord.ext import commands
 from falcon import continue_falcon, try_falcon
@@ -38,7 +38,7 @@ async def on_ready():
 
 
 @client.hybrid_command(
-    name="
+    name="falcon",
     with_app_command=True,
     description="Enter some text to chat with the bot! Like this: /falcon Hello, how are you?",
 )
@@ -84,7 +84,7 @@ async def on_message(message):
 async def deepfloydif(ctx, prompt: str):
     """DeepfloydIF stage 1 generation"""
     try:
-        await
+        await deepfloydif_generate64(ctx, prompt, client)
     except Exception as e:
         print(f"Error: {e}")
 
@@ -113,15 +113,6 @@ async def audioldm2(ctx, prompt: str):
         print(f"Error: (app.py){e}")
 
 
-@client.event
-async def on_reaction_add(reaction, user):
-    """Checks for a reaction in order to call dfif2"""
-    try:
-        await deepfloydif_stage_2_react_check(reaction, user)
-    except Exception as e:
-        print(f"Error: {e} (known error, does not cause issues, low priority)")
-
-
 def run_bot():
     client.run(DISCORD_TOKEN)
 
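Taken together, the app.py changes swap the old import for the new `deepfloydif_generate64` entry point, drop the `on_reaction_add` listener (upscaling is now triggered by buttons inside `deepfloydif.py`), and leave the `/deepfloydif` command as a thin wrapper. Below is a minimal, self-contained sketch of that wiring; the intents, command prefix, and token handling are illustrative assumptions rather than lines copied from app.py.

```python
# Minimal sketch of the updated app.py wiring (illustrative only; the real app.py
# also registers the falcon and audioldm2 commands).
import os

import discord
from discord.ext import commands

from deepfloydif import deepfloydif_generate64  # entry point added in this commit

intents = discord.Intents.default()  # assumption: setup details differ in the real app.py
client = commands.Bot(command_prefix="/", intents=intents)


@client.hybrid_command(name="deepfloydif", with_app_command=True, description="Generate images from a prompt")
async def deepfloydif(ctx, prompt: str):
    """DeepfloydIF stage 1 generation"""
    try:
        # all generation and button-based upscaling now lives in deepfloydif.py
        await deepfloydif_generate64(ctx, prompt, client)
    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    client.run(os.getenv("DISCORD_TOKEN"))
```

The command body only delegates and logs, so all Discord UI state (buttons, views, upscale callbacks) stays in deepfloydif.py.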
deepfloydif.py CHANGED
@@ -8,6 +8,8 @@ import discord
 from gradio_client import Client
 from PIL import Image
 
+from discord.ui import Button, View
+
 HF_TOKEN = os.getenv("HF_TOKEN")
 deepfloydif_client = Client("huggingface-projects/IF", HF_TOKEN)
 
@@ -15,8 +17,8 @@ BOT_USER_ID = 1086256910572986469 if os.getenv("TEST_ENV", False) else 110223665
 DEEPFLOYDIF_CHANNEL_ID = 1121834257959092234 if os.getenv("TEST_ENV", False) else 1119313215675973714
 
 
-def deepfloydif_stage_1_inference(prompt):
-    """Generates
+def deepfloydif_generate64_inference(prompt):
+    """Generates four images based on a prompt"""
     negative_prompt = ""
     seed = random.randint(0, 1000)
     number_of_images = 4
@@ -26,7 +28,7 @@ def deepfloydif_stage_1_inference(prompt):
     (
         stage_1_images,
         stage_1_param_path,
-
+        path_for_upscale256_upscaling,
     ) = deepfloydif_client.predict(
         prompt,
         negative_prompt,
@@ -37,19 +39,19 @@ def deepfloydif_stage_1_inference(prompt):
         number_of_inference_steps,
         api_name="/generate64",
     )
-    return [stage_1_images, stage_1_param_path,
+    return [stage_1_images, stage_1_param_path, path_for_upscale256_upscaling]
 
 
-def deepfloydif_stage_2_inference(index, path_for_stage_2_upscaling):
-    """Upscales one of the images from
-
+def deepfloydif_upscale256_inference(index, path_for_upscale256_upscaling):
+    """Upscales one of the images from deepfloydif_generate64_inference based on the chosen index"""
+    selected_index_for_upscale256 = index
     seed_2 = 0
     guidance_scale_2 = 4
     custom_timesteps_2 = "smart50"
     number_of_inference_steps_2 = 50
     result_path = deepfloydif_client.predict(
-
-
+        path_for_upscale256_upscaling,
+        selected_index_for_upscale256,
         seed_2,
         guidance_scale_2,
         custom_timesteps_2,
@@ -59,10 +61,34 @@ def deepfloydif_stage_2_inference(index, path_for_stage_2_upscaling):
     return result_path
 
 
-
-    """
-
-
+def deepfloydif_upscale1024_inference(index, path_for_upscale256_upscaling, prompt):
+    """Upscales to stage 2, then stage 3"""
+    selected_index_for_upscale256 = index
+    seed_2 = 0  # default seed for stage 2 256 upscaling
+    guidance_scale_2 = 4  # default for stage 2
+    custom_timesteps_2 = "smart50"  # default for stage 2
+    number_of_inference_steps_2 = 50  # default for stage 2
+    negative_prompt = ""  # empty (not used, could add in the future)
+
+    seed_3 = 0  # default for stage 3 1024 upscaling
+    guidance_scale_3 = 9  # default for stage 3
+    number_of_inference_steps_3 = 40  # default for stage 3
+
+    result_path = deepfloydif_client.predict(
+        path_for_upscale256_upscaling,
+        selected_index_for_upscale256,
+        seed_2,
+        guidance_scale_2,
+        custom_timesteps_2,
+        number_of_inference_steps_2,
+        prompt,
+        negative_prompt,
+        seed_3,
+        guidance_scale_3,
+        number_of_inference_steps_3,
+        api_name="/upscale1024",
+    )
+    return result_path
 
 
 def load_image(png_files, stage_1_images):
@@ -76,7 +102,7 @@ def load_image(png_files, stage_1_images):
 
 def combine_images(png_files, stage_1_images, partial_path):
     if os.environ.get("TEST_ENV") == "True":
-        print("Combining images for
+        print("Combining images for deepfloydif_generate64")
     images = load_image(png_files, stage_1_images)
     combined_image = Image.new("RGB", (images[0].width * 2, images[0].height * 2))
     combined_image.paste(images[0], (0, 0))
@@ -88,108 +114,132 @@ def combine_images(png_files, stage_1_images, partial_path):
     return combined_image_path
 
 
-async def
+async def deepfloydif_generate64(ctx, prompt, client):
     """DeepfloydIF command (generate images with realistic text using slash commands)"""
     try:
         if ctx.author.id != BOT_USER_ID:
             if ctx.channel.id == DEEPFLOYDIF_CHANNEL_ID:
                 if os.environ.get("TEST_ENV") == "True":
-                    print("Safety checks passed for
+                    print("Safety checks passed for deepfloydif_generate64")
+                channel = client.get_channel(DEEPFLOYDIF_CHANNEL_ID)
                 # interaction.response message can't be used to create a thread, so we create another message
-                message = await ctx.send(f"**{prompt}** - {ctx.author.mention}")
-                if len(prompt) > 99:
-                    small_prompt = prompt[:99]
-                else:
-                    small_prompt = prompt
-                thread = await message.create_thread(name=f"{small_prompt}", auto_archive_duration=60)
-                await thread.send(f"{ctx.author.mention} Generating images in thread, can take ~1 minute...")
+                message = await ctx.send(f"**{prompt}** - {ctx.author.mention} <a:loading:1114111677990981692>")
 
                 loop = asyncio.get_running_loop()
-                result = await loop.run_in_executor(None,
+                result = await loop.run_in_executor(None, deepfloydif_generate64_inference, prompt)
                 stage_1_images = result[0]
-
+                path_for_upscale256_upscaling = result[2]
 
-                partial_path = pathlib.Path(
+                partial_path = pathlib.Path(path_for_upscale256_upscaling).name
                 png_files = list(glob.glob(f"{stage_1_images}/**/*.png"))
 
                 if png_files:
+                    await message.delete()
                     combined_image_path = combine_images(png_files, stage_1_images, partial_path)
                     if os.environ.get("TEST_ENV") == "True":
-                        print("Images combined for
-
-
-
+                        print("Images combined for deepfloydif_generate64")
+
+                    with Image.open(combined_image_path) as img:
+                        width, height = img.size
+                        new_width = width * 3
+                        new_height = height * 3
+                        resized_img = img.resize((new_width, new_height))
+                        x2_combined_image_path = combined_image_path
+                        resized_img.save(x2_combined_image_path)
+
+                    # making image bigger, more readable
+                    with open(x2_combined_image_path, "rb") as f:  # was combined_image_path
+                        button1 = Button(custom_id="0", emoji="↖")
+                        button2 = Button(custom_id="1", emoji="↗")
+                        button3 = Button(custom_id="2", emoji="↙")
+                        button4 = Button(custom_id="3", emoji="↘")
+
+                        async def button_callback(interaction):
+                            index = int(interaction.data["custom_id"])  # 0,1,2,3
+
+                            await interaction.response.send_message(
+                                f"{ctx.author.mention} <a:loading:1114111677990981692>", ephemeral=True
+                            )
+                            result_path = await deepfloydif_upscale256(index, path_for_upscale256_upscaling)
+
+                            # create and use upscale 1024 button
+                            with open(result_path, "rb") as f:
+                                upscale1024 = Button(
+                                    label="High-quality upscale (x4)", custom_id=str(index)
+                                )  # "0", "1" etc
+                                upscale1024.callback = upscale1024_callback
+                                view = View(timeout=None)
+                                view.add_item(upscale1024)
+
+                                await interaction.delete_original_response()
+                                await channel.send(
+                                    content=(
+                                        f"{ctx.author.mention} Here is the upscaled image! Click to upscale even more!"
+                                    ),
+                                    file=discord.File(f, f"{prompt}.png"),
+                                    view=view,
+                                )
+
+                        async def upscale1024_callback(interaction):
+                            index = int(interaction.data["custom_id"])
+
+                            await interaction.response.send_message(
+                                f"{ctx.author.mention} <a:loading:1114111677990981692>", ephemeral=True
+                            )
+                            result_path = await deepfloydif_upscale1024(index, path_for_upscale256_upscaling, prompt)
+
+                            with open(result_path, "rb") as f:
+                                await interaction.delete_original_response()
+                                await channel.send(
+                                    content=f"{ctx.author.mention} Here's your high-quality x16 image!",
+                                    file=discord.File(f, f"{prompt}.png"),
+                                )
+
+                        button1.callback = button_callback
+                        button2.callback = button_callback
+                        button3.callback = button_callback
+                        button4.callback = button_callback
+
+                        view = View(timeout=None)
+                        view.add_item(button1)
+                        view.add_item(button2)
+                        view.add_item(button3)
+                        view.add_item(button4)
+
+                        # could store this message as combined_image_dfif in case it's useful for future testing
+                        await ctx.send(
+                            f"{ctx.author.mention} Click a button to upscale! (make larger + enhance quality)",
                             file=discord.File(f, f"{partial_path}.png"),
+                            view=view,
                         )
-                    emoji_list = ["↖️", "↗️", "↙️", "↘️"]
-                    await react_1234(emoji_list, combined_image_dfif)
                 else:
-                    await
+                    await ctx.send(f"{ctx.author.mention} No PNG files were found, cannot post them!")
 
     except Exception as e:
         print(f"Error: {e}")
 
 
-async def
-    """
+async def deepfloydif_upscale256(index: int, path_for_upscale256_upscaling):
+    """upscaling function for images generated using /deepfloydif"""
     try:
-
-
-
-
-
-
-        thread_parent_id = thread.parent.id
-        if thread_parent_id == DEEPFLOYDIF_CHANNEL_ID:
-            if reaction.message.attachments:
-                if user.id == reaction.message.mentions[0].id:
-                    attachment = reaction.message.attachments[0]
-                    image_name = attachment.filename
-                    partial_path = image_name[:-4]
-                    full_path = "/tmp/" + partial_path
-                    emoji = reaction.emoji
-                    if emoji == "↖️":
-                        index = 0
-                    elif emoji == "↗️":
-                        index = 1
-                    elif emoji == "↙️":
-                        index = 2
-                    elif emoji == "↘️":
-                        index = 3
-                    path_for_stage_2_upscaling = full_path
-                    thread = reaction.message.channel
-                    await deepfloydif_stage_2(
-                        index,
-                        path_for_stage_2_upscaling,
-                        thread,
-                    )
+        loop = asyncio.get_running_loop()
+        result_path = await loop.run_in_executor(
+            None, deepfloydif_upscale256_inference, index, path_for_upscale256_upscaling
+        )
+        return result_path
+
     except Exception as e:
-        print(f"Error: {e}
+        print(f"Error: {e}")
 
 
-async def
+async def deepfloydif_upscale1024(index: int, path_for_upscale256_upscaling, prompt):
     """upscaling function for images generated using /deepfloydif"""
     try:
-        if os.environ.get("TEST_ENV") == "True":
-            print("Running deepfloydif_stage_2")
-        if index == 0:
-            position = "top left"
-        elif index == 1:
-            position = "top right"
-        elif index == 2:
-            position = "bottom left"
-        elif index == 3:
-            position = "bottom right"
-        await thread.send(f"Upscaling the {position} image...")
-
-        # run blocking function in executor
         loop = asyncio.get_running_loop()
         result_path = await loop.run_in_executor(
-            None,
+            None, deepfloydif_upscale1024_inference, index, path_for_upscale256_upscaling, prompt
        )
+        return result_path
 
-        with open(result_path, "rb") as f:
-            await thread.send("Here is the upscaled image!", file=discord.File(f, "result.png"))
-        await thread.edit(archived=True)
     except Exception as e:
         print(f"Error: {e}")
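The core pattern introduced in deepfloydif.py is: post the 2x2 image grid with four `discord.ui` Buttons, and in each button callback offload the blocking `gradio_client` call to a thread with `run_in_executor` so the event loop (and the Discord gateway heartbeat) stays responsive. The sketch below isolates that pattern; `fake_upscale` and `send_upscale_buttons` are hypothetical stand-ins for `deepfloydif_upscale256_inference` and the real command, and numbered labels are used in place of the quadrant emoji.

```python
# Standalone sketch of the button + executor pattern this commit adopts.
# fake_upscale and send_upscale_buttons are illustrative names, not part of the repo.
import asyncio

import discord
from discord.ui import Button, View
from PIL import Image


def fake_upscale(index: int) -> str:
    """Hypothetical stand-in for deepfloydif_upscale256_inference (a blocking gradio_client call)."""
    path = f"/tmp/upscaled_{index}.png"
    Image.new("RGB", (256, 256)).save(path)  # placeholder output so the sketch runs end to end
    return path


async def send_upscale_buttons(channel: discord.abc.Messageable):
    """Post four buttons; each click upscales one quadrant without blocking the event loop."""
    view = View(timeout=None)
    for i in range(4):
        button = Button(custom_id=str(i), label=str(i + 1))

        async def button_callback(interaction: discord.Interaction):
            index = int(interaction.data["custom_id"])  # which quadrant was clicked
            await interaction.response.defer(thinking=True)
            loop = asyncio.get_running_loop()
            # run the blocking inference call in the default thread pool
            result_path = await loop.run_in_executor(None, fake_upscale, index)
            await interaction.followup.send(file=discord.File(result_path))

        button.callback = button_callback
        view.add_item(button)

    await channel.send("Click a button to upscale!", view=view)
```

Assigning one coroutine to several `Button.callback` attributes works because the handler reads the clicked button's `custom_id` from `interaction.data`, which is how the commit routes all four quadrant buttons through a single `button_callback`.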