[MyClient] -> revert to stable
app.py
CHANGED
@@ -38,155 +38,128 @@ sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", HF_TOKEN)
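For context: the hunk header above shows sdlu being created with gradio_client's Client, and the handler below calls df.predict against the /generate64 and /upscale256 endpoints. The df client is presumably constructed the same way earlier in app.py (outside this hunk). A minimal sketch, assuming the DeepFloyd IF Space id and that the token is read from the environment:

import os
from gradio_client import Client

HF_TOKEN = os.environ.get("HF_TOKEN")  # assumption: token variable name and source

# assumption: df points at the DeepFloyd IF Space and drives the calls in the handler below
df = Client("DeepFloyd/IF", HF_TOKEN)
sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", HF_TOKEN)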
Old version (lines 38-192, removals marked -):

# Set up discord bot
class MyClient(discord.Client):
-    desired_messages = {}
-
    async def on_ready(self):
        print('Logged on as', self.user)


    async def on_message(self, message):
- [lines 48-54 removed; content not rendered in this view]

- [lines 56-62 removed; content not rendered in this view]
-        bot_member = guild.get_member(self.user.id)
-        if any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
-            return

- [lines 67-99 removed; content not rendered in this view]
-            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
-                prompt,
-                negative_prompt,
-                seed,
-                number_of_images,
-                guidance_scale,
-                custom_timesteps_1,
-                number_of_inference_steps,
-                api_name='/generate64')
-
-            #stage_1_results, stage_1_param_path, stage_1_result_path = df.predict("gradio written on a wall", "blur", 1,1,7.0, 'smart100',50, api_name="/generate64")
-
-            # stage_1_results -> path to directory with png files, so we isolate those
-            png_files = [f for f in os.listdir(stage_1_results) if f.endswith('.png')]
-
-            # merge images into larger, 2x2 image the way midjourney does it
-            if png_files:
-                first_png = png_files[0]
-                second_png = png_files[1]
-                third_png = png_files[2]
-                fourth_png = png_files[3]
-
-                '''
-                [],[],[],[] -> [][]
-                               [][]
-
-                '''
-
-                first_png_path = os.path.join(stage_1_results, first_png)
-                second_png_path = os.path.join(stage_1_results, second_png)
-                third_png_path = os.path.join(stage_1_results, third_png)
-                fourth_png_path = os.path.join(stage_1_results, fourth_png)
-
-                img1 = Image.open(first_png_path)
-                img2 = Image.open(second_png_path)
-                img3 = Image.open(third_png_path)
-                img4 = Image.open(fourth_png_path)
-
-                # create a new blank image with the size of the combined images (2x2)
-                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))
-
-                # paste the individual images into the combined image
-                combined_image.paste(img1, (0, 0))
-                combined_image.paste(img2, (img1.width, 0))
-                combined_image.paste(img3, (0, img1.height))
-                combined_image.paste(img4, (img1.width, img1.height))
-
-                # save the combined image
-                combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
-                combined_image.save(combined_image_path)

- [lines 151-162 removed; content not rendered in this view]

-
        #-------------------------------------------------------------------------------------------------------------
- [lines 166-174 removed; content not rendered in this view]
-                # Get the index corresponding to the reacted emoji
-                emoji_index = get_index_from_emoji(reaction.emoji)
-
-                # Use the index for the stage 2 upscaling
-                result_path = df.predict(stage_1_result_path, emoji_index, seed_2,
-                    guidance_scale_2, custom_timesteps_2, number_of_inference_steps_2, api_name='/upscale256')
-
-                with open(result_path, 'rb') as f:
-                    await reaction.message.reply('Here is the upscaled image!', file=discord.File(f, 'result.png'))
-                    break # Exit the loop once a matching user and message pair is found
-
-        except Exception as e:
-            print(f'An error occurred while processing the reaction: {e}')
-            await reaction.message.reply('An error occurred during stage 2.')
-

DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
intents = discord.Intents.default()
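The removed stage-2 path above calls get_index_from_emoji(reaction.emoji), which is defined outside this hunk. A minimal sketch of what that helper presumably does, assuming the four numbered reaction emojis used elsewhere in app.py; the real implementation is not shown in this diff:

def get_index_from_emoji(emoji):
    # assumption: map the numbered reaction back to a 0-based image index, None if unrelated
    emoji_to_index = {'1️⃣': 0, '2️⃣': 1, '3️⃣': 2, '4️⃣': 3}
    return emoji_to_index.get(str(emoji))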
New version (lines 38-165, additions marked +):

# Set up discord bot
class MyClient(discord.Client):
    async def on_ready(self):
        print('Logged on as', self.user)


    async def on_message(self, message):
+
+        #safety checks----------------------------------------------------------------------------------------------------
+
+        # tldr, bot should run if
+        #1) it does not have @offline role
+        #2) user has @verified role
+        #3) bot is in #bot-test channel
+
+        # bot won't respond to itself, prevents feedback loop + API spam
+        if message.author == self.user:
+            return
+
+        # if the bot has this role, it won't run
+        OFFLINE_ROLE_ID = 1103676632667017266 # 1103676632667017266 = @offline / under maintenance
+        guild = message.guild
+        bot_member = guild.get_member(self.user.id)
+        if any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
+            return
+
+        # the message author needs this role in order to use the bot
+        REQUIRED_ROLE_ID = 897376942817419265 # 900063512829755413 = @verified, 897376942817419265 = @huggingfolks
+        if not any(role.id == REQUIRED_ROLE_ID for role in message.author.roles):
+            return
+
+        # channels where bot will accept commands
+        ALLOWED_CHANNEL_IDS = [1100458786826747945] # 1100458786826747945 = #bot-test
+        if message.channel.id not in ALLOWED_CHANNEL_IDS:
+            return
+
+        # function for reacting
+        async def bot_react(message):
+            emojis = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
+            for emoji in emojis:
+                await message.add_reaction(emoji)
+
+        #deepfloydif----------------------------------------------------------------------------------------------------

+        if message.content.startswith('!deepfloydif'): # change to application commands, more intuitive
+
+            #(prompt, negative_prompt, seed, number_of_images, guidance_scale,custom_timesteps_1, number_of_inference_steps, api_name="/generate64")
+            #-> (stage_1_results, stage_1_param_path, stage_1_result_path)
+
+            # input prompt
+            prompt = message.content[12:].strip()

+            negative_prompt = ''
+            seed = random.randint(0, 2**32 - 1)
+            number_of_images = 4
+            guidance_scale = 7
+            custom_timesteps_1 = 'smart50'
+            number_of_inference_steps = 50
+
+            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
+                prompt,
+                negative_prompt,
+                seed,
+                number_of_images,
+                guidance_scale,
+                custom_timesteps_1,
+                number_of_inference_steps,
+                api_name='/generate64')
+
+            #stage_1_results, stage_1_param_path, stage_1_result_path = df.predict("gradio written on a wall", "blur", 1,1,7.0, 'smart100',50, api_name="/generate64")
+
+            # stage_1_results -> path to directory with png files, so we isolate those
+            png_files = [f for f in os.listdir(stage_1_results) if f.endswith('.png')]
+
+            # merge images into larger, 2x2 image the way midjourney does it
+            if png_files:
+                first_png = png_files[0]
+                second_png = png_files[1]
+                third_png = png_files[2]
+                fourth_png = png_files[3]
+
+                '''
+                [],[],[],[] -> [][]
+                               [][]

+                '''
+
+                first_png_path = os.path.join(stage_1_results, first_png)
+                second_png_path = os.path.join(stage_1_results, second_png)
+                third_png_path = os.path.join(stage_1_results, third_png)
+                fourth_png_path = os.path.join(stage_1_results, fourth_png)
+
+                img1 = Image.open(first_png_path)
+                img2 = Image.open(second_png_path)
+                img3 = Image.open(third_png_path)
+                img4 = Image.open(fourth_png_path)
+
+                # create a new blank image with the size of the combined images (2x2)
+                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))
+
+                # paste the individual images into the combined image
+                combined_image.paste(img1, (0, 0))
+                combined_image.paste(img2, (img1.width, 0))
+                combined_image.paste(img3, (0, img1.height))
+                combined_image.paste(img4, (img1.width, img1.height))
+
+                # save the combined image
+                combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
+                combined_image.save(combined_image_path)
+
+                # send the combined image file as a discord attachment
+                with open(combined_image_path, 'rb') as f:
+                    sent_message = await message.reply('Here is the combined image', file=discord.File(f, 'combined_image.png'))

+                await bot_react(sent_message)
        #-------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+        # deepfloydif -> bot_react ->
+        # await bot_react
+        # on_react -> if we react to a message, then we call dfif2

DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
intents = discord.Intents.default()
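The trailing comments in the new version (deepfloydif -> bot_react -> on_react -> dfif2) describe the planned reaction flow, but no handler is present in this commit. A hedged sketch of what an on_reaction_add handler inside MyClient might look like, reusing the removed /upscale256 call; the per-message result-path store, the stage-2 parameter values, and the handler body are assumptions, not code from this commit:

    async def on_reaction_add(self, reaction, user):
        if user == self.user:
            return  # ignore the bot's own reactions

        # assumption: map the numbered reaction back to a 0-based image index
        emoji_to_index = {'1️⃣': 0, '2️⃣': 1, '3️⃣': 2, '4️⃣': 3}
        index = emoji_to_index.get(str(reaction.emoji))
        if index is None:
            return

        # assumption: stage-1 result paths are stored per message id when /generate64 finishes
        stage_1_result_path = self.stage_1_results_by_message.get(reaction.message.id)
        if stage_1_result_path is None:
            return

        # stage-2 upscale, mirroring the removed /upscale256 call; parameter values are assumptions
        seed_2 = 0
        guidance_scale_2 = 4
        custom_timesteps_2 = 'smart50'
        number_of_inference_steps_2 = 50
        result_path = df.predict(stage_1_result_path, index, seed_2,
                                 guidance_scale_2, custom_timesteps_2,
                                 number_of_inference_steps_2, api_name='/upscale256')

        with open(result_path, 'rb') as f:
            await reaction.message.reply('Here is the upscaled image!',
                                         file=discord.File(f, 'result.png'))

Note that df.predict is a blocking call, so a handler like this would likely offload it (for example via asyncio.to_thread) to avoid stalling the event loop.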