revert to stable MyClient class, debug Error: None
app.py CHANGED
@@ -1,183 +1,160 @@
 import discord
 import os
 import threading
-
 import requests
 import json
 import random
-import time
-import re
-from discord import Embed, Color
-from discord.ext import commands
-# test
-from gradio_client import Client
 from PIL import Image
-
-
-
-from pytz import timezone
-#
-import asyncio
-
-breaking temporarily
-
-
-zurich_tz = timezone("Europe/Zurich")
-
-def convert_to_timezone(dt, tz):
-    return dt.astimezone(tz).strftime("%Y-%m-%d %H:%M:%S %Z")
-
-DFIF_TOKEN = os.getenv('HF_TOKEN')
-df = Client("huggingface-projects/IF", DFIF_TOKEN)
-sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", DFIF_TOKEN)
-
-DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
-intents = discord.Intents.default()
-intents.message_content = True
-
-bot = commands.Bot(command_prefix='!', intents=intents)
-

-

-@bot.event
-async def on_ready():
-    print('Logged on as', bot.user)
-    bot.log_channel = bot.get_channel(1100458786826747945) # 1100458786826747945 = bot-test, 1107006391547342910 = lunarbot server

-
-#
-
-
-
-
-        prompt = re.sub(r'[^\w\s]', '', prompt) # Remove special characters

-        def check_reaction(reaction, user):
-            return user == ctx.author and str(reaction.emoji) in ['1️⃣', '2️⃣', '3️⃣', '4️⃣']

-        await ctx.message.add_reaction('👍')
-        thread = await ctx.message.create_thread(name=f'{ctx.author} Image Upscaling Thread ')
-        # create thread -> send new message inside thread + combined_image -> add reactions -> dfif2
-        await thread.send(f'{ctx.author.mention}Generating images in thread, can take ~1 minute...')
-
-        number_of_images = 4
-        current_time = int(time.time())
-        random.seed(current_time)
-        seed = random.randint(0, 2**32 - 1)
-        stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(prompt, "blur", seed, number_of_images, 7.0, 'smart100', 50, api_name="/generate64")
-        png_files = [f for f in os.listdir(stage_1_results) if f.endswith('.png')]

-        if png_files:
-            first_png = png_files[0]
-            second_png = png_files[1]
-            third_png = png_files[2]
-            fourth_png = png_files[3]

-
-            second_png_path = os.path.join(stage_1_results, second_png)
-            third_png_path = os.path.join(stage_1_results, third_png)
-            fourth_png_path = os.path.join(stage_1_results, fourth_png)

-
-
-
-            img4 = Image.open(fourth_png_path)

-

-
-
-
-

-            combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
-            combined_image.save(combined_image_path)

-
-            #await dfif2(ctx, stage_1_result_path)


-            #
-            #

-            #
-
-

-        #
-
-
-        #thread = await ctx.message.create_thread(name='Image Upscaling Thread')

-
-

-
-
-                threadmsg = await thread.send(f'{ctx.author.mention}React with the image number you want to upscale!', file=discord.File(f, 'combined_image.png'))

-
-
-
-
-
-
-        if str(reaction.emoji) == '1️⃣':
-            await thread.send(f"{ctx.author.mention}Upscaling the first image...")
-            index = 0
-            await dfif2(ctx, index, stage_1_result_path, thread)
-        elif str(reaction.emoji) == '2️⃣':
-            await thread.send(f"{ctx.author.mention}Upscaling the second image...")
-            index = 1
-            await dfif2(ctx, index, stage_1_result_path, thread)
-        elif str(reaction.emoji) == '3️⃣':
-            await thread.send(f"{ctx.author.mention}Upscaling the third image...")
-            index = 2
-            await dfif2(ctx, index, stage_1_result_path, thread)
-        elif str(reaction.emoji) == '4️⃣':
-            await thread.send(f"{ctx.author.mention}Upscaling the fourth image...")
-            index = 3
-            await dfif2(ctx, index, stage_1_result_path, thread)
-
-    #deepfloydif try/except
-    except Exception as e:
-        print(f"Error: {e}")
-        await ctx.reply('An error occurred while processing your request. Please wait 5 seconds before retrying.')
-        await ctx.message.add_reaction('❌')
-
-#----------------------------------------------------------------------------------------------------------------------------
-# Stage 2
-async def dfif2(ctx, index: int, stage_1_result_path, thread):
-    try:
-        selected_index_for_stage_2 = index
-        seed_2 = 0
-        guidance_scale_2 = 4
-        custom_timesteps_2 = 'smart50'
-        number_of_inference_steps_2 = 50
-        result_path = df.predict(stage_1_result_path, selected_index_for_stage_2, seed_2,
-                                 guidance_scale_2, custom_timesteps_2, number_of_inference_steps_2, api_name='/upscale256')
-
-
-        with open(result_path, 'rb') as f:
-            await thread.send(f'{ctx.author.mention}Here is the upscaled image! :) ', file=discord.File(f, 'result.png'))
-            #await ctx.reply('Here is the result of the second stage', file=discord.File(f, 'result.png'))
-        await ctx.message.add_reaction('✔️')


-
-
-    except Exception as e:
-        print(f"Error: {e}")
-        await ctx.reply('An error occurred while processing stage 2 upscaling. Please try again later.')
-        await ctx.message.add_reaction('❌')
-#----------------------------------------------------------------------------------------------------------------------------


-

 def run_bot():
-

 threading.Thread(target=run_bot).start()

@@ -185,4 +162,4 @@ def greet(name):
     return "Hello " + name + "!"

 demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
 import discord
+import gradio_client
+from gradio_client import Client
+import gradio as gr
 import os
 import threading
+
+#for deepfloydif
 import requests
 import json
 import random
 from PIL import Image
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import time

+# random + small llama #


+#todos
+#alert
+#fix error on first command on bot startup
+#stable diffusion upscale
+#buttons for deepfloydIF (1,2,3,4)
+#application commands instead of message content checks (more user-friendly)




+DFIF_TOKEN = os.getenv('DFIF_TOKEN')

+#deepfloydIF
+#df = Client("DeepFloyd/IF", DFIF_TOKEN) #not reliable at the moment
+df = Client("huggingface-projects/IF", DFIF_TOKEN)

+#stable diffusion upscaler
+sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", DFIF_TOKEN)

+# Set up discord bot
+class MyClient(discord.Client):
+    async def on_ready(self):
+        print('Logged on as', self.user)


+    async def on_message(self, message):

+        #safety checks----------------------------------------------------------------------------------------------------

+        # tldr, bot should run if
+        #1) it does not have @offline role
+        #2) user has @verified role
+        #3) bot is in #bot-test channel
+
+        # bot won't respond to itself, prevents feedback loop + API spam
+        if message.author == self.user:
+            return
+
+        # if the bot has this role, it won't run
+        OFFLINE_ROLE_ID = 1103676632667017266 # 1103676632667017266 = @offline / under maintenance
+        guild = message.guild
+        bot_member = guild.get_member(self.user.id)
+        if any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
+            return
+
+        # the message author needs this role in order to use the bot
+        REQUIRED_ROLE_ID = 897376942817419265 # 900063512829755413 = @verified, 897376942817419265 = @huggingfolks
+        if not any(role.id == REQUIRED_ROLE_ID for role in message.author.roles):
+            return

+        # channels where bot will accept commands
+        ALLOWED_CHANNEL_IDS = [1100458786826747945] # 1100458786826747945 = #bot-test
+        if message.channel.id not in ALLOWED_CHANNEL_IDS:
+            return

+        #deepfloydif----------------------------------------------------------------------------------------------------
+
+        if message.content.startswith('!deepfloydif'): # change to application commands, more intuitive

+            #(prompt, negative_prompt, seed, number_of_images, guidance_scale,custom_timesteps_1, number_of_inference_steps, api_name="/generate64")
+            #-> (stage_1_results, stage_1_param_path, stage_1_result_path)

+            # input prompt
+            prompt = message.content[12:].strip()

+            negative_prompt = ''
+            seed = 0
+            number_of_images = 4
+            guidance_scale = 7
+            custom_timesteps_1 = 'smart50'
+            number_of_inference_steps = 50

+            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
+                prompt,
+                negative_prompt,
+                seed,
+                number_of_images,
+                guidance_scale,
+                custom_timesteps_1,
+                number_of_inference_steps,
+                api_name='/generate64')
+
+            #stage_1_results, stage_1_param_path, stage_1_result_path = df.predict("gradio written on a wall", "blur", 1,1,7.0, 'smart100',50, api_name="/generate64")
+
+            # stage_1_results -> path to directory with png files, so we isolate those
+            png_files = [f for f in os.listdir(stage_1_results) if f.endswith('.png')]
+
+            # merge images into larger, 2x2 image the way midjourney does it
+            if png_files:
+                first_png = png_files[0]
+                second_png = png_files[1]
+                third_png = png_files[2]
+                fourth_png = png_files[3]
+
+                '''
+                [],[],[],[] -> [][]
+                               [][]
+
+                '''
+
+                first_png_path = os.path.join(stage_1_results, first_png)
+                second_png_path = os.path.join(stage_1_results, second_png)
+                third_png_path = os.path.join(stage_1_results, third_png)
+                fourth_png_path = os.path.join(stage_1_results, fourth_png)
+
+                img1 = Image.open(first_png_path)
+                img2 = Image.open(second_png_path)
+                img3 = Image.open(third_png_path)
+                img4 = Image.open(fourth_png_path)
+
+                # create a new blank image with the size of the combined images (2x2)
+                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))
+
+                # paste the individual images into the combined image
+                combined_image.paste(img1, (0, 0))
+                combined_image.paste(img2, (img1.width, 0))
+                combined_image.paste(img3, (0, img1.height))
+                combined_image.paste(img4, (img1.width, img1.height))
+
+                # save the combined image
+                combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
+                combined_image.save(combined_image_path)
+
+                # send the combined image file as a discord attachment
+                with open(combined_image_path, 'rb') as f:
+                    await message.reply('Here is the combined image', file=discord.File(f, 'combined_image.png'))


+
+

+DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
+intents = discord.Intents.default()
+intents.message_content = True
+client = MyClient(intents=intents)

 def run_bot():
+    client.run(DISCORD_TOKEN)

 threading.Thread(target=run_bot).start()

     return "Hello " + name + "!"

 demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+demo.launch()
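For reference, a minimal sketch of the pattern this commit reverts to, assuming only discord.py and gradio are installed: a discord.Client subclass (MyClient) handles prefix commands, the bot runs on a background thread via client.run, and the Gradio interface keeps the Space alive with demo.launch. The on_message body is trimmed to a placeholder reply; the actual DeepFloyd IF / upscaler logic is the code shown in the diff above.

import os
import threading

import discord
import gradio as gr

intents = discord.Intents.default()
intents.message_content = True  # needed to read message text for the !deepfloydif prefix check

class MyClient(discord.Client):
    async def on_ready(self):
        print('Logged on as', self.user)

    async def on_message(self, message):
        # ignore the bot's own messages to avoid feedback loops
        if message.author == self.user:
            return
        if message.content.startswith('!deepfloydif'):
            prompt = message.content[12:].strip()
            # placeholder for the DeepFloyd IF generation / upscaling calls in app.py
            await message.reply(f'Got prompt: {prompt}')

client = MyClient(intents=intents)

def run_bot():
    # GRADIOTEST_TOKEN is the bot token environment variable used in app.py
    client.run(os.environ.get("GRADIOTEST_TOKEN"))

threading.Thread(target=run_bot).start()

def greet(name):
    return "Hello " + name + "!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()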