File size: 6,845 Bytes
5546580
 
f1578a9
5546580
 
 
b6ca34b
5654dca
 
 
 
 
 
 
3d72860
5654dca
0f21369
5654dca
db9a3a6
b939335
 
8843fcf
25167c7
 
cc2fbe1
b939335
5546580
744e267
 
db9a3a6
cc2fbe1
 
 
 
 
 
744e267
9c95049
5546580
 
 
 
 
cc2fbe1
5546580
ac7dd69
cc2fbe1
 
 
b6ca34b
 
 
 
cc2fbe1
 
 
b6ca34b
88edf1a
cc2fbe1
ac7dd69
 
 
 
 
cc2fbe1
 
3484400
 
 
acb1ed0
 
 
 
 
cc2fbe1
 
 
b6ca34b
154eb2f
 
 
cc2fbe1
25167c7
 
 
154eb2f
 
 
25167c7
e4554fa
25167c7
ee5699a
e4554fa
b6ca34b
cc2fbe1
b6ca34b
 
cc2fbe1
b6ca34b
 
423b78f
 
 
cc2fbe1
 
 
 
 
 
423b78f
b6ca34b
423b78f
 
 
 
744e267
 
 
 
 
cc2fbe1
744e267
 
cc2fbe1
744e267
 
 
 
 
cc2fbe1
744e267
 
 
cc2fbe1
744e267
 
b6ca34b
 
 
 
 
 
 
 
 
5654dca
 
 
eae5c52
5654dca
 
 
 
3d08d5f
5654dca
 
 
 
 
 
b6ca34b
 
 
5654dca
5546580
05024ea
5546580
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
import discord
import gradio_client
from gradio_client import Client
import gradio as gr
import os
import threading

#for deepfloydif
import requests
import json
import random
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time

# random + small llama #


#todos
#alert
#fix error on first command on bot startup
#stable diffusion upscale
#buttons for deepfloydIF (1,2,3,4)
#application commands instead of message content checks (more user-friendly)




# Hugging Face token used to authenticate against the gradio Spaces below.
DFIF_TOKEN = os.getenv('DFIF_TOKEN')

#deepfloydIF
# NOTE(review): connecting to a Space at import time does network I/O and can
# fail at startup if the Space is down.
df = Client("DeepFloyd/IF", DFIF_TOKEN) #not reliable at the moment
#df = Client("huggingface-projects/IF", DFIF_TOKEN)

#stable diffusion upscaler (currently unused in this file; kept for the
#planned stage-2 upscale feature listed in the todos above)
sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", DFIF_TOKEN)

# Set up discord bot
class MyClient(discord.Client):
    """Discord bot that turns `!deepfloydif <prompt>` messages into a 2x2
    grid of DeepFloyd IF stage-1 images and replies with the combined picture.
    """

    async def on_ready(self):
        # Fired once the gateway connection is established.
        print('Logged on as', self.user)

    async def on_message(self, message):
        """Gate incoming messages, then run the !deepfloydif pipeline.

        The bot only responds when:
          1) it does not carry the @offline (maintenance) role,
          2) the author has the required (@huggingfolks) role,
          3) the message is in an allowed channel (#bot-test).
        """
        # safety checks ------------------------------------------------------

        # bot won't respond to itself, prevents feedback loop + API spam
        if message.author == self.user:
            return

        # DMs have no guild; every check below needs one (previously this
        # crashed with AttributeError on guild.get_member for DMs).
        guild = message.guild
        if guild is None:
            return

        # if the bot has this role, it won't run
        OFFLINE_ROLE_ID = 1103676632667017266  # 1103676632667017266 = @offline / under maintenance
        bot_member = guild.get_member(self.user.id)
        if any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
            return

        # the message author needs this role in order to use the bot
        REQUIRED_ROLE_ID = 897376942817419265  # 900063512829755413 = @verified, 897376942817419265 = @huggingfolks
        if not any(role.id == REQUIRED_ROLE_ID for role in message.author.roles):
            return

        # channels where bot will accept commands
        ALLOWED_CHANNEL_IDS = [1100458786826747945]  # 1100458786826747945 = #bot-test
        if message.channel.id not in ALLOWED_CHANNEL_IDS:
            return

        # deepfloydif --------------------------------------------------------

        if message.content.startswith('!deepfloydif'):  # TODO: switch to application commands, more intuitive
            # everything after the 12-character command word is the prompt
            prompt = message.content[12:].strip()
            number_of_images = 4

            # fresh seed per request; random.randint already draws from the
            # global RNG, so re-seeding with whole-second time (as before)
            # was redundant and made same-second requests collide
            seed = random.randint(0, 2**32 - 1)

            # predict(prompt, negative_prompt, seed, number_of_images,
            #         guidance_scale, custom_timesteps_1,
            #         number_of_inference_steps, api_name="/generate64")
            # -> (stage_1_results, stage_1_param_path, stage_1_result_path)
            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
                prompt, "blur", seed, number_of_images, 7.0, 'smart100', 50,
                api_name="/generate64")

            # stage_1_results -> path to a directory with png files; sort the
            # listing so tile placement is deterministic (os.listdir order
            # is arbitrary)
            png_files = sorted(f for f in os.listdir(stage_1_results) if f.endswith('.png'))

            # the 2x2 grid needs all four tiles; previously a short or empty
            # listing crashed with IndexError / NameError instead of replying
            if len(png_files) < 4:
                await message.reply('Image generation failed, please try again.')
                return

            # merge images into a larger 2x2 image the way midjourney does it:
            #   [],[],[],[] -> [][]
            #                  [][]
            tile_paths = [os.path.join(stage_1_results, f) for f in png_files[:4]]
            img1, img2, img3, img4 = (Image.open(p) for p in tile_paths)

            # blank canvas sized for the combined grid (presumably all four
            # tiles share img1's dimensions — /generate64 output)
            combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))
            combined_image.paste(img1, (0, 0))
            combined_image.paste(img2, (img1.width, 0))
            combined_image.paste(img3, (0, img1.height))
            combined_image.paste(img4, (img1.width, img1.height))

            # release the tile file handles (previously leaked)
            for img in (img1, img2, img3, img4):
                img.close()

            # save the combined image
            combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
            combined_image.save(combined_image_path)

            # send the combined image file as a discord attachment
            with open(combined_image_path, 'rb') as f:
                await message.reply('Here is the combined image', file=discord.File(f, 'combined_image.png'))

            # TODO: stage 2 upscale via df.predict(stage_1_result_path, ...,
            # api_name="/upscale256"), possibly with buttons (1,2,3,4) for
            # selecting which tile to upscale — see todos at the top of file.
            

DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)

# message_content is a privileged intent; required to read command text
intents = discord.Intents.default()
intents.message_content = True
client = MyClient(intents=intents)


def run_bot():
    """Start the discord client (blocking); intended to run in a thread."""
    client.run(DISCORD_TOKEN)


# run the bot alongside the gradio app launched below
threading.Thread(target=run_bot).start()

def greet(name):
    """Return a friendly greeting for *name*."""
    return f"Hello {name}!"

# minimal gradio UI; launching it keeps the process (and the bot thread) alive
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()