File size: 6,551 Bytes
5546580
c7a9a19
 
 
5546580
 
c7a9a19
 
5654dca
 
 
c41bcc3
c7a9a19
 
 
2998219
c7a9a19
9650ea7
 
c7a9a19
 
 
 
 
 
d7fdc0e
 
9650ea7
 
4ad6f12
bc0d1ce
c7a9a19
4ad6f12
 
bc0d1ce
c7a9a19
4ad6f12
d7fdc0e
c7a9a19
 
 
 
9dad23e
 
c7a9a19
deb809f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d7fdc0e
deb809f
 
 
 
 
 
 
d7fdc0e
deb809f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9114452
deb809f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d7fdc0e
deb809f
389bfcb
deb809f
 
 
 
 
 
 
 
d7fdc0e
c7a9a19
 
 
 
5546580
 
c7a9a19
5546580
 
 
 
 
 
 
c7a9a19
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
import discord
import gradio_client
from gradio_client import Client
import gradio as gr
import os
import threading

#for deepfloydif
import requests
import json
import random
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time

# random + small llama #


#todos
#alert
#fix error on first command on bot startup
#stable diffusion upscale
#buttons for deepfloydIF (1,2,3,4)
#application commands instead of message content checks (more user-friendly)




# Hugging Face API token, read from the environment; passed to the Gradio
# clients below so they can call private/authenticated Spaces.
HF_TOKEN = os.getenv('HF_TOKEN')

#deepfloydIF
#df = Client("DeepFloyd/IF", HF_TOKEN) #not reliable at the moment
# Gradio client for the DeepFloyd IF Space (text prompt -> batch of 64x64 images).
# NOTE: constructing a Client opens a network connection at import time.
df = Client("huggingface-projects/IF", HF_TOKEN)

#stable diffusion upscaler
# Gradio client for the latent upscaler Space (not yet used in this file;
# see the upscale TODO in the header comments).
sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", HF_TOKEN)

# Set up discord bot 
class MyClient(discord.Client):
    """Discord bot that generates a 2x2 image grid from a text prompt.

    Listens for ``!deepfloydif <prompt>`` in an allow-listed channel, calls
    the DeepFloyd IF Gradio Space, tiles the four resulting PNGs into one
    image (midjourney style), replies with it, and adds number reactions
    (intended for a later "pick one to upscale" flow).
    """

    async def on_ready(self):
        # Fired once the gateway connection is established.
        print('Logged on as', self.user)

    async def on_message(self, message):
        """Gatekeep the message, then run the !deepfloydif command if present."""

        #safety checks----------------------------------------------------------------------------------------------------

        #   tldr, bot should run if
        #1) it does not have @offline role
        #2) user has @verified role
        #3) bot is in #bot-test channel

        # bot won't respond to itself, prevents feedback loop + API spam
        if message.author == self.user:
            return

        # DMs have no guild; the role checks below would crash on None,
        # so ignore direct messages entirely.
        guild = message.guild
        if guild is None:
            return

        # if the bot has this role, it won't run
        OFFLINE_ROLE_ID = 1103676632667017266  # 1103676632667017266 = @offline / under maintenance
        bot_member = guild.get_member(self.user.id)
        # get_member can return None when the member cache is cold; treat
        # that the same as "offline" rather than raising AttributeError.
        if bot_member is None or any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
            return

        # the message author needs this role in order to use the bot
        REQUIRED_ROLE_ID = 897376942817419265 # 900063512829755413 = @verified, 897376942817419265 = @huggingfolks
        if not any(role.id == REQUIRED_ROLE_ID for role in message.author.roles):
            return

        # channels where bot will accept commands
        ALLOWED_CHANNEL_IDS = [1100458786826747945] # 1100458786826747945 = #bot-test
        if message.channel.id not in ALLOWED_CHANNEL_IDS:
            return

        # function for reacting: number emojis 1-4, one per generated image
        async def bot_react(message):
            for emoji in ('1️⃣', '2️⃣', '3️⃣', '4️⃣'):
                await message.add_reaction(emoji)

        #deepfloydif----------------------------------------------------------------------------------------------------

        if message.content.startswith('!deepfloydif'): # change to application commands, more intuitive

            #(prompt, negative_prompt, seed, number_of_images, guidance_scale,custom_timesteps_1, number_of_inference_steps, api_name="/generate64")
            #-> (stage_1_results, stage_1_param_path, stage_1_result_path)

            # input prompt: everything after the 12-char '!deepfloydif' command
            prompt = message.content[12:].strip()

            negative_prompt = ''
            seed = random.randint(0, 2**32 - 1)
            number_of_images = 4
            guidance_scale = 7
            custom_timesteps_1 = 'smart50'
            number_of_inference_steps = 50

            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
                prompt,
                negative_prompt,
                seed,
                number_of_images,
                guidance_scale,
                custom_timesteps_1,
                number_of_inference_steps,
                api_name='/generate64')

            # stage_1_results -> path to directory with png files, so we isolate those.
            # sorted() makes the tile order deterministic (os.listdir order is not).
            png_files = sorted(f for f in os.listdir(stage_1_results) if f.endswith('.png'))

            # BUG FIX: the original referenced combined_image_path outside the
            # `if png_files:` guard (NameError when the Space produced no PNGs)
            # and indexed png_files[0..3] (IndexError with fewer than 4 files).
            if len(png_files) < 4:
                await message.reply('Image generation failed, please try again.')
                return

            # merge images into larger, 2x2 image the way midjourney does it:
            # [],[],[],[] -> [][]
            #                [][]
            images = [Image.open(os.path.join(stage_1_results, name)) for name in png_files[:4]]

            # create a new blank image with the size of the combined images (2x2);
            # all four tiles are assumed to share the first tile's dimensions
            tile_w, tile_h = images[0].width, images[0].height
            combined_image = Image.new('RGB', (tile_w * 2, tile_h * 2))

            # paste the individual images into the combined image, row-major
            for idx, img in enumerate(images):
                combined_image.paste(img, ((idx % 2) * tile_w, (idx // 2) * tile_h))

            # save the combined image next to the stage-1 outputs
            combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
            combined_image.save(combined_image_path)

            # send the combined image file as a discord attachment
            with open(combined_image_path, 'rb') as f:
                sent_message = await message.reply(
                    'Here is the combined image',
                    file=discord.File(f, 'combined_image.png'))

            await bot_react(sent_message)
    #-------------------------------------------------------------------------------------------------------------

    # deepfloydif -> bot_react ->
    # await bot_react
    # on_react -> if we react to a message, then we call dfif2

# Discord bot token; None if the env var is unset (client.run would then
# fail at startup with a login error)
DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
intents = discord.Intents.default()
# message_content is a privileged intent; required so on_message can read
# the '!deepfloydif ...' command text
intents.message_content = True
client = MyClient(intents=intents)

def run_bot():
    """Start the Discord client; blocks, so it is launched on a worker thread."""
    client.run(DISCORD_TOKEN)

threading.Thread(target=run_bot).start()

def greet(name):
    """Return the greeting string ``Hello <name>!`` for the demo interface."""
    return "".join(("Hello ", name, "!"))

# Minimal placeholder Gradio UI; launching it keeps the Space's HTTP server
# running while the bot thread does the real work
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()