# huggingbots / app.py
import discord
import gradio_client
from gradio_client import Client
import gradio as gr
import os
import threading
# for deepfloydif
import requests
import json
import random
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
# random + small llama #

# todos:
# - alert
# - fix error on first command on bot startup
# - stable diffusion upscale
# - buttons for deepfloydIF (1,2,3,4)
# - application commands instead of message content checks (more user-friendly; see the commented sketch near the bottom of this file)
DFIF_TOKEN = os.getenv('DFIF_TOKEN')

# deepfloydIF
df = Client("DeepFloyd/IF", hf_token=DFIF_TOKEN)  # not reliable at the moment
#df = Client("huggingface-projects/IF", DFIF_TOKEN)

# stable diffusion upscaler
sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", hf_token=DFIF_TOKEN)
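# The latent upscaler client above is not wired in yet (see "stable diffusion upscale" in the todos).
# A minimal sketch of how it could be called later, assuming a hypothetical api_name and argument
# order -- check the Space via sdlu.view_api() before enabling:
#
# def upscale_image(image_path, prompt):
#     # hypothetical endpoint name and arguments; the upscaler Space's real signature is not confirmed here
#     return sdlu.predict(image_path, prompt, api_name="/upscale")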
# Set up discord bot
class MyClient(discord.Client):
    async def on_ready(self):
        print('Logged on as', self.user)

    async def on_message(self, message):
        # safety checks --------------------------------------------------------------------------------------------
        # tl;dr, the bot should only run if:
        # 1) it does not have the @offline role
        # 2) the message author has the required role (@verified / @huggingfolks)
        # 3) the message was sent in an allowed channel (#bot-test)

        # the bot won't respond to itself, which prevents feedback loops + API spam
        if message.author == self.user:
            return

        # if the bot has this role, it won't run
        OFFLINE_ROLE_ID = 1103676632667017266  # 1103676632667017266 = @offline / under maintenance
        guild = message.guild
        bot_member = guild.get_member(self.user.id)
        if any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
            return

        # the message author needs this role in order to use the bot
        REQUIRED_ROLE_ID = 897376942817419265  # 900063512829755413 = @verified, 897376942817419265 = @huggingfolks
        if not any(role.id == REQUIRED_ROLE_ID for role in message.author.roles):
            return

        # channels where the bot will accept commands
        ALLOWED_CHANNEL_IDS = [1100458786826747945]  # 1100458786826747945 = #bot-test
        if message.channel.id not in ALLOWED_CHANNEL_IDS:
            return
        # deepfloydif ----------------------------------------------------------------------------------------------
        if message.content.startswith('!deepfloydif'):  # TODO: change to an application (slash) command, more intuitive
            # /generate64 signature:
            # (prompt, negative_prompt, seed, number_of_images, guidance_scale, custom_timesteps_1, number_of_inference_steps, api_name="/generate64")
            # -> (stage_1_results, stage_1_param_path, stage_1_result_path)

            # input prompt: everything after the "!deepfloydif" prefix
            prompt = message.content[12:].strip()
            number_of_images = 4

            # random seed
            current_time = int(time.time())
            random.seed(current_time)
            seed = random.randint(0, 2**32 - 1)

            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
                prompt, "blur", seed, number_of_images, 7.0, 'smart100', 50, api_name="/generate64")
            #stage_1_results, stage_1_param_path, stage_1_result_path = df.predict("gradio written on a wall", "blur", 1, 1, 7.0, 'smart100', 50, api_name="/generate64")

            # stage_1_results -> path to a directory of png files, so we isolate those
            png_files = [f for f in os.listdir(stage_1_results) if f.endswith('.png')]

            # merge the images into one larger 2x2 image, the way midjourney does it
            if png_files:
                first_png = png_files[0]
                second_png = png_files[1]
                third_png = png_files[2]
                fourth_png = png_files[3]
                '''
                [],[],[],[]  ->  [][]
                                 [][]
                '''
                first_png_path = os.path.join(stage_1_results, first_png)
                second_png_path = os.path.join(stage_1_results, second_png)
                third_png_path = os.path.join(stage_1_results, third_png)
                fourth_png_path = os.path.join(stage_1_results, fourth_png)

                img1 = Image.open(first_png_path)
                img2 = Image.open(second_png_path)
                img3 = Image.open(third_png_path)
                img4 = Image.open(fourth_png_path)

                # create a new blank image with the size of the combined 2x2 grid
                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))

                # paste the individual images into the combined image
                combined_image.paste(img1, (0, 0))
                combined_image.paste(img2, (img1.width, 0))
                combined_image.paste(img3, (0, img1.height))
                combined_image.paste(img4, (img1.width, img1.height))

                # save the combined image
                combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
                combined_image.save(combined_image_path)

                # send the combined image file as a discord attachment
                with open(combined_image_path, 'rb') as f:
                    await message.reply('Here is the combined image', file=discord.File(f, 'combined_image.png'))
            '''
            # stage 2 (commented out for now)
            selected_index_for_stage_2 = -1
            custom_timesteps_2 = 'smart100'  # could reset to smart50 if the index was the issue
            seed = 362572064  # again, could randomize this: seed = random.randint(0, 2**32 - 1)
            # predict(stage_1_result_path, selected_index_for_stage_2, seed, guidance_scale, custom_timesteps_2, number_of_inference_steps, api_name="/upscale256") -> result
            img = df.predict(stage_1_result_path, selected_index_for_stage_2, seed, guidance_scale, custom_timesteps_2, number_of_inference_steps, api_name="/upscale256")

            # save the generated image to a file
            img_path = "/tmp/generated_image.png"
            img.save(img_path)

            # send the image file as a discord attachment
            with open(img_path, 'rb') as f:
                await message.reply('Here is the generated image', file=discord.File(f, 'generated_image.png'))
            '''
DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
intents = discord.Intents.default()
intents.message_content = True
client = MyClient(intents=intents)
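# A minimal, untested sketch of the "application commands" todo: the same !deepfloydif flow exposed
# as a discord.py slash command instead of a message-content check. Names and wiring here are
# assumptions, not the bot's current behavior.
#
# tree = discord.app_commands.CommandTree(client)
#
# @tree.command(name="deepfloydif", description="Generate a 2x2 grid of images with DeepFloyd IF")
# async def deepfloydif(interaction: discord.Interaction, prompt: str):
#     await interaction.response.defer()  # generation takes a while, so defer the reply
#     # ...same generation + 2x2 merging logic as in on_message above...
#     await interaction.followup.send("Here is the combined image", file=discord.File(combined_image_path))
#
# # the command tree would also need to be synced once the bot is ready, e.g. in on_ready:
# # await tree.sync()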
def run_bot():
    client.run(DISCORD_TOKEN)

threading.Thread(target=run_bot).start()

# minimal gradio interface so the Space has something to serve while the bot runs in the background thread
def greet(name):
    return "Hello " + name + "!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()