# huggingbots / app.py
# (Hugging Face Spaces page header removed — it was non-Python page chrome:
#  author "lunarflu" (HF Staff), commit "Update app.py" a2c4e6e, 7.52 kB)
import discord
from discord.ui import Button
import gradio_client
from gradio_client import Client
import gradio as gr
import os
import threading
#for deepfloydif
import requests
import json
import random
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
# random + small llama #
#todos
#alert
#fix error on first command on bot startup
#stable diffusion upscale
#buttons for deepfloydIF (1,2,3,4)
#application commands instead of message content checks (more user-friendly)
# HF token used to authenticate against the private/gated Gradio Spaces below.
DFIF_TOKEN = os.getenv('DFIF_TOKEN')
#deepfloydIF
#df = Client("DeepFloyd/IF", DFIF_TOKEN) #not reliable at the moment
# Gradio client for DeepFloyd IF (stage-1 64px generation + stage-2 upscale).
# NOTE: constructing a Client performs a network handshake with the Space.
df = Client("huggingface-projects/IF", DFIF_TOKEN)
#stable diffusion upscaler
# Second client for the latent upscaler Space (currently unused in this file
# — presumably for the "stable diffusion upscale" TODO above; confirm).
sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", DFIF_TOKEN)
# Set up discord bot
class MyClient(discord.Client):
    """Discord client wrapping the DeepFloyd IF Gradio Space.

    ``!deepfloydif <prompt>`` generates four 64px candidates, posts them as a
    single 2x2 grid with one button per candidate; clicking a button upscales
    the selected candidate to 256px and posts the result.
    """

    async def on_ready(self):
        # Fired once the gateway connection is established.
        print('Logged on as', self.user)

    async def on_message(self, message):
        # safety checks ----------------------------------------------------
        # tldr, the bot should only run if:
        #   1) it does not have the @offline role
        #   2) the author has the required role
        #   3) the message is in an allowed channel
        # never respond to ourselves — prevents feedback loops + API spam
        if message.author == self.user:
            return
        # DMs have no guild; the role/channel checks below need one.
        # (fix: previously crashed with AttributeError on DM messages)
        guild = message.guild
        if guild is None:
            return
        # if the bot carries this role, it is under maintenance — stay silent
        OFFLINE_ROLE_ID = 1103676632667017266  # 1103676632667017266 = @offline / under maintenance
        bot_member = guild.get_member(self.user.id)
        if any(role.id == OFFLINE_ROLE_ID for role in bot_member.roles):
            return
        # the message author needs this role in order to use the bot
        REQUIRED_ROLE_ID = 897376942817419265  # 900063512829755413 = @verified, 897376942817419265 = @huggingfolks
        if not any(role.id == REQUIRED_ROLE_ID for role in message.author.roles):
            return
        # channels where bot will accept commands
        ALLOWED_CHANNEL_IDS = [1100458786826747945]  # 1100458786826747945 = #bot-test
        if message.channel.id not in ALLOWED_CHANNEL_IDS:
            return
        # deepfloydif -------------------------------------------------------
        if message.content.startswith('!deepfloydif'):  # TODO: application commands, more intuitive
            # /generate64 signature:
            #   (prompt, negative_prompt, seed, number_of_images, guidance_scale,
            #    custom_timesteps_1, number_of_inference_steps)
            #   -> (stage_1_results, stage_1_param_path, stage_1_result_path)
            prompt = message.content[12:].strip()  # text after "!deepfloydif"
            negative_prompt = ''
            seed = 0
            number_of_images = 4
            guidance_scale = 7
            custom_timesteps_1 = 'smart50'
            number_of_inference_steps = 50
            # NOTE(review): df.predict is a blocking network call inside an
            # async event handler — it stalls the event loop; consider
            # loop.run_in_executor.
            stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
                prompt,
                negative_prompt,
                seed,
                number_of_images,
                guidance_scale,
                custom_timesteps_1,
                number_of_inference_steps,
                api_name='/generate64')
            # Remember the stage-1 path so on_button_click can run stage 2.
            # (fix: it was a local here but referenced in on_button_click,
            # which raised NameError on every button click)
            # NOTE(review): a single shared attribute races when two users
            # generate concurrently — consider keying by message/user id.
            self.stage_1_result_path = stage_1_result_path
            # stage_1_results is a directory of png files; sort so the grid
            # layout is deterministic (os.listdir order is arbitrary).
            png_files = sorted(f for f in os.listdir(stage_1_results) if f.endswith('.png'))
            # merge images into one larger 2x2 image, the way midjourney does
            # (fix: was `if png_files:` — indexing four entries needs >= 4
            # files, otherwise IndexError)
            if len(png_files) >= 4:
                '''
                [],[],[],[] -> [][]
                               [][]
                '''
                candidate_paths = [os.path.join(stage_1_results, f) for f in png_files[:4]]
                img1 = Image.open(candidate_paths[0])
                img2 = Image.open(candidate_paths[1])
                img3 = Image.open(candidate_paths[2])
                img4 = Image.open(candidate_paths[3])
                # blank canvas sized for the combined 2x2 grid
                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))
                # paste the individual images into the combined image
                combined_image.paste(img1, (0, 0))
                combined_image.paste(img2, (img1.width, 0))
                combined_image.paste(img3, (0, img1.height))
                combined_image.paste(img4, (img1.width, img1.height))
                # save the combined image next to the originals
                combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
                combined_image.save(combined_image_path)
                embed = discord.Embed(title="Combined Image")
                embed.set_image(url="attachment://combined_image.png")
                # NOTE(review): MessageComponents / components= kwarg belong
                # to a discord.py fork (not upstream discord.py 2.x, which
                # uses discord.ui.View) — confirm the installed library.
                components = discord.ui.MessageComponents()
                components.add_action_row(
                    # fix: ButtonStyle was an undefined name — qualify it
                    Button(style=discord.ButtonStyle.blue, label="Image 1", custom_id="image_1"),
                    Button(style=discord.ButtonStyle.blue, label="Image 2", custom_id="image_2"),
                    Button(style=discord.ButtonStyle.blue, label="Image 3", custom_id="image_3"),
                    Button(style=discord.ButtonStyle.blue, label="Image 4", custom_id="image_4"),
                )
                await message.channel.send(embed=embed, file=discord.File(combined_image_path, 'combined_image.png'), components=components)

    async def on_button_click(self, interaction):
        # Handles the four "Image N" buttons posted by on_message.
        if interaction.custom_id.startswith("image_"):
            image_index = int(interaction.custom_id.split("_")[1])
            await interaction.respond(content=f"You clicked image {image_index}")
            # dfif stage 2: upscale the selected candidate to 256px
            selected_index_for_stage_2 = image_index - 1  # buttons are 1-based, API is 0-based
            seed_2 = 0
            guidance_scale_2 = 4
            custom_timesteps_2 = 'smart50'
            number_of_inference_steps_2 = 50
            # fix: read the path stored by on_message (was an undefined name)
            result_path = df.predict(self.stage_1_result_path, selected_index_for_stage_2, seed_2, guidance_scale_2, custom_timesteps_2, number_of_inference_steps_2, api_name='/upscale256')
            # Send the upscaled image in a new message
            upscale_image_path = os.path.join(result_path, "output.png")
            with open(upscale_image_path, 'rb') as f:
                await interaction.channel.send('Here is the upscaled image:', file=discord.File(f, 'upscaled_image.png'))
# Bot token comes from the Space's secrets; None here makes client.run fail
# fast with a clear auth error instead of a missing-variable crash.
DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)
intents = discord.Intents.default()
# Required to read message text for the "!deepfloydif" prefix check
# (privileged intent — must also be enabled in the developer portal).
intents.message_content = True
client = MyClient(intents=intents)
def run_bot():
    # Blocking call — owns the asyncio event loop until the bot shuts down.
    client.run(DISCORD_TOKEN)
# Run the bot on a background thread so the Gradio app below can occupy
# the main thread (Spaces require a web server in the foreground).
threading.Thread(target=run_bot).start()
def greet(name):
    """Return a friendly greeting for *name*."""
    return f"Hello {name}!"
# Minimal Gradio UI — it exists to keep the Space's web process alive
# (the bot itself runs on the thread started above); `greet` is just a
# placeholder handler.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()