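"""Discord bot for the Hugging Face server, driving several Spaces via gradio_client.

Commands:
  !deepfloydif <prompt>  -> generate a 2x2 image grid with the DeepFloyd IF
                            Space; the author reacts to pick an image to upscale
  !falcon <prompt>       -> per-user chat threads backed by the falcon-chat Space
  !falconclear           -> reset the calling user's Falcon conversation

The bot runs in a background thread while a minimal Gradio Interface occupies
the main thread (keeping the hosting Space alive). Two environment variables
are required: HF_TOKEN (Hugging Face API token) and GRADIOTEST_TOKEN (the
Discord bot token; see the note next to the token assignments below).
"""
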
import discord
import os
import threading
import gradio as gr
import json
import random
import asyncio
import glob

from discord.ext import commands

from gradio_client import Client
from PIL import Image

# NOTE: the naming is confusing but kept as-is: GRADIOTEST_TOKEN holds the
# Hugging Face API token (read from HF_TOKEN), while the Discord bot token is
# read from an environment variable that is itself named GRADIOTEST_TOKEN.
GRADIOTEST_TOKEN = os.getenv('HF_TOKEN')
DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)

# gradio_client handles for the Spaces this bot drives
df = Client("huggingface-projects/IF", GRADIOTEST_TOKEN)
jojogan = Client("akhaliq/JoJoGAN", GRADIOTEST_TOKEN)
falconclient = Client("HuggingFaceH4/falcon-chat", GRADIOTEST_TOKEN)

intents = discord.Intents.default()
intents.message_content = True

bot = commands.Bot(command_prefix='!', intents=intents)


@bot.event
async def on_ready():
    print('Logged on as', bot.user)
    bot.log_channel = bot.get_channel(1100458786826747945)


async def safetychecks(ctx):
    """Gate commands: returns True only if it is safe to proceed."""
    failure_emoji = '<:disagree:1098628957521313892>'
    try:
        # the bot must not trigger its own commands
        if ctx.author.bot:
            print("Error: The bot is not allowed to use its own commands.")
            await ctx.message.add_reaction(failure_emoji)
            return False

        # if the bot carries the offline-bot role, it is in maintenance mode
        offline_bot_role_id = 1103676632667017266
        bot_member = ctx.guild.get_member(bot.user.id)
        if any(role.id == offline_bot_role_id for role in bot_member.roles):
            print(f"Error: {ctx.author} The bot is offline or under maintenance. (Remove the offline-bot role to bring it online)")
            thread = await ctx.message.create_thread(name='Offline Error')
            await thread.send(f"Error: {ctx.author.mention} The bot is offline or under maintenance. (Remove the offline-bot role to bring it online)")
            await ctx.message.add_reaction(failure_emoji)
            return False

        '''
        # review this, may be able to remove
        # check if the command is in the allowed channel(s)
        bot_test = 1100458786826747945
        deepfloydif_channel = 1119313215675973714
        falcon_channel = 1119313248056004729

        channel_ids = [bot_test, deepfloydif_channel, jojo_channel, spidey_channel, sketch_channel, falcon_channel]
        if ctx.channel.id not in channel_ids:
            print(f"{ctx.author}, commands are not permitted in {ctx.channel}")
            thread = await ctx.message.create_thread(name='Channel Error')
            await thread.send(f"Error: {ctx.author.mention} commands are not permitted in {ctx.channel}")
            await ctx.message.add_reaction(failure_emoji)
            return False
        '''

        '''
        # check if the user has the required role(s)
        guild_id = 879548962464493619
        verified_role_id = 900063512829755413  # @verified = 900063512829755413, HF = 897376942817419265, fellows = 963431900825919498
        huggingfolks_role_id = 897376942817419265
        fellows_role_id = 963431900825919498
        contentcreator_role_id = 928589475968323636
        betatester_role_id = 1113511652990668893

        allowed_role_ids = [huggingfolks_role_id, fellows_role_id, contentcreator_role_id, betatester_role_id]
        guild = bot.get_guild(guild_id)
        user_roles = ctx.author.roles
        has_allowed_role = any(role.id in allowed_role_ids for role in user_roles)
        if not has_allowed_role:
            print(f"Error: {ctx.author} does not have any of the required roles to use that command.")
            thread = await ctx.message.create_thread(name='Perms Error')
            await thread.send(f"Error: {ctx.author.mention} does not have any of the required roles to use that command.")
            await ctx.message.add_reaction(failure_emoji)
            return False
        '''

        return True

    except Exception as e:
        print(f"Error: safetychecks failed somewhere, command will not continue, {e}")
        await ctx.message.reply(f"⚠️ <@811235357663297546> SC failed somewhere ⚠️ {e}")
        await ctx.message.add_reaction(failure_emoji)
        return False
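

# safetychecks() is awaited at the top of commands (see deepfloydif below);
# any falsy return aborts the command before it calls out to a Space.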


async def fullqueue(e, thread):
    """Translate known gradio_client failure strings into friendlier messages.

    These substring checks are brittle by nature: they match error text emitted
    by the Space / gradio_client, so they may break if those messages change.
    """
    error_message = str(e)
    if ("Error: Expecting value: line 1 column 1 (char 0)" in error_message
            or "Error: Queue is full! Please try again." in error_message):
        await thread.send("Queue is full! Please try again.")
    elif "local variable 'stage_1_results' referenced before assignment" in error_message:
        await thread.send("Space is building! Please try again after a few minutes.")


def inference(prompt):
    """Stage 1: ask the DeepFloyd IF Space for four 64x64 candidate images."""
    negative_prompt = ''
    seed = random.randint(0, 1000)
    number_of_images = 4
    guidance_scale = 7
    custom_timesteps_1 = 'smart50'
    number_of_inference_steps = 50

    stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
        prompt, negative_prompt, seed, number_of_images, guidance_scale,
        custom_timesteps_1, number_of_inference_steps, api_name='/generate64')

    return [stage_1_results, stage_1_param_path, stage_1_result_path]
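

# What /generate64 returns, inferred from how the values are used in this file
# (an assumption, not a documented contract):
#   stage_1_results     -> local directory containing the generated PNGs
#                          (deepfloydif() globs '*.png' out of it)
#   stage_1_param_path  -> path to a file of generation parameters (unused here)
#   stage_1_result_path -> a '/tmp/...' path identifying this generation; its
#                          '/tmp/' prefix is stripped and round-tripped through
#                          the posted image's filename so /upscale256 can find it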


def inference2(index, stage_1_result_path):
    """Stage 2: upscale the selected stage-1 image; returns a local image path."""
    selected_index_for_stage_2 = index
    seed_2 = 0
    guidance_scale_2 = 4
    custom_timesteps_2 = 'smart50'
    number_of_inference_steps_2 = 50
    result_path = df.predict(stage_1_result_path, selected_index_for_stage_2, seed_2,
                             guidance_scale_2, custom_timesteps_2,
                             number_of_inference_steps_2, api_name='/upscale256')

    return result_path


async def react1234(reaction_emojis, combined_image_dfif):
    # seed the grid message with one reaction per selectable image
    for emoji in reaction_emojis:
        await combined_image_dfif.add_reaction(emoji)


@bot.command()
async def deepfloydif(ctx, *, prompt: str):
    """Generate a 2x2 grid of candidate images for `prompt`; the author then
    picks one to upscale by reacting to the posted grid."""
    thread = None
    try:
        try:
            if not await safetychecks(ctx):
                return
            if ctx.channel.id != 1119313215675973714:  # deepfloydif channel only
                return

            await ctx.message.add_reaction('<a:loading:1114111677990981692>')
            dfif_command_message_id = ctx.message.id
            thread = await ctx.message.create_thread(name=f'DeepfloydIF | {prompt}', auto_archive_duration=60)
            await thread.send(f'{ctx.author.mention} Generating images in thread, can take ~1 minute...')

        except Exception as e:
            print(f"Error: {e}")
            if thread is None:
                thread = await ctx.message.create_thread(name='DFIF1 Error')
            await thread.send(f"{ctx.author.mention} Error before stage 1 generation, {e}. If error code: 50035, upscale can still work.")
            await fullqueue(e, thread)
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
            await ctx.message.add_reaction('<:disagree:1098628957521313892>')
            await thread.edit(archived=True)
            return

        try:
            # run the blocking Space call in a worker thread so the event loop stays free
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(None, inference, prompt)

            stage_1_results = result[0]
            stage_1_result_path = result[2]

            # strip the leading '/tmp/' so the path can travel inside a filename
            partialpath = stage_1_result_path[5:]

        except Exception as e:
            print(f"Error: {e}")
            if thread is None:
                thread = await ctx.message.create_thread(name='Generation Error')
            await thread.send(f"{ctx.author.mention} Error during stage 1 generation, {e}")
            await fullqueue(e, thread)
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
            await ctx.message.add_reaction('<:disagree:1098628957521313892>')
            await thread.edit(archived=True)
            return

        try:
            # glob already returns paths rooted at stage_1_results
            png_files = list(glob.glob(f"{stage_1_results}/**/*.png"))

            if png_files:
                img1, img2, img3, img4 = (Image.open(p) for p in png_files[:4])

                # paste the four candidates into a single 2x2 grid
                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))
                combined_image.paste(img1, (0, 0))
                combined_image.paste(img2, (img1.width, 0))
                combined_image.paste(img3, (0, img1.height))
                combined_image.paste(img4, (img1.width, img1.height))

                # the filename carries both the tmp path and the originating
                # command message id; on_reaction_add() parses them back out
                combined_image_path = os.path.join(stage_1_results, f'{partialpath}{dfif_command_message_id}.png')
                combined_image.save(combined_image_path)

                with open(combined_image_path, 'rb') as f:
                    combined_image_dfif = await thread.send(
                        f'{ctx.author.mention} React with the image number you want to upscale!',
                        file=discord.File(f, f'{partialpath}{dfif_command_message_id}.png'))

                # one reaction per grid corner: top-left, top-right, bottom-left, bottom-right
                emoji_list = ['↖️', '↗️', '↙️', '↘️']
                await react1234(emoji_list, combined_image_dfif)

                await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                await ctx.message.add_reaction('<:agree:1098629085955113011>')
            ''' individual images
            if png_files:
                for i, png_file in enumerate(png_files):
                    img = Image.open(png_file)
                    image_path = os.path.join(stage_1_results, f'{i+1}{partialpath}.png')
                    img.save(image_path)
                    with open(image_path, 'rb') as f:
                        await thread.send(f'{ctx.author.mention} Image {i+1}', file=discord.File(f, f'{i+1}{partialpath}.png'))
                    await asyncio.sleep(1)
            '''

        except Exception as e:
            print(f"Error: {e}")
            if thread is None:
                thread = await ctx.message.create_thread(name='Posting Error')
            await thread.send(f"{ctx.author.mention} Encountered error while posting combined image in thread, {e}")
            await fullqueue(e, thread)
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
            await ctx.message.add_reaction('<:disagree:1098628957521313892>')
            await thread.edit(archived=True)
            return

    except Exception as e:
        print(f"Error: {e}")
        if thread is None:
            thread = await ctx.message.create_thread(name='deepfloydif Error')
        await thread.send(f"{ctx.author.mention} Overall error with deepfloydif, {e}")
        await fullqueue(e, thread)
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        await thread.edit(archived=True)


async def dfif2(index: int, stage_1_result_path, thread, dfif_command_message_id):
    """Stage 2: upscale the image the user picked and post it in the thread."""
    try:
        parent_channel = thread.parent
        dfif_command_message = await parent_channel.fetch_message(dfif_command_message_id)
        await dfif_command_message.remove_reaction('<:agree:1098629085955113011>', bot.user)
        await dfif_command_message.add_reaction('<a:loading:1114111677990981692>')

        # index 0..3 maps to the grid corners laid out in deepfloydif()
        positions = {1: "top left", 2: "top right", 3: "bottom left", 4: "bottom right"}
        position = positions.get(index + 1, "selected")
        await thread.send(f"Upscaling the {position} image...")

        # run the blocking Space call in a worker thread so the event loop stays free
        loop = asyncio.get_running_loop()
        result_path = await loop.run_in_executor(None, inference2, index, stage_1_result_path)

        with open(result_path, 'rb') as f:
            await thread.send('Here is the upscaled image! :)', file=discord.File(f, 'result.png'))

        await dfif_command_message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await dfif_command_message.add_reaction('<:agree:1098629085955113011>')
        await thread.edit(archived=True)

    except Exception as e:
        print(f"Error: {e}")
        parent_channel = thread.parent
        dfif_command_message = await parent_channel.fetch_message(dfif_command_message_id)
        await dfif_command_message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await dfif_command_message.add_reaction('<:disagree:1098628957521313892>')
        await thread.send(f"Error during stage 2 upscaling, {e}")
        await fullqueue(e, thread)
        await thread.edit(archived=True)
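

# How state round-trips through the grid image's filename, with hypothetical
# values for illustration: a grid posted as 'tmpa1b2c3d41119313215675973714.png'
# splits into partialpath 'tmpa1b2c3d4' (the first 11 characters, the length of
# a tempfile-style 'tmpXXXXXXXX' name) plus the originating command message id,
# so no server-side state has to be kept between generation and upscaling.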


@bot.event
async def on_reaction_add(reaction, user):
    try:
        if user.bot:
            return

        thread = reaction.message.channel
        threadparentid = thread.parent.id
        # only handle reactions inside threads under the deepfloydif channel
        if threadparentid == 1119313215675973714:
            if reaction.message.attachments:
                # only the user who was mentioned in the grid message may pick
                if user.id == reaction.message.mentions[0].id:
                    attachment = reaction.message.attachments[0]
                    image_name = attachment.filename

                    # filename = '<partialpath><command message id>.png'
                    partialpathmessageid = image_name[:-4]
                    partialpath = partialpathmessageid[:11]
                    messageid = partialpathmessageid[11:]

                    fullpath = "/tmp/" + partialpath

                    # map the corner reaction back to the stage-1 image index
                    emoji_to_index = {"↖️": 0, "↗️": 1, "↙️": 2, "↘️": 3}
                    index = emoji_to_index.get(str(reaction.emoji))
                    if index is None:
                        return

                    stage_1_result_path = fullpath
                    dfif_command_message_id = int(messageid)

                    await dfif2(index, stage_1_result_path, thread, dfif_command_message_id)

    except Exception as e:
        print(f"Error: {e} (known error, does not cause issues, fix later)")


# per-user Falcon chat state
chathistory = None
falcon_users = []

falcon_dictionary = {}  # user id -> path of that user's latest chat-history file
falcon_userid_threadid_dictionary = {}  # user id -> their conversation thread id
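

# The chat-history file returned by the falcon-chat Space is read with
# json.load() and indexed as data[-1][-1], which suggests a list of
# [user_message, bot_reply] pairs, e.g. (assumed, illustrative shape):
#   [["hi", "Hello! How can I help?"], ["thanks", "You're welcome!"]]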


@bot.command()
async def falcon(ctx, *, prompt: str):
    """Chat with Falcon; the first message opens a per-user thread, and
    messages inside that thread continue the same conversation."""
    try:
        global falcon_users
        global falcon_dictionary
        global falcon_userid_threadid_dictionary

        if ctx.author.bot:
            return

        # the same system prompt is used for new and continued conversations
        instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."

        if ctx.channel.id == 1119313248056004729:  # the falcon channel
            if ctx.author.id not in falcon_userid_threadid_dictionary:
                # first message from this user: open a new conversation thread
                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                thread = await ctx.message.create_thread(name=f'{ctx.author}')
                await thread.send("[DISCLAIMER: HuggingBot is a **highly experimental** beta feature; The Falcon model and system prompt can be found here: https://huggingface.co/spaces/HuggingFaceH4/falcon-chat]")

                # fn_index=5 asks the Space for a fresh, empty chat history
                chathistory = falconclient.predict(fn_index=5)
                job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
                while not job.done():  # poll the blocking job until it finishes
                    status = job.status()

                file_paths = job.outputs()
                full_generation = file_paths[-1]
                with open(full_generation, 'r') as file:
                    data = json.load(file)
                    output_text = data[-1][-1]  # latest bot reply

                falcon_dictionary[ctx.author.id] = full_generation
                falcon_userid_threadid_dictionary[ctx.author.id] = thread.id

                print(output_text)
                await thread.send(f"{output_text}")
                await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)

            else:
                # the user already has a conversation; point them back to it
                thread_id = falcon_userid_threadid_dictionary[ctx.author.id]
                thread_link = f"https://discord.com/channels/879548962464493619/1119313248056004729/{thread_id}"
                await ctx.reply(f"{ctx.author.mention}, you already have an existing conversation here {thread_link}! Use !falconclear in the #falcon channel to start a new one.")

        # a message inside the user's own conversation thread continues the chat
        if ctx.channel.id == falcon_userid_threadid_dictionary.get(ctx.author.id):
            await ctx.message.add_reaction('<a:loading:1114111677990981692>')
            chathistory = falcon_dictionary[ctx.author.id]

            job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
            while not job.done():  # poll the blocking job until it finishes
                status = job.status()

            file_paths = job.outputs()
            full_generation = file_paths[-1]
            with open(full_generation, 'r') as file:
                data = json.load(file)
                output_text = data[-1][-1]  # latest bot reply

            falcon_dictionary[ctx.author.id] = full_generation
            print(output_text)
            await ctx.reply(f"{output_text}")
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)

    except Exception as e:
        print(f"Error: {e}")
        await ctx.reply(f"{e} cc <@811235357663297546> (falconprivate error)")
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')


@bot.command()
async def falconclear(ctx):
    """Forget the calling user's Falcon conversation so they can start fresh."""
    if not ctx.author.bot:
        if ctx.channel.id == 1119313248056004729:
            if ctx.author.id in falcon_userid_threadid_dictionary:
                if ctx.author.id in falcon_dictionary:
                    del falcon_userid_threadid_dictionary[ctx.author.id]
                    del falcon_dictionary[ctx.author.id]
                    await ctx.reply(f"{ctx.author.mention}'s conversation has been cleared. Feel free to start a new one!")


def run_bot():
    bot.run(DISCORD_TOKEN)


# run the Discord bot on a background thread so the Gradio app below can own
# the main thread and keep the hosting Space alive
threading.Thread(target=run_bot).start()


def greet(name):
    return "Hello " + name + "!"


demo = gr.Interface(fn=greet, inputs="text", outputs="text")

demo.queue(concurrency_count=20)
demo.launch()