|
import asyncio
import concurrent.futures
import glob
import json
import multiprocessing
import os
import random
import re
import shutil
import tempfile
import threading
import time
import uuid

import discord
import gradio as gr
import requests
from discord import Embed, Color
from discord.ext import commands
from gradio_client import Client
from PIL import Image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# NOTE(review): the naming here is confusing but appears deliberate — the
# Hugging Face token is read from the HF_TOKEN env var into GRADIOTEST_TOKEN,
# while the Discord login token is read from an env var literally named
# "GRADIOTEST_TOKEN". Confirm the deployment's environment variables before
# renaming anything.
GRADIOTEST_TOKEN = os.getenv('HF_TOKEN')

DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)

# Gradio Space clients used as model backends (authenticated with the HF token):
# df -> DeepFloyd IF text-to-image, jojogan -> style transfer, falconclient -> chat.
df = Client("huggingface-projects/IF", GRADIOTEST_TOKEN)

jojogan = Client("akhaliq/JoJoGAN", GRADIOTEST_TOKEN)

falconclient = Client("HuggingFaceH4/falcon-chat", GRADIOTEST_TOKEN)

# message_content is a privileged intent; required so the bot can read command text.
intents = discord.Intents.default()

intents.message_content = True

bot = commands.Bot(command_prefix='!', intents=intents)
|
|
|
|
|
@bot.command()
async def info(ctx):
    """Reply with the bot's working directory and the system temp directory.

    Debug aid: the DeepFloyd pipeline reconstructs paths under /tmp, so knowing
    both directories helps diagnose path issues on the host.
    """
    current_directory = os.getcwd()
    # Fix: `tempfile` was used here but never imported, so this command always
    # raised NameError. The import is now at the top of the file.
    temp_directory = tempfile.gettempdir()
    await ctx.reply(f"current_directory={current_directory}\n temp_directory={temp_directory}")
|
|
|
@bot.event
async def on_ready():
    """Announce a successful login and cache the log channel on the bot object."""
    log_channel_id = 1100458786826747945
    print('Logged on as', bot.user)
    bot.log_channel = bot.get_channel(log_channel_id)
|
|
|
# Fix: the function was previously named `commands`, which rebound the
# module-level name and shadowed the imported `discord.ext.commands` module.
# Registering with name="commands" keeps the user-facing !commands unchanged.
@bot.command(name="commands")
async def list_commands(ctx):
    """Reply with a short list of the bot's available commands."""
    try:
        if await safetychecks(ctx):
            await ctx.reply(f"Use !deepfloydif [prompt], !jojo !spidey or !sketch. Have fun! π€π")
    except Exception as e:
        print(f"Error: unable to help :( {e}")
|
|
|
async def safetychecks(ctx):
    """Gate every prefix command: return True only if the invocation is allowed.

    Checks, in order: the author is not a bot; the bot is not flagged offline
    via a maintenance role; the command was used in an allowed channel; and the
    author holds at least one allowed role. On any failure the command message
    gets a "disagree" reaction (and usually an explanatory thread) and False is
    returned. On an unexpected exception this returns None (falsy), which
    callers treat the same as False.
    """
    failure_emoji = '<:disagree:1098628957521313892>'
    try:
        # 1) Never respond to other bots (or ourselves).
        if ctx.author.bot:
            print(f"Error: The bot is not allowed to use its own commands.")
            await ctx.message.add_reaction(failure_emoji)
            return False

        # 2) Maintenance switch: giving the bot this role takes it "offline".
        offline_bot_role_id = 1103676632667017266
        bot_member = ctx.guild.get_member(bot.user.id)
        if any(role.id == offline_bot_role_id for role in bot_member.roles):
            print(f"Error: {ctx.author} The bot is offline or under maintenance. (Remove the offline-bot role to bring it online)")
            thread = await ctx.message.create_thread(name=f'Offline Error')
            await thread.send(f"Error: {ctx.author.mention} The bot is offline or under maintenance. (Remove the offline-bot role to bring it online)")
            await ctx.message.add_reaction(failure_emoji)
            return False

        # 3) Channel allow-list (one channel per feature plus a test channel).
        bot_test = 1100458786826747945
        deepfloydif_channel = 1113182673859518514
        jojo_channel = 1114217739473649764
        sketch_channel = 1114218145343877180
        spidey_channel = 1114218191594471514
        falcon_channel = 1116089829147557999

        channel_ids = [bot_test, deepfloydif_channel, jojo_channel, spidey_channel, sketch_channel, falcon_channel]
        if ctx.channel.id not in channel_ids:
            print(f"{ctx.author}, commands are not permitted in {ctx.channel}")
            thread = await ctx.message.create_thread(name=f'Channel Error')
            await thread.send(f"Error: {ctx.author.mention} commands are not permitted in {ctx.channel}")
            await ctx.message.add_reaction(failure_emoji)
            return False

        # 4) Role allow-list. NOTE: verified_role_id and guild are currently
        # unused here — presumably kept for a future broader rollout; confirm
        # before deleting.
        guild_id = 879548962464493619
        verified_role_id = 900063512829755413
        huggingfolks_role_id = 897376942817419265
        fellows_role_id = 963431900825919498
        contentcreator_role_id = 928589475968323636
        betatester_role_id = 1113511652990668893

        allowed_role_ids = [huggingfolks_role_id, fellows_role_id, contentcreator_role_id, betatester_role_id]
        guild = bot.get_guild(guild_id)
        user_roles = ctx.author.roles
        has_allowed_role = any(role.id in allowed_role_ids for role in user_roles)
        if not has_allowed_role:
            print(f"Error: {ctx.author} does not have any of the required roles to use that command.")
            thread = await ctx.message.create_thread(name=f'Perms Error')
            await thread.send(f"Error: {ctx.author.mention} does not have any of the required roles to use that command.")
            await ctx.message.add_reaction(failure_emoji)
            return False

        # All checks passed.
        return True

    except Exception as e:
        # Ping the maintainer on unexpected failures; implicit None return.
        print(f"Error: safetychecks failed somewhere, command will not continue, {e}")
        await ctx.message.reply(f"β <@811235357663297546> SC failed somewhere β {e}")
        await ctx.message.add_reaction(failure_emoji)
|
|
|
async def on_message_safetychecks(message):
    """Gate for the (currently disabled) on_message handler.

    Returns True only when *message* comes from a non-bot user holding one of
    the allowed roles and the bot is not flagged offline. Unlike safetychecks()
    this never reacts or posts — it only logs and returns False (or None on an
    unexpected exception, which callers should treat as falsy).
    """
    try:
        # Never respond to other bots (or ourselves).
        if message.author.bot:
            print(f"The bot will ignore its own messages.")
            return False

        # Maintenance switch: giving the bot this role takes it "offline".
        offline_bot_role_id = 1103676632667017266
        bot_member = message.guild.get_member(bot.user.id)
        if any(role.id == offline_bot_role_id for role in bot_member.roles):
            print(f"{message.author} The bot is offline or under maintenance. (Remove the offline-bot role to bring it online)")
            return False

        # Role allow-list (unused locals from the original removed).
        huggingfolks_role_id = 897376942817419265
        fellows_role_id = 963431900825919498
        contentcreator_role_id = 928589475968323636
        betatester_role_id = 1113511652990668893

        allowed_role_ids = [huggingfolks_role_id, fellows_role_id, contentcreator_role_id, betatester_role_id]
        user_roles = message.author.roles
        has_allowed_role = any(role.id in allowed_role_ids for role in user_roles)
        if not has_allowed_role:
            # Fix: this previously read `ctx.author`, but no `ctx` exists in
            # this function — it always raised NameError on this path.
            print(f"{message.author} does not have any of the required roles to activate the on_message check")
            return False

        return True

    except Exception as e:
        print(f"Error: on_message_safetychecks failed somewhere, command will not continue {e}")
|
|
|
|
|
@bot.command()
async def deepfloydifdemo(ctx):
    """Post a canned example of the !deepfloydif command in a fresh thread."""
    try:
        demo_thread = await ctx.message.create_thread(name=f'{ctx.author} Demo Thread')
        await demo_thread.send(f'{ctx.author.mention} Here is a demo for the !deepfloydif command!')
        await asyncio.sleep(0.5)
        await demo_thread.send(f'https://cdn.discordapp.com/attachments/932563860597121054/1113483403258499142/image.png')
    except Exception as err:
        print(f"Error: {err}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
|
|
|
@bot.command()
async def jojodemo(ctx):
    """Post a canned example of the !jojo command in a fresh thread, then archive it."""
    thread = None  # Fix: the except branch referenced `thread` even when create_thread failed.
    try:
        thread = await ctx.message.create_thread(name=f'JoJo Demo {ctx.author} ')
        await thread.send(f'{ctx.author.mention} Here is a demo for the !jojo command!')
        await asyncio.sleep(0.5)
        await thread.send(f'https://cdn.discordapp.com/attachments/932563860597121054/1114220616199966810/image.png')
        await thread.edit(archived=True)
    except Exception as e:
        print(f"Error: {e}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        if thread is not None:  # only archive if the thread actually exists
            await thread.edit(archived=True)
|
|
|
@bot.command()
async def sketchdemo(ctx):
    """Post a canned example of the !sketch command in a fresh thread, then archive it."""
    thread = None  # Fix: the except branch referenced `thread` even when create_thread failed.
    try:
        thread = await ctx.message.create_thread(name=f'Sketch Demo {ctx.author} ')
        await thread.send(f'{ctx.author.mention} Here is a demo for the !sketch command!')
        await asyncio.sleep(0.5)
        await thread.send(f'https://cdn.discordapp.com/attachments/932563860597121054/1114220716498370641/image.png')
        await thread.edit(archived=True)
    except Exception as e:
        print(f"Error: {e}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        if thread is not None:  # only archive if the thread actually exists
            await thread.edit(archived=True)
|
|
|
@bot.command()
async def spideydemo(ctx):
    """Post a canned example of the !spidey command in a fresh thread, then archive it."""
    thread = None  # Fix: the except branch referenced `thread` even when create_thread failed.
    try:
        thread = await ctx.message.create_thread(name=f'Spidey Demo {ctx.author} ')
        await thread.send(f'{ctx.author.mention} Here is a demo for the !spidey command!')
        await asyncio.sleep(0.5)
        await thread.send(f'https://cdn.discordapp.com/attachments/932563860597121054/1114220798085959690/image.png')
        await thread.edit(archived=True)
    except Exception as e:
        print(f"Error: {e}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        if thread is not None:  # only archive if the thread actually exists
            await thread.edit(archived=True)
|
|
|
|
|
@bot.command()
async def jojo(ctx):
    """Apply the JoJoGAN 'JoJo' style to the image attached to the command message.

    Only runs in the dedicated jojo channel after safetychecks(); posts the
    result in a new thread and mirrors success/failure as reactions on the
    command message.
    """
    thread = None  # Fix: the except branch referenced `thread`, which was unbound
    # whenever the error occurred before (or during) thread creation.
    try:
        if await safetychecks(ctx):
            if ctx.channel.id == 1114217739473649764:
                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                thread = await ctx.message.create_thread(name=f'Jojo | {ctx.author}', auto_archive_duration=60)
                if ctx.message.attachments:
                    await thread.send(f'{ctx.author.mention} Generating images in thread, can take ~1 minute...yare yare, daze ...')
                    attachment = ctx.message.attachments[0]
                    style = 'JoJo'
                    # Run the blocking Gradio call off the event loop.
                    im = await asyncio.get_running_loop().run_in_executor(None, jojogan.predict, attachment.url, style)
                    await thread.send(f'{ctx.author.mention} Here is the {style} version of it', file=discord.File(im))
                    await ctx.message.add_reaction('<:agree:1098629085955113011>')
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                    await thread.edit(archived=True)
                else:
                    await thread.send(f"{ctx.author.mention} No attachments to be found...Can't animify dat! Try sending me an image π")
                    await ctx.message.add_reaction('<:disagree:1098628957521313892>')
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                    await thread.edit(archived=True)
    except Exception as e:
        print(f"Error: {e}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        if thread is not None:  # thread-side error reporting only if a thread exists
            await fullqueue(e, thread)
            await thread.send(f"{ctx.author.mention} Error: {e}")
            await thread.edit(archived=True)
|
|
|
|
|
|
|
|
|
@bot.command()
async def spidey(ctx):
    """Apply the JoJoGAN 'Spider-Verse' style to the image attached to the command message.

    Only runs in the dedicated spidey channel after safetychecks(); posts the
    result in a new thread and mirrors success/failure as reactions.
    """
    thread = None  # Fix: the except branch referenced `thread`, which was unbound
    # whenever the error occurred before (or during) thread creation.
    try:
        if await safetychecks(ctx):
            if ctx.channel.id == 1114218191594471514:
                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                thread = await ctx.message.create_thread(name=f'Spider-verse | {ctx.author}', auto_archive_duration=60)
                if ctx.message.attachments:
                    await thread.send(f'{ctx.author.mention} Generating images in thread, can take ~1 minute...')
                    attachment = ctx.message.attachments[0]
                    style = 'Spider-Verse'
                    # Run the blocking Gradio call off the event loop.
                    im = await asyncio.get_running_loop().run_in_executor(None, jojogan.predict, attachment.url, style)
                    await thread.send(f'{ctx.author.mention} Here is the {style} version of it', file=discord.File(im))
                    await ctx.message.add_reaction('<:agree:1098629085955113011>')
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                    await thread.edit(archived=True)
                else:
                    await thread.send(f"{ctx.author.mention} No attachments to be found...Can't animify dat! Try sending me an image π")
                    await ctx.message.add_reaction('<:disagree:1098628957521313892>')
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                    await thread.edit(archived=True)
    except Exception as e:
        print(f"Error: {e}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        if thread is not None:  # thread-side error reporting only if a thread exists
            await fullqueue(e, thread)
            await thread.send(f"{ctx.author.mention} Error: {e}")
            await thread.edit(archived=True)
|
|
|
|
|
@bot.command()
async def sketch(ctx):
    """Apply the JoJoGAN 'sketch' style to the image attached to the command message.

    Only runs in the dedicated sketch channel after safetychecks(); posts the
    result in a new thread and mirrors success/failure as reactions.
    """
    thread = None  # Fix: the except branch referenced `thread`, which was unbound
    # whenever the error occurred before (or during) thread creation.
    try:
        if await safetychecks(ctx):
            if ctx.channel.id == 1114218145343877180:
                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                thread = await ctx.message.create_thread(name=f'Sketch | {ctx.author}', auto_archive_duration=60)
                if ctx.message.attachments:
                    await thread.send(f'{ctx.author.mention} Generating images in thread, can take ~1 minute...')
                    attachment = ctx.message.attachments[0]
                    # Run the blocking Gradio call off the event loop.
                    im = await asyncio.get_running_loop().run_in_executor(None, jojogan.predict, attachment.url, 'sketch')
                    await thread.send(f'{ctx.author.mention} Here is the sketch version of it', file=discord.File(im))
                    await ctx.message.add_reaction('<:agree:1098629085955113011>')
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                    await thread.edit(archived=True)
                else:
                    await thread.send(f"{ctx.author.mention} No attachments to be found...Can't animify dat! Try sending me an image π")
                    await ctx.message.add_reaction('<:disagree:1098628957521313892>')
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                    await thread.edit(archived=True)
    except Exception as e:
        print(f"Error: {e}")
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        if thread is not None:  # thread-side error reporting only if a thread exists
            await fullqueue(e, thread)
            await thread.send(f"{ctx.author.mention} Error: {e}")
            await thread.edit(archived=True)
|
|
|
async def fullqueue(e, thread):
    """Translate known backend failure messages into a friendly note in *thread*.

    Unrecognized errors are silently ignored — callers report those themselves.
    """
    text = str(e)
    queue_markers = (
        "Error: Expecting value: line 1 column 1 (char 0)",
        "Error: Queue is full! Please try again.",
    )
    if any(marker in text for marker in queue_markers):
        await thread.send("Queue is full! Please try again.")
    elif "local variable 'stage_1_results' referenced before assignment" in text:
        await thread.send("Space is building! Please try again after a few minutes.")
|
|
|
|
|
|
|
|
|
|
|
def inference(prompt):
    """Run DeepFloyd IF stage 1 (64px generation) for *prompt* on the IF Space.

    Returns [gallery_dir, param_path, result_path] as produced by the Space's
    /generate64 endpoint. Blocking — callers run this in an executor.
    """
    stage_1_results, stage_1_param_path, stage_1_result_path = df.predict(
        prompt,
        '',                        # negative prompt (none)
        random.randint(0, 1000),   # seed
        4,                         # number of images
        7,                         # guidance scale
        'smart50',                 # custom timesteps schedule
        50,                        # number of inference steps
        api_name='/generate64',
    )
    return [stage_1_results, stage_1_param_path, stage_1_result_path]
|
|
|
|
|
def inference2(index, stage_1_result_path):
    """Upscale the stage-1 image at *index* to 256px via the Space's /upscale256 endpoint.

    Returns the path of the upscaled image. Blocking — callers run this in an
    executor.
    """
    return df.predict(
        stage_1_result_path,
        index,       # which of the four stage-1 images to upscale
        0,           # seed
        4,           # guidance scale
        'smart50',   # custom timesteps schedule
        50,          # number of inference steps
        api_name='/upscale256',
    )
|
|
|
|
|
async def react1234(reaction_emojis, combined_image_dfif):
    """Add every emoji in *reaction_emojis*, in order, to the posted grid message."""
    for reaction in reaction_emojis:
        await combined_image_dfif.add_reaction(reaction)
|
|
|
|
|
@bot.command()
async def deepfloydif(ctx, *, prompt: str):
    """Generate a 2x2 grid of 64px DeepFloyd IF images for *prompt*.

    Posts the grid in a new thread and adds number reactions; on_reaction_add
    later triggers the 256px upscale of the chosen image. The three inner try
    blocks deliberately fall through to each other on failure (see the
    "error code: 50035" note) — do not add early returns without confirming
    that behavior is no longer wanted.
    """
    thread = None
    try:
        # --- phase 1: checks, reactions, thread creation, parameters ---
        try:
            if await safetychecks(ctx):
                if ctx.channel.id == 1113182673859518514:
                    await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                    # the command message id is embedded in the grid filename so
                    # on_reaction_add can find its way back to this message
                    dfif_command_message_id = ctx.message.id
                    thread = await ctx.message.create_thread(name=f'DeepfloydIF | {prompt}', auto_archive_duration=60)

                    # NOTE(review): these locals mirror inference()'s hard-coded
                    # arguments but are unused here — presumably kept as
                    # documentation of the generation settings.
                    negative_prompt = ''
                    seed = random.randint(0, 1000)

                    number_of_images = 4
                    guidance_scale = 7
                    custom_timesteps_1 = 'smart50'
                    number_of_inference_steps = 50
                    api_name = '/generate64'
                    await thread.send(f'{ctx.author.mention}Generating images in thread, can take ~1 minute...')

        except Exception as e:
            print(f"Error: {e}")
            if thread is None:
                thread = await ctx.message.create_thread(name=f'DFIF1 Error')
            await thread.send(f"{ctx.author.mention} Error before stage 1 generation, {e}. If error code: 50035, upscale can still work.")
            await fullqueue(e, thread)
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
            await ctx.message.add_reaction('<:disagree:1098628957521313892>')
            await thread.edit(archived=True)

        # --- phase 2: run the blocking stage-1 generation off the event loop ---
        try:
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(None, inference, prompt)
            await thread.send(f'{ctx.author.mention}after executor')

            stage_1_results = result[0]
            stage_1_result_path = result[2]

            # strip the "/tmp/" prefix; on_reaction_add re-adds it later
            partialpath = stage_1_result_path[5:]

        except Exception as e:
            print(f"Error: {e}")
            if thread is None:
                thread = await ctx.message.create_thread(name=f'Generation Error')
            await thread.send(f"{ctx.author.mention} Error during stage 1 generation, {e}")
            await fullqueue(e, thread)
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
            await ctx.message.add_reaction('<:disagree:1098628957521313892>')
            await thread.edit(archived=True)

        # --- phase 3: stitch the four pngs into a 2x2 grid and post it ---
        try:
            png_files = list(glob.glob(f"{stage_1_results}/**/*.png"))

            if png_files:
                # assumes the gallery directory holds exactly 4 pngs — TODO confirm
                first_png = png_files[0]
                second_png = png_files[1]
                third_png = png_files[2]
                fourth_png = png_files[3]

                first_png_path = os.path.join(stage_1_results, first_png)
                second_png_path = os.path.join(stage_1_results, second_png)
                third_png_path = os.path.join(stage_1_results, third_png)
                fourth_png_path = os.path.join(stage_1_results, fourth_png)

                img1 = Image.open(first_png_path)
                img2 = Image.open(second_png_path)
                img3 = Image.open(third_png_path)
                img4 = Image.open(fourth_png_path)

                # 2x2 canvas sized from the first image; all four assumed equal-sized
                combined_image = Image.new('RGB', (img1.width * 2, img1.height * 2))

                combined_image.paste(img1, (0, 0))
                combined_image.paste(img2, (img1.width, 0))
                combined_image.paste(img3, (0, img1.height))
                combined_image.paste(img4, (img1.width, img1.height))

                # filename encodes <partialpath><command message id> for on_reaction_add
                combined_image_path = os.path.join(stage_1_results, f'{partialpath}{dfif_command_message_id}.png')
                combined_image.save(combined_image_path)

                with open(combined_image_path, 'rb') as f:
                    combined_image_dfif = await thread.send(f'{ctx.author.mention}React with the image number you want to upscale!', file=discord.File(
                        f, f'{partialpath}{dfif_command_message_id}.png'))

                # number emoji 1-4 used as the upscale selector
                emoji_list = ['βοΈ', 'βοΈ', 'βοΈ', 'βοΈ']
                await react1234(emoji_list, combined_image_dfif)

                await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                await ctx.message.add_reaction('<:agree:1098629085955113011>')
            # disabled alternative: post the four images individually
            ''' individual images
            if png_files:
                for i, png_file in enumerate(png_files):
                    png_file_path = os.path.join(stage_1_results, png_file)
                    img = Image.open(png_file_path)
                    image_path = os.path.join(stage_1_results, f'{i+1}{partialpath}.png')
                    img.save(image_path)
                    with open(image_path, 'rb') as f:
                        await thread.send(f'{ctx.author.mention}Image {i+1}', file=discord.File(f, f'{i+1}{partialpath}.png'))
                    await asyncio.sleep(1)

            '''

        except Exception as e:
            print(f"Error: {e}")
            if thread is None:
                thread = await ctx.message.create_thread(name=f'Posting Error')
            await thread.send(f"{ctx.author.mention} Encountered error while posting combined image in thread, {e}")
            await fullqueue(e, thread)
            await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
            await ctx.message.add_reaction('<:disagree:1098628957521313892>')
            await thread.edit(archived=True)

    except Exception as e:
        # catch-all: anything the phase handlers themselves raised
        print(f"Error: {e}")
        if thread is None:
            thread = await ctx.message.create_thread(name=f'deepfloydif Error')
        await thread.send(f"{ctx.author.mention} Overall error with deepfloydif, {e}")
        await fullqueue(e, thread)
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
        await thread.edit(archived=True)
|
|
|
|
|
async def dfif2(index: int, stage_1_result_path, thread, dfif_command_message_id):
    """Upscale the selected stage-1 image (0-based *index*) and post it in *thread*.

    Mirrors progress/success/failure as reactions on the original command
    message, which is looked up in the thread's parent channel by id, then
    archives the thread.
    """
    try:
        parent_channel = thread.parent
        dfif_command_message = await parent_channel.fetch_message(dfif_command_message_id)
        await dfif_command_message.remove_reaction('<:agree:1098629085955113011>', bot.user)
        await dfif_command_message.add_reaction('<a:loading:1114111677990981692>')

        # Fix: `position` was unbound (UnboundLocalError) for any index outside
        # 0-3; a lookup with a neutral fallback is safe for all inputs.
        positions = {1: "top left", 2: "top right", 3: "bottom left", 4: "bottom right"}
        position = positions.get(index + 1, "selected")
        await thread.send(f"Upscaling the {position} image...")

        # Run the blocking Gradio upscale call off the event loop.
        loop = asyncio.get_running_loop()
        result_path = await loop.run_in_executor(None, inference2, index, stage_1_result_path)

        with open(result_path, 'rb') as f:
            await thread.send(f'Here is the upscaled image! :) ', file=discord.File(f, 'result.png'))

        await dfif_command_message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await dfif_command_message.add_reaction('<:agree:1098629085955113011>')
        await thread.edit(archived=True)

    except Exception as e:
        print(f"Error: {e}")
        parent_channel = thread.parent
        dfif_command_message = await parent_channel.fetch_message(dfif_command_message_id)
        await dfif_command_message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await dfif_command_message.add_reaction('<:disagree:1098628957521313892>')
        await thread.send(f"Error during stage 2 upscaling, {e}")
        await fullqueue(e, thread)
        await thread.edit(archived=True)
|
|
|
|
|
@bot.event
async def on_reaction_add(reaction, user):
    """Trigger the stage-2 upscale when the prompt author reacts with a number emoji.

    Only reacts inside threads of the deepfloydif channel, only on the bot's
    grid message (the one with an attachment), and only for the user that was
    mentioned in it. The stage-1 tmp path and the originating command message
    id are recovered from the attachment filename.
    """
    try:
        if user.bot:
            return
        thread = reaction.message.channel
        threadparentid = thread.parent.id
        if threadparentid != 1113182673859518514:
            return
        if not reaction.message.attachments:
            return
        # only the mentioned (prompting) user may pick an image
        if user.id != reaction.message.mentions[0].id:
            return

        attachment = reaction.message.attachments[0]
        image_name = attachment.filename
        # filename layout: <11-char tmp-dir suffix><command message id>.png
        partialpathmessageid = image_name[:-4]
        partialpath = partialpathmessageid[:11]
        messageid = partialpathmessageid[11:]
        fullpath = "/tmp/" + partialpath

        emoji = reaction.emoji
        # Fix: `index` was previously left unbound when a user reacted with any
        # other emoji, raising the NameError the old "known error" log noted.
        index = None
        if emoji == "βοΈ":
            index = 0
        elif emoji == "βοΈ":
            index = 1
        elif emoji == "βοΈ":
            index = 2
        elif emoji == "βοΈ":
            index = 3
        if index is None:
            return  # some unrelated emoji; ignore

        stage_1_result_path = fullpath
        dfif_command_message_id = messageid
        await dfif2(index, stage_1_result_path, thread, dfif_command_message_id)

    except Exception as e:
        print(f"Error: {e} (known error, does not cause issues, fix later)")
|
|
|
# --- Falcon chat state (module-level, in-memory only; lost on restart) ---
chathistory = None  # module-level default; falcon() rebinds its own local copies

falcon_users = []  # user ids that already started a falcon conversation

falcon_threads = []  # thread ids created for falcon conversations

falcon_dictionary = {}  # user id -> path of the latest full-generation json

falcon_userid_threadid_dictionary = {}  # user id -> that user's falcon thread id
|
|
|
@bot.command()
async def falcon(ctx, *, prompt: str):
    """Chat with the Falcon model via the falcon-chat Space.

    In the falcon channel, !falcon starts a per-user conversation in a new
    thread; inside that thread, subsequent !falcon calls continue the
    conversation using the stored chat-history file.
    """
    try:
        global falcon_users
        global falcon_threads
        global falcon_dictionary
        global falcon_userid_threadid_dictionary
        # Fix: this line previously read `if ctx.author not user.bot:`, which is
        # a SyntaxError (and `user` does not exist here) — the file could not
        # even be imported.
        if not ctx.author.bot:
            # System prompt sent with every generation (hoisted: it was
            # duplicated verbatim in both branches below).
            instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."

            if ctx.channel.id == 1116089829147557999:
                if ctx.author.id not in falcon_users:
                    # start a brand-new conversation in a dedicated thread
                    await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                    thread = await ctx.message.create_thread(name=f'{ctx.author}')
                    falcon_users = [ctx.author.id] + falcon_users
                    falcon_threads = [thread.id] + falcon_threads
                    await thread.send(f"Thread created")

                    # fn_index=5 returns a fresh, empty chat history
                    chathistory = falconclient.predict(
                        fn_index=5
                    )
                    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
                    while not job.done():
                        status = job.status()  # poll until generation finishes
                    file_paths = job.outputs()
                    full_generation = file_paths[-1]  # last file holds the full text
                    with open(full_generation, 'r') as file:
                        data = json.load(file)
                        output_text = data[-1][-1]  # latest assistant message
                    falcon_dictionary[ctx.author.id] = full_generation
                    falcon_userid_threadid_dictionary[ctx.author.id] = thread.id
                    print(output_text)
                    await thread.send(f"{output_text}")
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
                elif ctx.author.id in falcon_users:
                    await ctx.reply(f"{ctx.author.mention}, you already have an existing conversation! ")

            if ctx.channel.id in falcon_threads:
                # continue an existing conversation inside its thread
                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
                thread_id_test = falcon_userid_threadid_dictionary[ctx.author.id]
                if ctx.channel.id == thread_id_test:
                    chathistory = falcon_dictionary[ctx.author.id]
                    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
                    while not job.done():
                        status = job.status()  # poll until generation finishes
                    file_paths = job.outputs()
                    full_generation = file_paths[-1]
                    with open(full_generation, 'r') as file:
                        data = json.load(file)
                        output_text = data[-1][-1]
                    falcon_dictionary[ctx.author.id] = full_generation
                    print(output_text)
                    await ctx.reply(f"{output_text}")
                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)

    except Exception as e:
        print(f"Error: {e}")
        await ctx.reply(f"{e} cc <@811235357663297546> (falconprivate error)")
        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
        await ctx.message.add_reaction('<:disagree:1098628957521313892>')
|
|
|
''' |
|
@bot.event |
|
async def on_message(message): |
|
await asyncio.sleep(5) |
|
# message.author.roles |
|
# bot |
|
# channel? |
|
try: |
|
if await on_message_safetychecks(message): |
|
global falcon_userid_threadid_dictionary |
|
global falcon_dictionary |
|
# is this our unique channel (falcon thread?) |
|
if message.author.id in falcon_userid_threadid_dictionary: |
|
if message.channel.id == falcon_userid_threadid_dictionary[message.author.id]: |
|
await message.add_reaction('<a:loading:1114111677990981692>') # helps the bot appear responsive |
|
chathistory = falcon_dictionary[message.author.id] |
|
instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, " \ |
|
"called Falcon, and a human user, called User. In the following interactions, " \ |
|
"User and Falcon will converse in natural language, and Falcon will answer User's questions. " \ |
|
"Falcon was built to be respectful, polite and inclusive. " \ |
|
"Falcon was built by the Technology Innovation Institute in Abu Dhabi. " \ |
|
"Falcon will never decline to answer a question, and always attempts to give an answer that " \ |
|
"User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins." |
|
# handles the blocking task in the background so the discord bot stays responsive |
|
# for added security, we could verify if message.content contains symbols used in commands; But those will fail anyways (see safetychecks) |
|
job = falconclient.submit(message.content, chathistory, instructions, 0.8, 0.9, fn_index=1) # job is similar to run_in_executor (but better) |
|
while job.done() == False: |
|
status = job.status() # this could be used for debugging etc |
|
#print(status) |
|
else: |
|
file_paths = job.outputs() # file_paths = ["tmp123.json", "tmp456.json," etc...] |
|
full_generation = file_paths[-1] # the last filepath contains the full generated text |
|
with open(full_generation, 'r') as file: |
|
data = json.load(file) |
|
output_text = data[-1][-1] # we only need the very last/latest string for the discord bot to output |
|
falcon_dictionary[message.author.id] = full_generation # update our unique conversation |
|
print(output_text) |
|
await message.reply(f"{output_text}") # reply to user's prompt (whatever they typed) |
|
await message.remove_reaction('<a:loading:1114111677990981692>', bot.user) |
|
except Exception as e: |
|
print(f"Error: {e}") |
|
if message.channel.id == 1116089829147557999: |
|
await message.reply(f"{e} cc <@811235357663297546> (falcon error)") # ping lunarflu if something breaks |
|
await asyncio.sleep(5) |
|
''' |
|
|
|
|
|
def run_bot():
    """Blocking discord.py entry point; runs on a worker thread so Gradio can own the main thread."""
    bot.run(DISCORD_TOKEN)

# Start Discord in the background; demo.launch() below blocks the main thread.
threading.Thread(target=run_bot).start()
|
|
|
def greet(name):
    """Return a friendly greeting for *name*."""
    parts = ["Hello ", name, "!"]
    return "".join(parts)
|
|
|
# Minimal Gradio app so the hosting Space exposes a web endpoint and stays alive
# while the Discord bot runs on its background thread.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")

# NOTE(review): queue(concurrency_count=...) is the Gradio 3.x signature —
# confirm the pinned gradio version before upgrading, as 4.x removed it.
demo.queue(concurrency_count=20)

demo.launch()
|
|