Synced repo using 'sync_with_huggingface' Github Action
app.py CHANGED
@@ -13,7 +13,7 @@ from discord.ext import commands
 from gradio_client import Client
 from PIL import Image
 #from ratelimiter import RateLimiter
-
+
 import asyncio
 import concurrent.futures
 import multiprocessing
@@ -133,7 +133,43 @@ async def safetychecks(ctx):
         print(f"Error: safetychecks failed somewhere, command will not continue, {e}")
         await ctx.message.reply(f"❌ <@811235357663297546> SC failed somewhere ❌ {e}") # this will always ping, as long as the bot has access to the channel
         await ctx.message.add_reaction(failure_emoji)
-
+#------------------------------------------------------------------------------------------------------------------------------
+async def on_message_safetychecks(message):
+    failure_emoji = '<:disagree:1098628957521313892>'
+    try:
+        if message.author.bot:
+            print(f"The bot will ignore its own messages.")
+            return False
+
+        # check if the bot is offline
+        offline_bot_role_id = 1103676632667017266
+        bot_member = message.guild.get_member(bot.user.id)
+        if any(role.id == offline_bot_role_id for role in bot_member.roles):
+            print(f"{message.author} The bot is offline or under maintenance. (Remove the offline-bot role to bring it online)")
+            return False
+
+        #✅✅ check if the user has the required role(s)
+        guild_id = 879548962464493619
+        verified_role_id = 900063512829755413 # @verified = 900063512829755413, HF = 897376942817419265, fellows = 963431900825919498
+        huggingfolks_role_id = 897376942817419265
+        fellows_role_id = 963431900825919498
+        contentcreator_role_id = 928589475968323636
+        betatester_role_id = 1113511652990668893
+
+        allowed_role_ids = [huggingfolks_role_id, fellows_role_id, contentcreator_role_id, betatester_role_id]
+        guild = bot.get_guild(guild_id)
+        user_roles = message.author.roles
+        has_allowed_role = any(role.id in allowed_role_ids for role in user_roles)
+        if not has_allowed_role:
+            print(f"{message.author} does not have any of the required roles to activate the on_message check")
+            return False
+
+        return True
+
+    except Exception as e:
+        print(f"Error: on_message_safetychecks failed somewhere, command will not continue {e}")
+#------------------------------------------------------------------------------------------------------------------------------
+
 @bot.command()
 async def deepfloydifdemo(ctx):
     try:
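Note that on_message_safetychecks is a coroutine, so callers must await it: a bare on_message_safetychecks(message) returns a coroutine object, which is always truthy and would let every message through. A minimal usage sketch (the early-return shape is illustrative, not the committed code):

    @bot.event
    async def on_message(message):
        if not await on_message_safetychecks(message):
            return  # bot message, offline-bot role set, or user lacks an allowed role
        ...  # safe to handle the message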
@@ -570,65 +606,66 @@ falcon_userid_threadid_dictionary = {}
 @bot.command()
 async def falcon(ctx, *, prompt: str):
     # todo: need to be careful with these, rework into something simpler
-    global falcon_users
-    global falcon_threads
-    global falcon_dictionary
-    global falcon_userid_threadid_dictionary
     try:
-        if ctx.channel.id == 1116089829147557999: # initial thread creation inside #falcon
-            if ctx.author.id not in falcon_users: # create a new one
-                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
-                thread = await ctx.message.create_thread(name=f'{ctx.author}')
-                falcon_users = [ctx.author.id] + falcon_users
-                falcon_threads = [thread.id] + falcon_threads
-                await thread.send(f"Thread created")
-
-                # initial generation here
-                chathistory = falconclient.predict(
-                    fn_index=5
-                ) # []
-                instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
-                job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking, similar to run_in_executor (but better)
-                while job.done() == False:
-                    status = job.status() # could be spammy, let's test anyways
-                    #print(status)
-                else:
-                    file_paths = job.outputs()
-                    full_generation = file_paths[-1] # tmp12345678.json
-                    with open(full_generation, 'r') as file:
-                        data = json.load(file)
-                    output_text = data[-1][-1] # we output this as the bot
-                    falcon_dictionary[ctx.author.id] = full_generation
-                    falcon_userid_threadid_dictionary[ctx.author.id] = thread.id
-                    print(output_text)
-                    await thread.send(f"{output_text}")
-                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
-        elif ctx.author.id in falcon_users:
-            await ctx.reply(f"{ctx.author.mention}, you already have an existing conversation! ")
-        #------------------------------------
-        if ctx.channel.id in falcon_threads: # subsequent chatting inside threads of #falcon
-            await ctx.message.add_reaction('<a:loading:1114111677990981692>')
-            #await ctx.reply(f"inside thread, only {ctx.author} is allowed to chat here")
-            # post all other generations here
-            thread_id_test = falcon_userid_threadid_dictionary[ctx.author.id]
-            if ctx.channel.id == thread_id_test:
-                chathistory = falcon_dictionary[ctx.author.id]
-
-                instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
-                job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking, similar to run_in_executor (but better)
-                while job.done() == False:
-                    status = job.status() # could be spammy, let's test anyways
-                    #print(status)
-                else:
-                    file_paths = job.outputs()
-                    full_generation = file_paths[-1] # tmp12345678.json
-                    with open(full_generation, 'r') as file:
-                        data = json.load(file)
-                    output_text = data[-1][-1] # we output this as the bot
-                    falcon_dictionary[ctx.author.id] = full_generation
-                    print(output_text)
-                    await ctx.reply(f"{output_text}")
-                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
+        global falcon_users
+        global falcon_threads
+        global falcon_dictionary
+        global falcon_userid_threadid_dictionary
+        if await safetychecks(ctx):
+            if ctx.channel.id == 1116089829147557999: # initial thread creation inside #falcon
+                if ctx.author.id not in falcon_users: # create a new one
+                    await ctx.message.add_reaction('<a:loading:1114111677990981692>')
+                    thread = await ctx.message.create_thread(name=f'{ctx.author}')
+                    falcon_users = [ctx.author.id] + falcon_users
+                    falcon_threads = [thread.id] + falcon_threads
+                    await thread.send(f"Thread created")
+
+                    # initial generation here
+                    chathistory = falconclient.predict(
+                        fn_index=5
+                    ) # []
+                    instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
+                    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking, similar to run_in_executor (but better)
+                    while job.done() == False:
+                        status = job.status() # could be spammy, let's test anyways
+                        #print(status)
+                    else:
+                        file_paths = job.outputs()
+                        full_generation = file_paths[-1] # tmp12345678.json
+                        with open(full_generation, 'r') as file:
+                            data = json.load(file)
+                        output_text = data[-1][-1] # we output this as the bot
+                        falcon_dictionary[ctx.author.id] = full_generation # 1234567890: tmp12345678.json
+                        falcon_userid_threadid_dictionary[ctx.author.id] = thread.id
+                        print(output_text)
+                        await thread.send(f"{output_text}")
+                        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
+            elif ctx.author.id in falcon_users:
+                await ctx.reply(f"{ctx.author.mention}, you already have an existing conversation! ")
+            #------------------------------------
+            if ctx.channel.id in falcon_threads: # subsequent chatting inside threads of #falcon
+                await ctx.message.add_reaction('<a:loading:1114111677990981692>')
+                #await ctx.reply(f"inside thread, only {ctx.author} is allowed to chat here")
+                # post all other generations here
+                thread_id_test = falcon_userid_threadid_dictionary[ctx.author.id]
+                if ctx.channel.id == thread_id_test:
+                    chathistory = falcon_dictionary[ctx.author.id]
+
+                    instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
+                    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking, similar to run_in_executor (but better)
+                    while job.done() == False:
+                        status = job.status() # could be spammy, let's test anyways
+                        #print(status)
+                    else:
+                        file_paths = job.outputs()
+                        full_generation = file_paths[-1] # tmp12345678.json
+                        with open(full_generation, 'r') as file:
+                            data = json.load(file)
+                        output_text = data[-1][-1] # we output this as the bot
+                        falcon_dictionary[ctx.author.id] = full_generation
+                        print(output_text)
+                        await ctx.reply(f"{output_text}")
+                        await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
 
     except Exception as e:
         print(f"Error: {e}")
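falconclient.submit comes from the gradio_client Job API: it returns immediately, the generation runs remotely, and job.done(), job.status(), and job.outputs() are available for polling. As committed, though, while job.done() == False: spins on the event-loop thread, so the bot cannot process other Discord events until the generation finishes. A minimal non-blocking sketch against the same API (the one-second poll interval is an arbitrary choice):

    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
    while not job.done():
        await asyncio.sleep(1)  # yield to the event loop between polls
    full_generation = job.outputs()[-1]  # each output file is a JSON list of [user, bot] turn pairs
    with open(full_generation, 'r') as file:
        output_text = json.load(file)[-1][-1]  # last element of the last pair = latest bot reply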
@@ -638,39 +675,46 @@ async def falcon(ctx, *, prompt: str):
 #----------------------------------------------------------------------------------------------------------------------------
 @bot.event
 async def on_message(message):
-    … (30 lines of the previous on_message body, not recoverable)
+    await asyncio.sleep(5)
+    # message.author.roles
+    # bot
+    # channel?
+    try:
+        if await on_message_safetychecks(message):
+            global falcon_userid_threadid_dictionary
+            global falcon_dictionary
+            # is this our unique channel (falcon thread?)
+            if message.channel.id == falcon_userid_threadid_dictionary.get(message.author.id): # .get() avoids a KeyError for users with no active thread
+                await message.add_reaction('<a:loading:1114111677990981692>') # helps the bot appear responsive
+                chathistory = falcon_dictionary[message.author.id]
+                instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, " \
+                               "called Falcon, and a human user, called User. In the following interactions, " \
+                               "User and Falcon will converse in natural language, and Falcon will answer User's questions. " \
+                               "Falcon was built to be respectful, polite and inclusive. " \
+                               "Falcon was built by the Technology Innovation Institute in Abu Dhabi. " \
+                               "Falcon will never decline to answer a question, and always attempts to give an answer that " \
+                               "User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
+                # handles the blocking task in the background so the discord bot stays responsive
+                # for added security, we could verify if message.content contains symbols used in commands; But those will fail anyways (see safetychecks)
+                job = falconclient.submit(message.content, chathistory, instructions, 0.8, 0.9, fn_index=1) # job is similar to run_in_executor (but better)
+                while job.done() == False:
+                    status = job.status() # this could be used for debugging etc
+                    #print(status)
+                else:
+                    file_paths = job.outputs() # file_paths = ["tmp123.json", "tmp456.json", etc...]
+                    full_generation = file_paths[-1] # the last filepath contains the full generated text
+                    with open(full_generation, 'r') as file:
+                        data = json.load(file)
+                    output_text = data[-1][-1] # we only need the very last/latest string for the discord bot to output
+                    falcon_dictionary[message.author.id] = full_generation # update our unique conversation
+                    print(output_text)
+                    await message.reply(f"{output_text}") # reply to user's prompt (whatever they typed)
+                    await message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
     except Exception as e:
         print(f"Error: {e}")
-
+        if message.channel.id == 1116089829147557999:
+            await message.reply(f"{e} cc <@811235357663297546> (falcon error)") # ping lunarflu if something breaks
+            await asyncio.sleep(5)
 #----------------------------------------------------------------------------------------------------------------------------
 # hackerllama magic to run the bot in a Hugging Face Space
 def run_bot():
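The body of run_bot() falls outside this diff. The usual hackerllama pattern keeps the Space alive by launching a Gradio app on the main thread and running the Discord client on a background thread; a sketch of that shape, assuming a DISCORD_TOKEN secret (both the token name and the placeholder UI are assumptions, not part of this commit):

    import threading
    import gradio as gr

    def run_bot():
        bot.run(DISCORD_TOKEN)  # assumed: token read from the Space's secrets

    threading.Thread(target=run_bot).start()

    with gr.Blocks() as demo:
        gr.Markdown("The Discord bot for this Space runs in the background.")
    demo.queue().launch()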