Synced repo using 'sync_with_huggingface' Github Action
app.py CHANGED
@@ -560,15 +560,7 @@ async def on_reaction_add(reaction, user): # ctx = await bot.get_context(reac
 
     except Exception as e:
         print(f"Error: {e} (known error, does not cause issues, fix later)")
-
-
 #----------------------------------------------------------------------------------------------------------------------------
-
-# command 1 = creates thread, name = {ctx.author}
-# on_message =
-
-# custom path based on name
-
 chathistory = None
 falcon_users = []
 falcon_threads = []
@@ -576,16 +568,13 @@ falcon_dictionary = {}
 falcon_userid_threadid_dictionary = {}
 
 @bot.command()
-async def
-#todo
-
-
+async def falcon(ctx, *, prompt: str):
+    # todo: need to be careful with these, rework into something simpler
+    global falcon_users
+    global falcon_threads
+    global falcon_dictionary
+    global falcon_userid_threadid_dictionary
     try:
-        global falcon_users
-        global falcon_threads
-        global falcon_chats
-        global falcon_dictionary
-        global falcon_userid_threadid_dictionary
         if ctx.channel.id == 1116089829147557999: # initial thread creation inside #falcon
             if ctx.author.id not in falcon_users: # create a new one
                 await ctx.message.add_reaction('<a:loading:1114111677990981692>')
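For readers skimming the hunks below, the four globals above act as a small in-memory session store. A hypothetical snapshot (all ids and filenames invented, not taken from the diff) of how they relate once a single user has an open conversation:

# invented example values -- not taken from the diff
falcon_users = [123456789012345678]                 # users who already have a conversation
falcon_threads = [987654321098765432]               # thread channels the bot has created
falcon_userid_threadid_dictionary = {
    123456789012345678: 987654321098765432,         # user id -> that user's thread id
}
falcon_dictionary = {
    123456789012345678: "tmp12345678.json",         # user id -> latest chathistory file
}

Everything is process-local, so a Space restart drops all open conversations; that matches the "# todo ... rework into something simpler" note in the hunk above.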
@@ -606,7 +595,6 @@ async def falconprivate(ctx, *, prompt: str):
                 else:
                     file_paths = job.outputs()
                     full_generation = file_paths[-1] # tmp12345678.json
-                    unique_file = "{}{}".format(ctx.author.id, ".json") # could use ID, more robust
                     with open(full_generation, 'r') as file:
                         data = json.load(file)
                     output_text = data[-1][-1] # we output this as the bot
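The repeated data[-1][-1] idiom relies on the Gradio Space writing chat history as a list of [user_message, bot_message] pairs; a minimal sketch of that assumption (the filename is a placeholder, echoing the diff's own tmp12345678.json comment, and the layout is inferred from its comments, not confirmed):

import json

# assumed layout: [["hi", "Hello! How can I help?"], ["and a joke?", "Sure: ..."]]
with open("tmp12345678.json", "r") as file:  # placeholder filename
    data = json.load(file)

latest_bot_reply = data[-1][-1]  # last [user, bot] pair, second element = newest bot message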
@@ -617,7 +605,7 @@ async def falconprivate(ctx, *, prompt: str):
                 await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
             elif ctx.author.id in falcon_users:
                 await ctx.reply(f"{ctx.author.mention}, you already have an existing conversation! ")
-
+        #------------------------------------
         if ctx.channel.id in falcon_threads: # subsequent chatting inside threads of #falcon
             await ctx.message.add_reaction('<a:loading:1114111677990981692>')
             #await ctx.reply(f"inside thread, only {ctx.author} is allowed to chat here")
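The "create a new one" branch itself falls outside these hunks, but with discord.py 2.x it would plausibly be a Message.create_thread call that also fills in the bookkeeping globals. The sketch below is a guess at that elided code, not part of the commit:

# hypothetical reconstruction of the elided thread-creation branch
thread = await ctx.message.create_thread(name=f"{ctx.author}", auto_archive_duration=60)
falcon_users.append(ctx.author.id)
falcon_threads.append(thread.id)
falcon_userid_threadid_dictionary[ctx.author.id] = thread.id  # lets on_message route replies later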
@@ -634,7 +622,6 @@ async def falconprivate(ctx, *, prompt: str):
             else:
                 file_paths = job.outputs()
                 full_generation = file_paths[-1] # tmp12345678.json
-                unique_file_2 = "{}{}".format(ctx.author.id, ".json") # could use ID, more robust
                 with open(full_generation, 'r') as file:
                     data = json.load(file)
                 output_text = data[-1][-1] # we output this as the bot
@@ -648,68 +635,44 @@ async def falconprivate(ctx, *, prompt: str):
         await ctx.reply(f"{e} cc <@811235357663297546> (falconprivate error)")
         await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
         await ctx.message.add_reaction('<:disagree:1098628957521313892>')
-
-@bot.command()
-async def falcon(ctx, *, prompt: str):
-    try:
-        if await safetychecks(ctx):
-            if ctx.channel.id == 1116089829147557999:
-                global chathistory
-
-                if chathistory is not None: # This handles all subsequent discussions/prompts to the chatbot/model
-                    instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
-                    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking
-                    while job.done() == False:
-                        status = job.status()
-                    else:
-                        file_paths = job.outputs()
-                        full_generation = file_paths[-1]
-                        chathistory = full_generation # we store this and pass it in the loop
-                        with open(full_generation, 'r') as file:
-                            data = json.load(file)
-                        output_text = data[-1][-1] # we output this as the bot
-                        print(output_text)
-                        await ctx.reply(f"{output_text}")
-
-                if chathistory == None: # This handles the start of a conversation/chathistory file with the model
-                    chathistory = falconclient.predict(
-                        fn_index=5
-                    )
-                    instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
-                    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking
-                    while job.done() == False:
-                        status = job.status()
-                    else:
-                        file_paths = job.outputs()
-                        full_generation = file_paths[-1]
-                        chathistory = full_generation # we store this and pass it in the loop
-                        with open(full_generation, 'r') as file:
-                            data = json.load(file)
-                        output_text = data[-1][-1] # we output this as the bot
-                        await ctx.reply(f"{output_text}")
-
-    except Exception as e:
-        print(f"Error: {e}")
-        await ctx.reply(f"{e} cc <@811235357663297546> (falcon error)")
 #----------------------------------------------------------------------------------------------------------------------------
-'''
 @bot.event
 async def on_message(message):
+    global falcon_userid_threadid_dictionary
+    global falcon_dictionary
     try:
-
-
-
-
+        # is this our unique channel (falcon thread?)
+        if message.channel.id == falcon_userid_threadid_dictionary[message.author.id]:
+            await message.add_reaction('<a:loading:1114111677990981692>') # helps the bot appear responsive
+            chathistory = falcon_dictionary[message.author.id]
+            instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, " \
+                           "called Falcon, and a human user, called User. In the following interactions, " \
+                           "User and Falcon will converse in natural language, and Falcon will answer User's questions. " \
+                           "Falcon was built to be respectful, polite and inclusive. " \
+                           "Falcon was built by the Technology Innovation Institute in Abu Dhabi. " \
+                           "Falcon will never decline to answer a question, and always attempts to give an answer that " \
+                           "User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
+            # handles the blocking task in the background so the discord bot stays responsive
+            # for added security, we could verify if message.content contains symbols used in commands; But those will fail anyways (see safetychecks)
+            job = falconclient.submit(message.content, chathistory, instructions, 0.8, 0.9, fn_index=1) # job is similar to run_in_executor (but better)
+            while job.done() == False:
+                #status = job.status() # this could be used for debugging etc
+                #print(status)
+            else:
+                file_paths = job.outputs() # file_paths = ["tmp123.json", "tmp456.json," etc...]
+                full_generation = file_paths[-1] # the last filepath contains the full generated text
+                with open(full_generation, 'r') as file:
+                    data = json.load(file)
+                output_text = data[-1][-1] # we only need the very last/latest string for the discord bot to output
+                falcon_dictionary[message.author.id] = full_generation # update our unique conversation
+                print(output_text)
+                await message.reply(f"{output_text}") # reply to user's prompt (whatever they typed)
+                await message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
     except Exception as e:
         print(f"Error: {e}")
-        await
-
-'''
+        await message.reply(f"{e} cc <@811235357663297546> (falcon error)") # ping lunarflu if something breaks
 #----------------------------------------------------------------------------------------------------------------------------
-
-
-
-
+# hackerllama magic to run the bot in a Hugging Face Space
 def run_bot():
     bot.run(DISCORD_TOKEN)
 
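Two details in the new on_message deserve a flag. A while body containing only comments is a syntax error in Python, so the polling loop needs at least a pass statement, and indexing falcon_userid_threadid_dictionary[message.author.id] raises KeyError for anyone without a thread, which trips the generic error reply. A minimal sketch of the same gradio_client submit-and-poll pattern with both spots tightened (the Space URL is a placeholder, not from the diff):

import json
from gradio_client import Client

falconclient = Client("https://example-falcon-space.hf.space")  # placeholder URL

def ask_falcon(prompt, chathistory, instructions):
    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)  # non-blocking
    while not job.done():
        pass  # a statement is required here; job.status() could be polled for debugging
    file_paths = job.outputs()        # e.g. ["tmp123.json", "tmp456.json", ...]
    full_generation = file_paths[-1]  # the last file holds the complete generation
    with open(full_generation, "r") as file:
        data = json.load(file)
    return data[-1][-1]               # newest bot message

# KeyError-safe channel check for on_message:
# if message.channel.id == falcon_userid_threadid_dictionary.get(message.author.id):

Note that spinning in a loop inside an async event handler still blocks the Discord event loop; wrapping the blocking wait in asyncio.to_thread (or an executor) would keep the bot responsive while the Space generates.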
@@ -722,3 +685,4 @@ demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 #demo.queue(concurrency_count=10)
 demo.queue(concurrency_count=20)
 demo.launch()
+#----------------------------------------------------------------------------------------------------------------------------
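The "# hackerllama magic" comment refers to the usual trick for hosting a Discord bot inside a Gradio Space: the bot runs on a background thread while demo.launch() keeps the web process, and thus the Space container, alive. A minimal sketch of that wiring, with token handling and intents simplified and the command prefix invented:

import threading

import discord
import gradio as gr
from discord.ext import commands

DISCORD_TOKEN = "..."  # in a Space this comes from a secret, never a literal
intents = discord.Intents.default()
intents.message_content = True  # needed to read message text in discord.py 2.x
bot = commands.Bot(command_prefix="!", intents=intents)

def run_bot():
    bot.run(DISCORD_TOKEN)

threading.Thread(target=run_bot).start()  # Discord bot runs in the background

def greet(name):
    return f"Hello {name}!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.queue(concurrency_count=20)  # matches the queue size in the diff (older Gradio API)
demo.launch()                     # blocks, keeping the container running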