Synced repo using 'sync_with_huggingface' Github Action
app.py (CHANGED)
@@ -568,6 +568,7 @@ chathistory = None
 falcon_users = []
 falcon_threads = []
 falcon_dictionary = {}
+falcon_userid_threadid_dictionary = {}
 
 @bot.command()
 async def falconprivate(ctx, *, prompt: str):
@@ -579,6 +580,7 @@ async def falconprivate(ctx, *, prompt: str):
         global falcon_threads
         global falcon_chats
         global falcon_dictionary
+        global falcon_userid_threadid_dictionary
         if ctx.channel.id == 1116089829147557999: # initial thread creation inside #falcon
             if ctx.author.id not in falcon_users: # create a new one
                 await ctx.message.add_reaction('<a:loading:1114111677990981692>')
@@ -604,6 +606,7 @@ async def falconprivate(ctx, *, prompt: str):
                     data = json.load(file)
                 output_text = data[-1][-1] # we output this as the bot
                 falcon_dictionary[ctx.author.id] = full_generation # 1234567890: tmp12345678.json
+                falcon_userid_threadid_dictionary[ctx.author.id] = thread.id
                 print(output_text)
                 await thread.send(f"{output_text}")
                 await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
@@ -614,24 +617,26 @@ async def falconprivate(ctx, *, prompt: str):
             await ctx.message.add_reaction('<a:loading:1114111677990981692>')
             #await ctx.reply(f"inside thread, only {ctx.author} is allowed to chat here")
             # post all other generations here
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            thread_id_test = falcon_userid_threadid_dictionary[ctx.author.id]
+            if ctx.channel.id == thread_id_test:
+                chathistory = falcon_dictionary[ctx.author.id]
+
+                instructions = "The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Falcon, and a human user, called User. In the following interactions, User and Falcon will converse in natural language, and Falcon will answer User's questions. Falcon was built to be respectful, polite and inclusive. Falcon was built by the Technology Innovation Institute in Abu Dhabi. Falcon will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. It knows a lot, and always tells the truth. The conversation begins."
+                job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1) # This is not blocking, similar to run_in_executor (but better)
+                while job.done() == False:
+                    status = job.status() # could be spammy, let's test anyways
+                    print(status)
+                else:
+                    file_paths = job.outputs()
+                    full_generation = file_paths[-1] # tmp12345678.json
+                    unique_file_2 = "{}{}".format(ctx.author.id, ".json") # could use ID, more robust
+                    with open(full_generation, 'r') as file:
+                        data = json.load(file)
+                    output_text = data[-1][-1] # we output this as the bot
+                    falcon_dictionary[ctx.author.id] = full_generation
+                    print(output_text)
+                    await ctx.reply(f"{output_text}")
+                    await ctx.message.remove_reaction('<a:loading:1114111677990981692>', bot.user)
 
     except Exception as e:
         print(f"Error: {e}")
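For context, the added hunks do two things: they remember which private thread belongs to which user (falcon_userid_threadid_dictionary maps ctx.author.id to thread.id), and when that user posts again inside their thread they reuse the stored chat-history file, submitting a non-blocking gradio_client job to the Falcon Space and polling it until it finishes. The sketch below distills that flow into a plain function under stated assumptions: "FALCON_SPACE_URL", the shortened instructions string, and the generate_reply helper are illustrative placeholders rather than code from this repository, and the real command additionally handles reactions and thread creation.

# Minimal, hypothetical sketch of the per-user thread routing and job polling
# shown in the diff above. "FALCON_SPACE_URL" is a placeholder; the app builds
# its client and dictionaries elsewhere, and the instructions string is shortened.
import json
import time

from gradio_client import Client

falcon_dictionary = {}                   # author id -> path of the latest chat-history JSON
falcon_userid_threadid_dictionary = {}   # author id -> id of that user's private thread

falconclient = Client("FALCON_SPACE_URL")  # placeholder Space reference

def generate_reply(author_id, channel_id, prompt):
    # Only answer if the message was sent in the thread created for this user.
    if falcon_userid_threadid_dictionary.get(author_id) != channel_id:
        return None
    chathistory = falcon_dictionary[author_id]                 # file produced by the previous turn
    instructions = "A conversation between Falcon and User."   # shortened stand-in
    # submit() returns a Job immediately; the Space runs the generation asynchronously.
    job = falconclient.submit(prompt, chathistory, instructions, 0.8, 0.9, fn_index=1)
    while not job.done():
        print(job.status())                                    # optional progress logging
        time.sleep(0.5)                                        # avoid a tight polling loop
    full_generation = job.outputs()[-1]                        # path to the updated history JSON
    with open(full_generation, "r") as file:
        data = json.load(file)
    falcon_dictionary[author_id] = full_generation             # keep the history for the next turn
    return data[-1][-1]                                        # last bot message in the history

Keying both dictionaries on the Discord author id is what lets a single command serve two cases: the first message in #falcon, which creates the thread and seeds the history, and every follow-up inside that thread, which reuses it.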
|