lunarflu HF staff committed on
Commit
ad54df9
1 Parent(s): 1aa00a4

Synced repo using 'sync_with_huggingface' Github Action

Browse files
Files changed (1) hide show
  1. codellama.py +187 -0
codellama.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
import json
import os
import threading
from threading import Event

import discord
import gradio as gr
from discord.ext import commands
from gradio_client import Client

# Set once the bot has logged in (or failed to start), so the main thread
# can proceed to launch the Gradio UI.
event = Event()
# Credentials come from the environment; DISCORD_TOKEN may be unset locally.
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")
# Client for the hosted CodeLlama-13B chat Space used for all generations.
codellama_client = Client("https://huggingface-projects-codellama-13b-chat.hf.space/", HF_TOKEN)
# thread.id -> user.id of the thread creator (only they may continue the chat)
codellama_threadid_userid_dictionary = {}
# thread.id -> path of the JSON chat-history file backing that thread
codellama_threadid_conversation = {}
intents = discord.Intents.all()
bot = commands.Bot(command_prefix="/", intents=intents)
20
+
21
+
22
@bot.event
async def on_ready():
    """Report login, sync the application command tree, then release the main thread."""
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    names = ", ".join(s.name for s in synced)
    print(f"Synced commands: {names}.")
    # Unblock the main thread so the Gradio UI can launch.
    event.set()
    print("------")
29
+
30
+
31
@bot.hybrid_command(name="codellama", description="Enter a prompt to generate code!")
async def codellama(ctx, prompt: str):
    """Codellama generation: delegate to try_codellama, logging any failure."""
    try:
        await try_codellama(ctx, prompt)
    except Exception as e:
        print(f"Error: {e}")
41
+
42
+
43
@bot.event
async def on_message(message):
    """Checks channel and continues codellama conversation if it's the right Discord Thread.

    Ignores messages authored by bots (including this one) to avoid loops.
    """
    try:
        if not message.author.bot:
            await continue_codellama(message)
        # Overriding on_message suppresses discord.py's default command
        # dispatch; re-dispatch here so prefix/text command invocations of
        # the hybrid command keep working.
        await bot.process_commands(message)
    except Exception as e:
        print(f"Error: {e}")
51
+
52
+
53
async def try_codellama(ctx, prompt):
    """Generates code based on a given prompt.

    Posts the prompt to the channel, opens a thread named after it, runs the
    blocking Gradio job off the event loop, records the thread owner, and
    sends the generated code into the thread.
    """
    global codellama_threadid_userid_dictionary
    global codellama_threadid_conversation
    try:
        message = await ctx.send(f"**{prompt}** - {ctx.author.mention}")
        # Discord caps thread names at 100 characters.
        thread = await message.create_thread(name=prompt[:100])

        # Run the blocking Gradio call in a worker thread so the event loop
        # stays responsive.
        loop = asyncio.get_running_loop()
        output_code = await loop.run_in_executor(None, codellama_initial_generation, prompt, thread)

        codellama_threadid_userid_dictionary[thread.id] = ctx.author.id
        await thread.send(output_code)
    except Exception as e:
        print(f"Error: {e}")
68
+
69
+
70
def codellama_initial_generation(prompt, thread):
    """Run the first CodeLlama generation for a new thread (blocking).

    Intended to be called via loop.run_in_executor (job.submit inside an
    executor = more consistent bot behavior).

    Parameters:
        prompt: the user's initial prompt string.
        thread: the Discord thread object; its id names the history file.

    Returns:
        The model response, truncated to respect Discord's message limit.
    """
    import time  # local import keeps this executor helper self-contained

    global codellama_threadid_conversation

    # Seed an empty conversation history file for this thread.
    chat_history = f"{thread.id}.json"
    conversation = []
    with open(chat_history, "w") as json_file:
        json.dump(conversation, json_file)

    job = codellama_client.submit(prompt, chat_history, fn_index=0)

    # Poll with a short sleep instead of a hot spin loop — the original
    # `while job.done() is False: pass` pegged a CPU core for the whole run.
    while not job.done():
        time.sleep(0.1)

    # The job streams partial outputs; the last one is the final history file.
    result = job.outputs()[-1]
    with open(result, "r") as json_file:
        data = json.load(json_file)
    response = data[-1][-1]

    # Persist the (prompt, response) pair and remember the history path.
    conversation.append((prompt, response))
    with open(chat_history, "w") as json_file:
        json.dump(conversation, json_file)
    codellama_threadid_conversation[thread.id] = chat_history

    if len(response) > 1300:
        response = response[:1300] + "...\nTruncating response due to discord api limits."
    return response
96
+
97
+
98
+ async def continue_codellama(message):
99
+ """Continues a given conversation based on chat_history"""
100
+ try:
101
+ if not message.author.bot:
102
+ global codellama_threadid_userid_dictionary # tracks userid-thread existence
103
+ if message.channel.id in codellama_threadid_userid_dictionary: # is this a valid thread?
104
+ if codellama_threadid_userid_dictionary[message.channel.id] == message.author.id:
105
+ global codellama_threadid_conversation
106
+
107
+ prompt = message.content
108
+ chat_history = codellama_threadid_conversation[message.channel.id]
109
+
110
+ # Check to see if conversation is ongoing or ended (>15000 characters)
111
+ with open(chat_history, "r") as json_file:
112
+ conversation = json.load(json_file)
113
+ total_characters = 0
114
+ for item in conversation:
115
+ for string in item:
116
+ total_characters += len(string)
117
+
118
+ if total_characters < 15000:
119
+ job = codellama_client.submit(prompt, chat_history, fn_index=0)
120
+ while job.done() is False:
121
+ pass
122
+ else:
123
+ result = job.outputs()[-1]
124
+ with open(result, "r") as json_file:
125
+ data = json.load(json_file)
126
+ response = data[-1][-1]
127
+ with open(chat_history, "r") as json_file:
128
+ conversation = json.load(json_file)
129
+ conversation.append((prompt, response))
130
+ with open(chat_history, "w") as json_file:
131
+ json.dump(conversation, json_file)
132
+ codellama_threadid_conversation[message.channel.id] = chat_history
133
+
134
+ if len(response) > 1300:
135
+ response = response[:1300] + "...\nTruncating response due to discord api limits."
136
+
137
+ await message.reply(response)
138
+
139
+ total_characters = 0
140
+ for item in conversation:
141
+ for string in item:
142
+ total_characters += len(string)
143
+
144
+ if total_characters >= 15000:
145
+ await message.reply("Conversation ending due to length, feel free to start a new one!")
146
+
147
+ except Exception as e:
148
+ print(f"Error: {e}")
149
+
150
+
151
def run_bot():
    """Start the Discord bot; if no token is configured, log it and release the main thread."""
    if DISCORD_TOKEN:
        bot.run(DISCORD_TOKEN)
    else:
        print("DISCORD_TOKEN NOT SET")
        # Still unblock the main thread so the Gradio UI can launch.
        event.set()
157
+
158
+
159
# Run the Discord bot on a background thread and wait until it is ready
# (or failed to start) before launching the Gradio UI.
threading.Thread(target=run_bot).start()
event.wait()

# Fixed wording: the note previously read "does have the same name",
# the opposite of the intended warning.
welcome_message = """
## Add this bot to your server by clicking this link:

https://discord.com/api/oauth2/authorize?client_id=1152238037355474964&permissions=309237647360&scope=bot

## How to use it?

The bot can be triggered via `/codellama` followed by your text prompt.

This will generate text based on the text prompt and create a thread for the discussion.

To continue the conversation, simply ask additional questions in the thread - no need for repeating the command!

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Discord bot of https://huggingface.co/spaces/codellama/codellama-13b-chat
    {welcome_message}
    """)

demo.launch()