lunarflu HF staff committed on
Commit
61a60c0
1 Parent(s): cded4d7

github->hf

Browse files
Files changed (1) hide show
  1. app.py +14 -24
app.py CHANGED
@@ -1,21 +1,13 @@
1
  import asyncio
 
2
  import os
3
  import threading
4
- import random
5
  from threading import Event
6
- from typing import Optional
7
- import json
8
 
9
  import discord
10
  import gradio as gr
11
- from discord import Permissions
12
  from discord.ext import commands
13
- from discord.utils import oauth_url
14
-
15
  from gradio_client import Client
16
- import gradio_client as grc
17
- from gradio_client.utils import QueueError
18
-
19
 
20
  event = Event()
21
  DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
@@ -23,8 +15,7 @@ HF_TOKEN = os.getenv("HF_TOKEN")
23
  codellama_client = Client("https://huggingface-projects-codellama-13b-chat.hf.space/", HF_TOKEN)
24
  codellama_threadid_userid_dictionary = {}
25
  codellama_threadid_conversation = {}
26
- intents = discord.Intents.default()
27
- intents.message_content = True
28
  bot = commands.Bot(command_prefix="/", intents=intents)
29
 
30
 
@@ -42,11 +33,11 @@ async def on_ready():
42
  description="Enter a prompt to generate code!",
43
  )
44
  async def codellama(ctx, prompt: str):
45
- """Audioldm2 generation"""
46
  try:
47
  await try_codellama(ctx, prompt)
48
  except Exception as e:
49
- print(f"Error: (app.py){e}")
50
 
51
 
52
  @bot.event
@@ -60,7 +51,7 @@ async def on_message(message):
60
 
61
 
62
  async def try_codellama(ctx, prompt):
63
- """Generates text based on a given prompt"""
64
  try:
65
  global codellama_threadid_userid_dictionary
66
  global codellama_threadid_conversation
@@ -75,11 +66,11 @@ async def try_codellama(ctx, prompt):
75
  print(output_code)
76
  await thread.send(output_code)
77
  except Exception as e:
78
- print(f"try_codellama Error: {e}")
79
 
80
 
81
  def codellama_initial_generation(prompt, thread):
82
- """job.submit inside of run_in_executor = more consistent bot behavior"""
83
  global codellama_threadid_conversation
84
 
85
  chat_history = f"{thread.id}.json"
@@ -141,7 +132,7 @@ async def continue_codellama(message):
141
  with open(chat_history, "w") as json_file:
142
  json.dump(conversation, json_file)
143
  codellama_threadid_conversation[message.channel.id] = chat_history
144
-
145
  if len(response) > 1300:
146
  response = response[:1300] + "...\nTruncating response due to discord api limits."
147
 
@@ -156,8 +147,9 @@ async def continue_codellama(message):
156
  await message.reply("Conversation ending due to length, feel free to start a new one!")
157
 
158
  except Exception as e:
159
- print(f"continue_codellama Error: {e}")
160
- #---------------------------------------------------------------------------------------------------------------------
 
161
  def run_bot():
162
  if not DISCORD_TOKEN:
163
  print("DISCORD_TOKEN NOT SET")
@@ -170,11 +162,9 @@ threading.Thread(target=run_bot).start()
170
  event.wait()
171
 
172
  with gr.Blocks() as demo:
173
- gr.Markdown(
174
- """
175
  # Discord bot of https://huggingface.co/spaces/huggingface-projects/codellama-13b-chat
176
  https://discord.com/api/oauth2/authorize?client_id=1152238037355474964&permissions=326417516544&scope=bot
177
- """
178
- )
179
 
180
- demo.launch()
 
1
  import asyncio
2
+ import json
3
  import os
4
  import threading
 
5
  from threading import Event
 
 
6
 
7
  import discord
8
  import gradio as gr
 
9
  from discord.ext import commands
 
 
10
  from gradio_client import Client
 
 
 
11
 
12
  event = Event()
13
  DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
 
15
  codellama_client = Client("https://huggingface-projects-codellama-13b-chat.hf.space/", HF_TOKEN)
16
  codellama_threadid_userid_dictionary = {}
17
  codellama_threadid_conversation = {}
18
+ intents = discord.Intents.all()
 
19
  bot = commands.Bot(command_prefix="/", intents=intents)
20
 
21
 
 
33
  description="Enter a prompt to generate code!",
34
  )
35
  async def codellama(ctx, prompt: str):
36
+ """Codellama generation"""
37
  try:
38
  await try_codellama(ctx, prompt)
39
  except Exception as e:
40
+ print(f"Error: {e}")
41
 
42
 
43
  @bot.event
 
51
 
52
 
53
  async def try_codellama(ctx, prompt):
54
+ """Generates code based on a given prompt"""
55
  try:
56
  global codellama_threadid_userid_dictionary
57
  global codellama_threadid_conversation
 
66
  print(output_code)
67
  await thread.send(output_code)
68
  except Exception as e:
69
+ print(f"Error: {e}")
70
 
71
 
72
  def codellama_initial_generation(prompt, thread):
73
+ """Job.submit inside of run_in_executor = more consistent bot behavior"""
74
  global codellama_threadid_conversation
75
 
76
  chat_history = f"{thread.id}.json"
 
132
  with open(chat_history, "w") as json_file:
133
  json.dump(conversation, json_file)
134
  codellama_threadid_conversation[message.channel.id] = chat_history
135
+
136
  if len(response) > 1300:
137
  response = response[:1300] + "...\nTruncating response due to discord api limits."
138
 
 
147
  await message.reply("Conversation ending due to length, feel free to start a new one!")
148
 
149
  except Exception as e:
150
+ print(f"Error: {e}")
151
+
152
+
153
  def run_bot():
154
  if not DISCORD_TOKEN:
155
  print("DISCORD_TOKEN NOT SET")
 
162
  event.wait()
163
 
164
  with gr.Blocks() as demo:
165
+ gr.Markdown("""
 
166
  # Discord bot of https://huggingface.co/spaces/huggingface-projects/codellama-13b-chat
167
  https://discord.com/api/oauth2/authorize?client_id=1152238037355474964&permissions=326417516544&scope=bot
168
+ """)
 
169
 
170
+ demo.launch()