seawolf2357 committed on
Commit f3985af
1 Parent(s): 9f7d4a9

Update app.py

Files changed (1)
  1. app.py +30 -10
app.py CHANGED
@@ -1,15 +1,19 @@
 import discord
 import logging
+import gradio as gr
+from huggingface_hub import InferenceClient
+import os
 
-# Logging setup
-logging.basicConfig(level=logging.DEBUG,
-                    format='%(asctime)s:%(levelname)s:%(name)s: %(message)s',
-                    handlers=[logging.StreamHandler()])  # Print logs to the console
+# Setup logging
+logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
 
-# Create intents
+# Setup Discord intents
 intents = discord.Intents.default()
 intents.messages = True
 
+# Setup the Inference API client
+client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
+
 class MyClient(discord.Client):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -21,10 +25,26 @@ class MyClient(discord.Client):
         if message.author == self.user:
             logging.info('Ignoring message from self.')
             return
-        response = message.content + " hello"
-        logging.debug(f'Responding to message: {message.content}')
+
+        # Prepare the message for the model
+        system_message = "This is an automated assistant response."
+        history = []
+        response = await generate_response(message.content, history, system_message, 4000, 0.7, 0.95)
         await message.channel.send(response)
 
-# Create and run the client object
-client = MyClient(intents=intents)
-client.run('MTIyODQyNTQxNDk0MzQ0MTEwNw.Gfd_ri.rrG_6-Sfp0FYvSIbv-zZ98dpHI-G_Fh9MFCzco')
+# Define the response generation function using the Hugging Face Inference API
+async def generate_response(user_input, history, system_message, max_tokens, temperature, top_p):
+    system_prefix = "You are a sentient AI assistant on Discord."
+    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+    messages.append({"role": "user", "content": user_input})
+    response = next(client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p))
+    return response.choices[0].delta.content.strip()
+
+# Instantiate and run the Discord bot
+discord_client = MyClient(intents=intents)
+discord_client.run('MTIyODQyNTQxNDk0MzQ0MTEwNw.Gfd_ri.rrG_6-Sfp0FYvSIbv-zZ98dpHI-G_Fh9MFCzco')
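A note on the new generate_response: chat_completion(..., stream=True) returns an iterator of partial chunks, and next(...) pulls only the first one, so the bot would post just the opening tokens of the model's reply. A minimal sketch of collecting the whole streamed reply instead (assuming the same huggingface_hub InferenceClient streaming interface; collect_streamed_reply is an illustrative helper, not part of the commit):

# Sketch: join every streamed chunk instead of taking only the first one.
def collect_streamed_reply(client, messages, max_tokens, temperature, top_p):
    parts = []
    for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True,
                                        temperature=temperature, top_p=top_p):
        delta = chunk.choices[0].delta.content
        if delta:  # some chunks (e.g. the final one) may carry no content
            parts.append(delta)
    return "".join(parts).strip()

Dropping stream=True and reading response.choices[0].message.content would work just as well for a single Discord message; either way, the Discord token passed to discord_client.run(...) could be read from an environment variable the same way the commit already reads HF_TOKEN.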