seawolf2357 committed
Commit c5dfb25 · verified · 1 Parent(s): 36e3de2

Update app.py

Files changed (1):
  1. app.py (+22 -14)
app.py CHANGED
@@ -2,7 +2,7 @@ import discord
 import logging
 import os
 import requests
-from huggingface_hub import InferenceClient
+from huggingface_hub import InferenceClient, HfHubHTTPError
 from transformers import pipeline
 import asyncio
 import subprocess
@@ -73,20 +73,22 @@ class MyClient(discord.Client):
         loop = asyncio.get_event_loop()
 
         # Ask the AI-MO/NuminaMath-7B-TIR model to solve the math problem
-        math_response_future = loop.run_in_executor(None, lambda: self.math_pipe([{"role": "user", "content": question}], max_new_tokens=2000))
-
-        ## math_response_future = loop.run_in_executor(None, lambda: self.math_pipe([{"role": "user", "content": question}]))
+        math_response_future = loop.run_in_executor(None, lambda: self.math_pipe(question, max_new_tokens=2000))
         math_response = await math_response_future
         math_result = math_response[0]['generated_text']
 
-        # Ask the Cohere model to translate the AI-MO/NuminaMath-7B-TIR model's output
-        cohere_response_future = loop.run_in_executor(None, lambda: hf_client.chat_completion(
-            [{"role": "system", "content": "Translate the following text into Korean: "}, {"role": "user", "content": math_result}], max_tokens=1000))
+        try:
+            # Ask the Cohere model to translate the AI-MO/NuminaMath-7B-TIR model's output
+            cohere_response_future = loop.run_in_executor(None, lambda: hf_client.chat_completion(
+                [{"role": "system", "content": "Translate the following text into Korean: "}, {"role": "user", "content": math_result}], max_tokens=1000))
 
-        cohere_response = await cohere_response_future
-        cohere_result = ''.join([part.choices[0].delta.content for part in cohere_response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
+            cohere_response = await cohere_response_future
+            cohere_result = ''.join([part.choices[0].delta.content for part in cohere_response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
 
-        combined_response = f"Math teacher's answer: {cohere_result}"
+            combined_response = f"Math teacher's answer: {cohere_result}"
+        except HfHubHTTPError as e:
+            logging.error(f"Hugging Face API error: {e}")
+            combined_response = "An error occurred while processing the request."
 
         return combined_response
 
@@ -105,10 +107,16 @@ class MyClient(discord.Client):
         """
         conversation_history.append({"role": "user", "content": user_input})
         messages = [{"role": "system", "content": f"{system_prefix}"}] + conversation_history
-        response = await asyncio.get_event_loop().run_in_executor(None, lambda: hf_client.chat_completion(
-            messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
-        full_response = ''.join([part.choices[0].delta.content for part in response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
-        conversation_history.append({"role": "assistant", "content": full_response})
+
+        try:
+            response = await asyncio.get_event_loop().run_in_executor(None, lambda: hf_client.chat_completion(
+                messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
+            full_response = ''.join([part.choices[0].delta.content for part in response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
+            conversation_history.append({"role": "assistant", "content": full_response})
+        except HfHubHTTPError as e:
+            logging.error(f"Hugging Face API error: {e}")
+            full_response = "An error occurred while generating the response."
+
         return f"{user_mention}, {full_response}"
 
     async def send_long_message(self, channel, message):
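For context on the pattern the commit keeps using: self.math_pipe is a synchronous transformers pipeline, so the bot pushes the call onto the default thread pool with run_in_executor to avoid blocking the Discord event loop. A minimal self-contained sketch of the same pattern; the small gpt2 model and the solve() wrapper are illustrative, not the Space's actual setup:

    import asyncio

    from transformers import pipeline

    # Illustrative small model; the commit's code loads AI-MO/NuminaMath-7B-TIR
    # into self.math_pipe instead.
    math_pipe = pipeline("text-generation", model="gpt2")

    async def solve(question: str) -> str:
        loop = asyncio.get_running_loop()
        # The pipeline call is blocking; running it in the default thread pool
        # keeps the event loop (Discord heartbeats, other handlers) responsive.
        result = await loop.run_in_executor(
            None, lambda: math_pipe(question, max_new_tokens=50)
        )
        return result[0]["generated_text"]

    print(asyncio.run(solve("What is 2 + 2?")))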
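The chat path calls hf_client.chat_completion(..., stream=True) and joins the per-chunk deltas, guarding against chunks that carry no content. A sketch of that consumption pattern with huggingface_hub's InferenceClient; the model id and the HF_TOKEN environment variable are assumptions, since the diff does not show how hf_client is constructed:

    import os

    from huggingface_hub import InferenceClient

    # Assumed model id and token source; the Space presumably points hf_client
    # at a Cohere chat model.
    client = InferenceClient(
        model="CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN")
    )

    stream = client.chat_completion(
        [{"role": "user", "content": "Say hello in Korean."}],
        max_tokens=100,
        stream=True,
    )

    # Each streamed chunk carries at most one delta; filter empty chunks
    # exactly as the diff does before joining.
    parts = []
    for chunk in stream:
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            parts.append(chunk.choices[0].delta.content)
    print("".join(parts))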
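Both new try/except blocks catch HfHubHTTPError, the exception huggingface_hub raises when an API call fails at the HTTP level, log it, and substitute a user-facing fallback string so the bot still replies. One caveat: HfHubHTTPError has long been importable from huggingface_hub.utils, and whether the top-level import added in this commit resolves can depend on the installed huggingface_hub version. A minimal sketch of the fallback pattern, using the utils path:

    import logging

    from huggingface_hub import InferenceClient
    from huggingface_hub.utils import HfHubHTTPError

    client = InferenceClient(model="gpt2")  # illustrative model

    def safe_generate(prompt: str) -> str:
        try:
            return client.text_generation(prompt, max_new_tokens=20)
        except HfHubHTTPError as e:
            # Log the HTTP failure and degrade to a fallback message,
            # mirroring the error handling added in this commit.
            logging.error(f"Hugging Face API error: {e}")
            return "An error occurred while processing the request."

    print(safe_generate("Hello"))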