robinroy03 commited on
Commit
e17b407
1 Parent(s): 3a0abc1

New DB updates: improved reference links (class/function/documentation names) and new error logging for HTTP 500 responses from the LLM endpoint.

Browse files
Files changed (1) hide show
  1. main.py +13 -8
main.py CHANGED
@@ -5,7 +5,6 @@ import ast
5
  import os
6
  import threading
7
 
8
-
9
  intents = discord.Intents.default()
10
  intents.message_content = True
11
  bot = discord.Bot(intents = intents)
@@ -50,15 +49,17 @@ async def llm_output(question: str, context: str) -> str:
50
  Context: {context}
51
  """
52
  obj = {
53
- 'model': 'llama3-70b-8192',
54
- 'prompt': prompt,
55
- 'stream': False
56
  }
57
-
58
  async with aiohttp.ClientSession() as session:
59
  async with session.post(URL_LLM + "/api/generate", json=obj) as response:
 
 
60
  response_json = await response.json()
61
-
62
  return response_json['choices'][0]['message']['content']
63
 
64
  async def embedding_output(message: str) -> list:
@@ -116,11 +117,15 @@ async def on_message(message):
116
  data = db_knn['matches'][i]['metadata']['data']
117
  db_context += (data + "\n")
118
  data = ast.literal_eval(data)
119
- references += ("<https://github.com/fury-gl/fury/tree/master/" + data['path'] + ">")
120
  if data.get("function_name"):
121
  references += f"\tFunction Name: {data.get('function_name')}"
122
- else:
123
  references += f"\tClass Name: {data.get('class_name')}"
 
 
 
 
124
  references += "\n"
125
 
126
  llm_answer: str = await llm_output(question, db_context) # for the highest knn result (for the test only right now) TODO: make this better
 
5
  import os
6
  import threading
7
 
 
8
  intents = discord.Intents.default()
9
  intents.message_content = True
10
  bot = discord.Bot(intents = intents)
 
49
  Context: {context}
50
  """
51
  obj = {
52
+ "model": "llama3-70b-8192",
53
+ "prompt": prompt,
54
+ "stream": False
55
  }
56
+
57
  async with aiohttp.ClientSession() as session:
58
  async with session.post(URL_LLM + "/api/generate", json=obj) as response:
59
+ if response.status == 500:
60
+ return "Sorry, an internal Error happened. Please try again later.\nError 500."
61
  response_json = await response.json()
62
+
63
  return response_json['choices'][0]['message']['content']
64
 
65
  async def embedding_output(message: str) -> list:
 
117
  data = db_knn['matches'][i]['metadata']['data']
118
  db_context += (data + "\n")
119
  data = ast.literal_eval(data)
120
+ references += ("<https://github.com/fury-gl/fury/tree/master/" + data['path'] + ">").replace("//home/robin/Desktop/l/fury", "")
121
  if data.get("function_name"):
122
  references += f"\tFunction Name: {data.get('function_name')}"
123
+ elif data.get("class_name"):
124
  references += f"\tClass Name: {data.get('class_name')}"
125
+ elif data['type'] == 'rst':
126
+ references += f"\tDocumentation: {data['path'].split("/")[-1]}"
127
+ elif data['type'] == 'documentation_examples':
128
+ references += f"\tDocumentation: {data['path'].split("/")[-1]}"
129
  references += "\n"
130
 
131
  llm_answer: str = await llm_output(question, db_context) # for the highest knn result (for the test only right now) TODO: make this better