datacipen committed on
Commit
e3471a7
·
verified ·
1 Parent(s): b693354

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +15 -16
main.py CHANGED
@@ -123,7 +123,8 @@ def Connexion_Mistral():
123
  #return Mistral(api_key=os.environ["GITHUB_TOKEN"], server_url=endpoint)
124
  #repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
125
  #repo_id = "Qwen/Qwen2.5-3B-Instruct"
126
- repo_id = "microsoft/Phi-3.5-mini-instruct"
 
127
  llm = HuggingFaceEndpoint(
128
  repo_id=repo_id, max_new_tokens=5000, temperature=0.1, task="text2text-generation", streaming=True
129
  )
@@ -186,29 +187,27 @@ def Generation_completion(client, data, question):
186
  @traceable(run_type="chain", name="Mistral Assistant des datas Gustaviz",)
187
  async def Affichage_reponse(response, question, data):
188
  memory = cl.user_session.get("memory")
189
- msg = cl.Message(author="COPILOT",content="")
190
- async for chunk in response.astream({"question": question, "data": data},config=RunnableConfig(callbacks=[cl.AsyncLangchainCallbackHandler(stream_final_answer=True)])):
191
- time.sleep(0.125)
192
- await msg.stream_token(chunk.replace('Ã','é').replace('©','').replace('Ã','è').replace('¨','').replace('â','\'').replace('€','').replace('™','').replace('Å','oe').replace('“','').replace('®','î').replace('´','ô').replace('<','').replace('>','').replace('/',''))
193
-
194
- answer = msg.content
195
 
196
  #### Avec Traduction #####
197
- #msg = response.invoke({"question": question, "data": data})
198
  #msg = msg.replace('Ã','é').replace('©','').replace('Ã','è').replace('¨','').replace('â','\'').replace('€','').replace('™','').replace('Å','oe').replace('“','').replace('®','î').replace('´','ô').replace('<','').replace('>','').replace('/','')
199
- #msg = GoogleTranslator(source='auto', target='fr').translate(msg[0:4999])
200
- #msgList = msg.split(' ')
201
- #for chunk in msgList:
202
- # time.sleep(0.125)
203
- # await result.stream_token(chunk + ' ')
204
  #### Avec Traduction #####
205
 
206
 
207
- await msg.send()
208
  memory.chat_memory.add_user_message(question)
209
- memory.chat_memory.add_ai_message(answer)
210
 
211
- return answer
212
 
213
  @cl.step(type="tool", show_input=True)
214
  async def Affichage_question_contexte(answer, question, contexte):
 
123
  #return Mistral(api_key=os.environ["GITHUB_TOKEN"], server_url=endpoint)
124
  #repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
125
  #repo_id = "Qwen/Qwen2.5-3B-Instruct"
126
+ #repo_id = "microsoft/Phi-3.5-mini-instruct"
127
+ repo_id = "meta-llama/Llama-3.2-3B-Instruct"
128
  llm = HuggingFaceEndpoint(
129
  repo_id=repo_id, max_new_tokens=5000, temperature=0.1, task="text2text-generation", streaming=True
130
  )
 
187
@traceable(run_type="chain", name="Mistral Assistant des datas Gustaviz",)
async def Affichage_reponse(response, question, data):
    """Run the chain synchronously, translate the answer to French, and
    stream it word-by-word to the Chainlit UI with a typing effect.

    Parameters:
        response: a LangChain runnable; ``response.invoke({"question", "data"})``
            is expected to return the answer as a string.
        question: the user's question (str), also recorded in chat memory.
        data: context data forwarded to the chain.

    Returns:
        The translated answer string (also stored as the AI message in memory).
    """
    import asyncio  # local import: needed for the non-blocking sleep below

    memory = cl.user_session.get("memory")
    result = cl.Message(author="COPILOT", content="")

    # NOTE(review): .invoke() is a synchronous call inside a coroutine; it
    # blocks the event loop while the model generates. Consider .ainvoke()
    # if the runnable supports it — confirm before changing.
    msg = response.invoke({"question": question, "data": data})

    # GoogleTranslator rejects inputs over 5000 characters, hence the slice.
    msg = GoogleTranslator(source='auto', target='fr').translate(msg[0:4999])

    # Stream the translated answer word by word to simulate typing.
    for chunk in msg.split(' '):
        # BUG FIX: time.sleep() here would block the asyncio event loop and
        # freeze every connected session; asyncio.sleep yields control.
        await asyncio.sleep(0.125)
        await result.stream_token(chunk + ' ')

    await result.send()
    memory.chat_memory.add_user_message(question)
    memory.chat_memory.add_ai_message(msg)
    return msg
211
 
212
  @cl.step(type="tool", show_input=True)
213
  async def Affichage_question_contexte(answer, question, contexte):