ElouarnLC committed
Commit • c11aeed • 1 Parent(s): 89a1906
Length filter and format update
app.py
CHANGED
@@ -1,5 +1,7 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
+import re
+
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
@@ -34,15 +36,25 @@ def filter_sensitive_info(text):
     return text
 
 
+def remove_length_info(response_text):
+    pattern = r"\(\d+ caractères\)"
+    return re.sub(pattern, "", response_text)
+
+
 def format_prompt(message, history):
-    max_history_length =
+    max_history_length = 5  # Limit the history length
     history = history[-max_history_length:]
 
     prompt = "<s>"
 
     for user_prompt, bot_response in history:
         prompt += f"[USER] {user_prompt} [/USER]"
-
+        if (
+            user_prompt == history[-1][0]
+        ):  # Only wrap the last bot response in [BOT] tags
+            prompt += f" [BOT]{filter_sensitive_info(bot_response)}[/BOT] "
+        else:
+            prompt += f" {filter_sensitive_info(bot_response)} "
     prompt += f"</s>[INST][USER] {message} [/USER][/INST]"
 
     # Add the static and dynamic context each time the user asks a question
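As a quick sanity check, the new remove_length_info helper can be exercised on its own; the sample string and character count below are made up for illustration:

import re

def remove_length_info(response_text):
    # Drop annotations like "(37 caractères)" that the model appends to its replies
    pattern = r"\(\d+ caractères\)"
    return re.sub(pattern, "", response_text)

print(remove_length_info("Bonjour, comment puis-je vous aider ? (37 caractères)"))
# -> "Bonjour, comment puis-je vous aider ? "

Note that the pattern does not consume surrounding whitespace, so a stray space can remain where the annotation was; something like r"\s*\(\d+ caractères\)" would absorb it, if that matters here.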
@@ -89,6 +101,7 @@ def generate(
 
     for response in stream:
         response_text = filter_sensitive_info(response.token.text)
+        response_text = remove_length_info(response_text)
         output += response_text
         yield output
     return output
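One caveat with the streaming change: generate() filters each streamed chunk (response.token.text) individually, and an annotation like "(37 caractères)" will usually arrive split across several tokens, so the per-chunk re.sub may never see the complete pattern. A minimal sketch of one alternative, assuming the same stream object and helpers, is to run the filters over the accumulated text instead (the raw variable is illustrative):

raw = ""
for response in stream:
    raw += response.token.text
    # Filtering the accumulated text catches patterns that straddle token boundaries
    output = remove_length_info(filter_sensitive_info(raw))
    yield output
return output

This is only a sketch: re-filtering the whole string on every token is quadratic in output length, which is usually negligible at chat scale.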