Ilyas KHIAT committed · Commit b0518b2 · 1 Parent(s): 297a29a

chatbot

Files changed:
- app.py +1 -3
- chatbot_page/chatbot.py +117 -1
app.py CHANGED
@@ -2,8 +2,6 @@ import streamlit as st
 import dotenv
 import os
 
-
-
 def main():
 
     dotenv.load_dotenv(dotenv_path=os.path.join('.streamlit', '.env'))
@@ -20,7 +18,7 @@ def main():
     pg = st.navigation(
         {
             "Audit de contenus": [audit_page, kg_page],
-            "Equipe d'agents IA": [agents_page],
+            "Equipe d'agents IA": [recommended_agents, agents_page],
             "Chatbot": [chatbot],
             "Documentation": [documentation]
         }
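Note on the app.py change: it registers the new `recommended_agents` page with Streamlit's multipage API, where `st.navigation` takes a dict mapping sidebar section headers to lists of pages. A minimal sketch of the pattern (assuming Streamlit ≥ 1.36; the file paths and titles below are hypothetical, not the ones in this repo):

```python
import streamlit as st

# Hypothetical page definitions; the real app builds audit_page, agents_page, etc.
recommended_agents = st.Page("agents_page/recommended.py", title="Agents recommandés")
agents_page = st.Page("agents_page/agents.py", title="Equipe d'agents IA")
chatbot = st.Page("chatbot_page/chatbot.py", title="Chatbot")

# Dict keys become section headers in the sidebar, exactly as in app.py above.
pg = st.navigation(
    {
        "Equipe d'agents IA": [recommended_agents, agents_page],
        "Chatbot": [chatbot],
    }
)
pg.run()  # renders whichever page the user selected
```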
chatbot_page/chatbot.py CHANGED
@@ -1,3 +1,119 @@
 import streamlit as st
+from langchain_openai import ChatOpenAI
+from langchain_mistralai import ChatMistralAI
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.messages import AIMessage, HumanMessage
 
-
+def get_response(user_query, chat_history, context, llm=None, history_limit=5, stream=True):
+
+    template = """
+    Étant donné l'historique de la conversation : {chat_history}, le contexte : {context}, et la question de l'utilisateur : {user_question}, veuillez fournir une réponse détaillée et complète. La réponse doit inclure un ou plusieurs des éléments suivants :
+
+    1. Une explication claire des concepts clés et des termes liés au sujet.
+    2. Un aperçu des meilleures pratiques, des stratégies courantes ou des cadres de référence pertinents pour la discussion.
+    3. Des exemples spécifiques ou des études de cas illustrant les principes abordés.
+    4. Les défis potentiels ou les considérations à prendre en compte.
+    5. Des suggestions de ressources supplémentaires ou des actions que l'utilisateur peut entreprendre pour approfondir sa compréhension.
+
+    Assurez-vous que la réponse soit adaptée au niveau d'expertise de l'utilisateur et aux spécificités du contexte fourni.
+
+    """
+
+    prompt = ChatPromptTemplate.from_template(template)
+
+    #llm = ChatOpenAI(model="gpt-4o")
+    if not llm:
+        llm = ChatOpenAI(model="gpt-4o-mini")
+    elif llm == "GPT-4o":
+        llm = ChatOpenAI(model="gpt-4o")
+    elif llm == "Mistral Large 2 (FR)":
+        llm = ChatMistralAI(model_name="mistral-large-2407")
+    elif llm == "GPT-4o-mini":
+        llm = ChatOpenAI(model="gpt-4o-mini")
+    elif llm == "Mistral Nemo (FR)":
+        llm = ChatMistralAI(model_name="open-mistral-nemo-2407")
+
+
+    chain = prompt | llm
+
+    if not stream:
+        return chain.invoke({
+            "context": context,
+            "chat_history": chat_history[-history_limit:],
+            "user_question": user_query,
+        })
+
+    chain = chain | StrOutputParser()
+
+    if history_limit:
+        return chain.stream({
+            "context": context,
+            "chat_history": chat_history[-history_limit:],
+            "user_question": user_query,
+        })
+
+    return chain.stream({
+        "context": context,
+        "chat_history": chat_history,
+        "user_question": user_query,
+    })
+
+def handle_display_models(index, models_names):
+    model = st.radio("Choisir un modèle", models_names, index=index)
+    return model
+
+
+def chatbot_main():
+    st.title("Chatbot")
+    models_names = ["GPT-4o", "GPT-4o-mini"]
+
+    if "chat_history" not in st.session_state:
+        st.session_state.chat_history = [
+            AIMessage(content="Salut, Que puis-je faire pour vous ?"),
+        ]
+
+    if "model" not in st.session_state:
+        st.session_state.model = "GPT-4o-mini"
+
+    header = st.container()
+    col1, col2 = header.columns([1, 2])
+
+    with col1.popover("Modèles disponibles"):
+        new_model = handle_display_models(models_names.index(st.session_state.model), models_names)
+
+    st.session_state.model = new_model
+
+    st.markdown(f"- **{st.session_state.model}**")
+
+    for message in st.session_state.chat_history:
+        if isinstance(message, AIMessage):
+            with st.chat_message("AI"):
+                st.markdown(message.content)
+        elif isinstance(message, HumanMessage):
+            with st.chat_message("Moi"):
+                st.write(message.content)
+
+    if "response_llm" not in st.session_state:
+        st.session_state.response_llm = ""
+
+    user_query = st.chat_input("Par ici ...")
+    if user_query is not None and user_query != "":
+        st.session_state.chat_history.append(HumanMessage(content=user_query))
+
+        with st.chat_message("Moi"):
+            st.markdown(user_query)
+
+        with st.chat_message("AI"):
+            st.markdown(f"**{st.session_state.model}**")
+
+
+            response = st.write_stream(get_response(user_query, st.session_state.chat_history, context=st.session_state.response_llm, llm=st.session_state.model, stream=True))
+        st.session_state.chat_history.append(AIMessage(content=response))
+
+
+
+
+
+chatbot_main()
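A note on `get_response` above: `prompt | llm | StrOutputParser()` composes a LangChain Expression Language (LCEL) chain, and `chain.stream(...)` returns a generator of text chunks, which is exactly what `st.write_stream` consumes in `chatbot_main`. The prompt template is in French: it asks the model for a detailed answer covering key concepts, best practices, concrete examples, potential pitfalls, and further resources, adapted to the user's expertise and the supplied context. A minimal sketch of the same chain outside Streamlit (assuming `OPENAI_API_KEY` is set; the inputs are illustrative):

```python
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

prompt = ChatPromptTemplate.from_template(
    "Historique : {chat_history}\nContexte : {context}\nQuestion : {user_question}"
)
chain = prompt | ChatOpenAI(model="gpt-4o-mini") | StrOutputParser()

# .stream() yields incremental text chunks; .invoke() would return the full answer.
for chunk in chain.stream({
    "chat_history": [],
    "context": "",
    "user_question": "Qu'est-ce qu'un audit de contenus ?",
}):
    print(chunk, end="", flush=True)
```

Note also that `chat_history[-history_limit:]` caps how many past messages reach the prompt, which bounds token usage as the conversation grows.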
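The UI side of `chatbot_main` follows the standard Streamlit chat pattern: persist messages in `st.session_state`, replay them on each rerun, then append the new exchange. A condensed, self-contained sketch (the `fake_stream` generator is a hypothetical stand-in for `get_response(..., stream=True)`):

```python
import streamlit as st
from langchain_core.messages import AIMessage, HumanMessage

def fake_stream():
    # Stand-in for get_response(..., stream=True): yields text chunks.
    yield from ["Bonjour", " !", " Comment puis-je vous aider ?"]

if "chat_history" not in st.session_state:
    st.session_state.chat_history = [AIMessage(content="Salut, Que puis-je faire pour vous ?")]

# Replay the stored conversation on every rerun.
for message in st.session_state.chat_history:
    role = "AI" if isinstance(message, AIMessage) else "Moi"
    with st.chat_message(role):
        st.markdown(message.content)

user_query = st.chat_input("Par ici ...")
if user_query:
    st.session_state.chat_history.append(HumanMessage(content=user_query))
    with st.chat_message("Moi"):
        st.markdown(user_query)
    with st.chat_message("AI"):
        # st.write_stream renders chunks as they arrive and returns the full text.
        response = st.write_stream(fake_stream())
    st.session_state.chat_history.append(AIMessage(content=response))
```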