charlie0608 committed
Commit eca1299
1 Parent(s): 621c2ff

Update app.py

Files changed (1): app.py +132 -31
app.py CHANGED
@@ -1,43 +1,144 @@
  import streamlit as st
- import random
- import time

- # Streamed response emulator
- def response_generator():
-     response = random.choice(
-         [
-             "Hello there! How can I assist you today?",
-             "Hi, human! Is there anything I can help you with?",
-             "Do you need help?",
-         ]
-     )
-     for word in response.split():
-         yield word + " "
-         time.sleep(0.05)

- st.title("Simple chat")
- # Initialize chat history
  if "messages" not in st.session_state:
-     st.session_state.messages = []

- # Display chat messages from history on app rerun
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])

- # Accept user input
- if prompt := st.chat_input("What is up?"):
-     # Add user message to chat history
      st.session_state.messages.append({"role": "user", "content": prompt})
-     # Display user message in chat message container
-     with st.chat_message("user"):
-         st.markdown(prompt)

-     # Display assistant response in chat message container
      with st.chat_message("assistant"):
-         response = st.write_stream(response_generator())
-         # Add assistant response to chat history
-         st.session_state.messages.append({"role": "assistant", "content": response})
+ # import streamlit as st
+ # import random
+ # import time
+ # from mistralai.client import MistralClient
+ # from mistralai.models.chat_completion import ChatMessage
+
+ # api_key = st.secrets["MISTRAL_API_KEY"]
+ # model = "mistral-large-latest"
+
+ # client = MistralClient(api_key=api_key)
+
+ # chat_response = client.chat(
+ #     model=model,
+ #     messages=[ChatMessage(role="user", content="What is the best French cheese?")]
+ # )
+
+ # # Streamed response emulator
+ # def response_generator():
+ #     response = client.chat(
+ #         model=model,
+ #         messages=[ChatMessage(role="user", content="What is the best French cheese?")]
+ #     )
+
+ # st.title("Personality test")
+
+ # # Initialize chat history
+ # if "messages" not in st.session_state:
+ #     st.session_state.messages = []
+
+ # # Display chat messages from history on app rerun
+ # for message in st.session_state.messages:
+ #     with st.chat_message(message["role"]):
+ #         st.markdown(message["content"])
+
+ # # Accept user input
+ # if prompt := st.chat_input("What is up?"):
+ #     # Add user message to chat history
+ #     st.session_state.messages.append({"role": "user", "content": prompt})
+ #     # Display user message in chat message container
+ #     with st.chat_message("user"):
+ #         st.markdown(prompt)
+
+ #     # Display assistant response in chat message container
+ #     with st.chat_message("assistant"):
+ #         response = st.write_stream(response_generator())
+ #         # Add assistant response to chat history
+ #         st.session_state.messages.append({"role": "assistant", "content": response})
+
+ from mistralai.client import MistralClient
+ from mistralai.models.chat_completion import ChatMessage
  import streamlit as st
+ import json
+ import faiss
+ import numpy as np

+ model = "open-mixtral-8x7b"
+ mistral_api_key = st.secrets["MISTRAL_API_KEY"]
+ client = MistralClient(api_key=mistral_api_key)

+ st.title("Assistant ChatBot catalogue 2024")

+ def load_json(rep: str):
+     with open(rep, encoding='UTF-8') as f:
+         return json.load(f)
+
+ def split_chunk(data, chunk_size):
+     data_str = [json.dumps(entry) for entry in data]
+     chunks = [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]
+     print(f"Nb. chunks = {len(chunks)}")
+     return chunks
+
+ def get_text_embedding(input):
+     embeddings_batch_response = client.embeddings(
+         model='mistral-embed',
+         input=input
+     )
+     return embeddings_batch_response.data[0].embedding
+
+ def load_vector_db(text_embedded):
+     d = text_embedded.shape[1]
+     index = faiss.IndexFlatL2(d)
+     index.add(text_embedded)
+     return index
+
+ def find_similar_chunk(index, question_embeddings, chunks):
+     D, I = index.search(question_embeddings, k=2)  # distances, indices
+     return [chunks[i] for i in I.tolist()[0]]
+
+ def prompt_chat(retrieved_chunk, question):
+     return f"""
+     Les informations contextuelles sont les suivantes.
+     ---------------------
+     {retrieved_chunk}
+     ---------------------
+     Compte tenu des informations contextuelles et sans connaissances préalables,
+     réponds en français à la question suivante de manière concise.
+     Utilise des listes pour plus de lisibilité.
+     Question: {question}
+     Réponse:
+     """
+
+ # Load the catalogue and the precomputed chunk embeddings
+ data = load_json('catalogue_2024.json')
+ chunks = split_chunk(data, 3)
+ text_embeddings = np.load("catalogue_embeddings.npy")
+ index = load_vector_db(text_embeddings)
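+ # NOTE: catalogue_embeddings.npy is assumed to hold one mistral-embed vector
+ # per chunk, precomputed offline; this commit does not generate the file.
+ # A sketch of that offline step is given after the diff.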
  if "messages" not in st.session_state:
+     st.session_state["messages"] = [{"role": "assistant", "content": "Comment puis-je vous aider?"}]
+     st.session_state["History"] = []
+     st.session_state.History.append(ChatMessage(role="assistant", content="Comment puis-je vous aider?"))
+
+ for msg in st.session_state.messages:
+     st.chat_message(msg["role"]).write(msg["content"])
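+ # `messages` holds what the UI displays (the raw user prompt), while `History`
+ # holds the ChatMessage objects actually sent to the model, including the
+ # context-augmented prompt built by prompt_chat.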
 
+ if prompt := st.chat_input():
+     question_embeddings = np.array([get_text_embedding(prompt)])
+     retrieved_chunk = find_similar_chunk(index, question_embeddings, chunks)
+     p = prompt_chat(retrieved_chunk=retrieved_chunk, question=prompt)

      st.session_state.messages.append({"role": "user", "content": prompt})
+     st.session_state.History.append(ChatMessage(role="user", content=p))
+     st.chat_message("user").write(prompt)
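+     # History[1:] below skips the stored assistant greeting, so the transcript
+     # sent to the model starts with a user message.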
 
      with st.chat_message("assistant"):
+         message_placeholder = st.empty()
+         full_response = ""
+         for response in client.chat_stream(
+             model=model,
+             messages=st.session_state.History[1:]
+         ):
+             full_response += (response.choices[0].delta.content or "")
+             message_placeholder.markdown(full_response + "|")
+
+         message_placeholder.markdown(full_response)
+
+         st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
+         st.session_state.messages.append({"role": "assistant", "content": full_response})
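
The new app.py loads catalogue_embeddings.npy from disk, but the commit does not include the script that produced it. Below is a minimal sketch of that offline step, reusing the same chunking and the same mistral-embed model as the helpers above; the script name, the joining of each chunk's strings into one embedding input, and the environment-variable lookup are assumptions, not part of this commit.

# offline_embed.py -- hypothetical one-off script, not part of this commit.
import json
import os

import numpy as np
from mistralai.client import MistralClient

client = MistralClient(api_key=os.environ["MISTRAL_API_KEY"])

def load_json(rep: str):
    with open(rep, encoding='UTF-8') as f:
        return json.load(f)

def split_chunk(data, chunk_size):
    # Same chunking as app.py: lists of chunk_size JSON strings.
    data_str = [json.dumps(entry) for entry in data]
    return [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]

def get_text_embedding(text):
    # One mistral-embed vector per call, mirroring the helper in app.py.
    response = client.embeddings(model='mistral-embed', input=text)
    return response.data[0].embedding

chunks = split_chunk(load_json('catalogue_2024.json'), 3)
# One row per chunk so FAISS row indices line up with `chunks` in app.py;
# joining each chunk into a single string is an assumption. Stored as
# float32, the dtype FAISS uses natively.
embeddings = np.array([get_text_embedding(" ".join(chunk)) for chunk in chunks],
                      dtype=np.float32)
np.save("catalogue_embeddings.npy", embeddings)
print(f"Saved {embeddings.shape[0]} embeddings of dimension {embeddings.shape[1]}")

At runtime, st.secrets["MISTRAL_API_KEY"] is read from .streamlit/secrets.toml locally, or from the Space's configured secrets when deployed.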