charlie0608 committed
Commit 9957766
1 Parent(s): abae377

Update app.py

Files changed (1)
  1. app.py +126 -117

app.py CHANGED
@@ -1,132 +1,53 @@
- # import streamlit as st
- # import random
- # import time
- # from mistralai.client import MistralClient
- # from mistralai.models.chat_completion import ChatMessage
-
- # api_key = st.secrets["MISTRAL_API_KEY"]
- # model = "mistral-large-latest"
-
- # client = MistralClient(api_key=api_key)
-
- # chat_response = client.chat(
- #     model=model,
- #     messages=[ChatMessage(role="user", content="What is the best French cheese?")]
- # )
-
-
-
- # # Streamed response emulator
- # def response_generator():
- #     response = client.chat(
- #         model=model,
- #         messages=[ChatMessage(role="user", content="What is the best French cheese?")]
- #     )
-
 
- # st.title("Personality test")
 
- # # Initialize chat history
- # if "messages" not in st.session_state:
- #     st.session_state.messages = []
 
- # # Display chat messages from history on app rerun
- # for message in st.session_state.messages:
- #     with st.chat_message(message["role"]):
- #         st.markdown(message["content"])
 
- # # Accept user input
- # if prompt := st.chat_input("What is up?"):
- #     # Add user message to chat history
- #     st.session_state.messages.append({"role": "user", "content": prompt})
- #     # Display user message in chat message container
- #     with st.chat_message("user"):
- #         st.markdown(prompt)
 
- #     # Display assistant response in chat message container
- #     with st.chat_message("assistant"):
- #         response = st.write_stream(response_generator())
- #     # Add assistant response to chat history
- #     st.session_state.messages.append({"role": "assistant", "content": response})
 
 
- from mistralai.client import MistralClient
- from mistralai.models.chat_completion import ChatMessage
- import streamlit as st
- import json
- import faiss
- import numpy as np
-
- model = "open-mixtral-8x7b"
- mistral_api_key = st.secrets["MISTRAL_API_KEY"]
- client = MistralClient(api_key=mistral_api_key)
-
- st.title("Assistant ChatBot catalogue 2024")
-
- def load_json(rep:str):
-     f = open(rep, encoding='UTF-8')
-     return json.load(f)
-
- def split_chunk(data, chunk_size):
-     data_str = [json.dumps(entry) for entry in data]
-     chunk_size = chunk_size
-     chunks = [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]
-     print(f"Nb. chunks = {len(chunks)}")
-     return chunks
-
- def get_text_embedding(input):
-     embeddings_batch_response = client.embeddings(
-         model='mistral-embed',
-         input=input
-     )
-     return embeddings_batch_response.data[0].embedding
-
- def load_vector_db(text_embedded):
-     d = text_embedded.shape[1]
-     index = faiss.IndexFlatL2(d)
-     index.add(text_embedded)
-     return index
-
- def find_similar_chunk(index, question_embeddings, chunks):
-     D, I = index.search(question_embeddings, k=2)  # distance, index
-     return [chunks[i] for i in I.tolist()[0]]
-
- def prompt_chat(retrieved_chunk, question):
-     return f"""
-     Les informations contextuelles sont les suivantes.
-     ---------------------
-     {retrieved_chunk}
-     ---------------------
-     Compte tenu des informations contextuelles et sans connaissances préalables,
-     réponds en français à la question suivante de manière concise.
-     Utilise des listes pour plus de lisibilité.
-     Question: {question}
-     Réponse:
-     """
-
- # Chargement des données
- data = load_json('catalogue_2024.json')
- chunks = split_chunk(data, 3)
- text_embeddings = np.load("catalogue_embeddings.npy")
- index = load_vector_db(text_embeddings)
 
 if "messages" not in st.session_state:
-     st.session_state["messages"] = [{"role": "assistant", "content": "Comment puis-je vous aider?"}]
-     st.session_state["History"] = []
-     st.session_state.History.append(ChatMessage(role="assitant", content="Comment puis-je vous aider?"))
 
- for msg in st.session_state.messages:
-     st.chat_message(msg["role"]).write(msg["content"])
-
- if prompt := st.chat_input():
-     question_embeddings = np.array([get_text_embedding(prompt)])
-     retrieved_chunk = find_similar_chunk(index, question_embeddings, chunks)
-     p = prompt_chat(retrieved_chunk=retrieved_chunk, question=prompt)
 
     st.session_state.messages.append({"role": "user", "content": prompt})
-     st.session_state.History.append(ChatMessage(role="user", content=p))
-     st.chat_message("user").write(prompt)
 
     with st.chat_message("assistant"):
         message_placeholder = st.empty()
@@ -141,4 +62,92 @@ if prompt := st.chat_input():
     message_placeholder.markdown(full_response)
 
     st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
-     st.session_state.messages.append({"role": "assistant", "content": full_response})
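Note on the removed block: it implemented a small retrieval-augmented flow — embed the user question with mistral-embed, look up the two nearest catalogue chunks in a FAISS IndexFlatL2, and splice them into a French answer prompt. A minimal self-contained sketch of that flow follows, assuming the legacy mistralai 0.x SDK used throughout this file and a prebuilt catalogue_embeddings.npy; the embed/retrieve helper names and the placeholder API key are illustrative, not from the repository:

# Sketch of the retrieval step removed by this commit (assumptions: legacy
# mistralai 0.x SDK; float32 embeddings stored in catalogue_embeddings.npy,
# one row per chunk, as produced by the removed split_chunk()/embedding code).
import faiss
import numpy as np
from mistralai.client import MistralClient

client = MistralClient(api_key="YOUR_MISTRAL_API_KEY")  # placeholder; the app reads st.secrets

def embed(text: str) -> list[float]:
    # Same embedding model the removed get_text_embedding() used.
    return client.embeddings(model="mistral-embed", input=[text]).data[0].embedding

embeddings = np.load("catalogue_embeddings.npy").astype("float32")
index = faiss.IndexFlatL2(embeddings.shape[1])  # exact L2 nearest-neighbour search
index.add(embeddings)

def retrieve(question: str, chunks: list[str], k: int = 2) -> list[str]:
    query = np.array([embed(question)], dtype="float32")
    _, ids = index.search(query, k)  # returns (distances, row indices), each shape (1, k)
    return [chunks[i] for i in ids[0]]

IndexFlatL2 does exact search, which is fine at a catalogue's scale; the removed code sensibly loaded precomputed embeddings from disk instead of re-embedding the catalogue on every Streamlit rerun.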
+ import streamlit as st
+ import random
+ import time
+ from mistralai.client import MistralClient
+ from mistralai.models.chat_completion import ChatMessage
+
+ api_key = st.secrets["MISTRAL_API_KEY"]
+ model = "mistral-large-latest"
+
+ client = MistralClient(api_key=api_key)
+
+ chat_response = client.chat(
+     model=model,
+     messages=[ChatMessage(role="user", content="What is the best French cheese?")]
+ )
+
+
+
+ # Streamed response emulator
+ def response_generator():
+     response = client.chat(
+         model=model,
+         messages=[ChatMessage(role="user", content="What is the best French cheese?")]
+     )
+
+
+ st.title("Personality test")
+
+ # Initialize chat history
 if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Accept user input
+ if prompt := st.chat_input("What is up?"):
+     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
+     # Display user message in chat message container
+     with st.chat_message("user"):
+         st.markdown(prompt)
+
+     # # Display assistant response in chat message container
+     # with st.chat_message("assistant"):
+     #     response = st.write_stream(response_generator())
+     # # Add assistant response to chat history
+     # st.session_state.messages.append({"role": "assistant", "content": response})
 
     with st.chat_message("assistant"):
         message_placeholder = st.empty()
 
     message_placeholder.markdown(full_response)
 
     st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
+     st.session_state.messages.append({"role": "assistant", "content": full_response})
+
+
+ # from mistralai.client import MistralClient
+ # from mistralai.models.chat_completion import ChatMessage
+ # import streamlit as st
+ # import json
+ # import faiss
+ # import numpy as np
+
+ # model = "open-mixtral-8x7b"
+ # mistral_api_key = st.secrets["MISTRAL_API_KEY"]
+ # client = MistralClient(api_key=mistral_api_key)
+
+ # st.title("Assistant ChatBot")
+
+ # def split_chunk(data, chunk_size):
+ #     data_str = [json.dumps(entry) for entry in data]
+ #     chunk_size = chunk_size
+ #     chunks = [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]
+ #     print(f"Nb. chunks = {len(chunks)}")
+ #     return chunks
+
+ # def get_text_embedding(input):
+ #     embeddings_batch_response = client.embeddings(
+ #         model='mistral-embed',
+ #         input=input
+ #     )
+ #     return embeddings_batch_response.data[0].embedding
+
+ # def load_vector_db(text_embedded):
+ #     d = text_embedded.shape[1]
+ #     index = faiss.IndexFlatL2(d)
+ #     index.add(text_embedded)
+ #     return index
+
+ # def find_similar_chunk(index, question_embeddings, chunks):
+ #     D, I = index.search(question_embeddings, k=2)  # distance, index
+ #     return [chunks[i] for i in I.tolist()[0]]
+
+ # def prompt_chat(retrieved_chunk, question):
+ #     return f"""
+ #     Les informations contextuelles sont les suivantes.
+ #     ---------------------
+ #     {retrieved_chunk}
+ #     ---------------------
+ #     Compte tenu des informations contextuelles et sans connaissances préalables,
+ #     réponds en français à la question suivante de manière concise.
+ #     Utilise des listes pour plus de lisibilité.
+ #     Question: {question}
+ #     Réponse:
+ #     """
+
+ # # Chargement des données
+
+ # if "messages" not in st.session_state:
+ #     st.session_state["messages"] = [{"role": "assistant", "content": "Comment puis-je vous aider?"}]
+ #     st.session_state["History"] = []
+ #     st.session_state.History.append(ChatMessage(role="assitant", content="Comment puis-je vous aider?"))
+
+ # for msg in st.session_state.messages:
+ #     st.chat_message(msg["role"]).write(msg["content"])
+
+ # if prompt := st.chat_input():
+ #     question_embeddings = np.array([get_text_embedding(prompt)])
+ #     retrieved_chunk = find_similar_chunk(index, question_embeddings, chunks)
+ #     p = prompt_chat(retrieved_chunk=retrieved_chunk, question=prompt)
+
+ #     st.session_state.messages.append({"role": "user", "content": prompt})
+ #     st.session_state.History.append(ChatMessage(role="user", content=p))
+ #     st.chat_message("user").write(prompt)
+
+ #     with st.chat_message("assistant"):
+ #         message_placeholder = st.empty()
+ #         full_response = ""
+ #         for response in client.chat_stream(
+ #             model=model,
+ #             messages=st.session_state.History[1:]
+ #         ):
+ #             full_response += (response.choices[0].delta.content or "")
+ #             message_placeholder.markdown(full_response + "|")
+
+ #         message_placeholder.markdown(full_response)
+
+ #     st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
+ #     st.session_state.messages.append({"role": "assistant", "content": full_response})
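Note on the streaming loop: the diff viewer collapses the unchanged lines between the two hunks, but the commented-out copy above shows the pattern — accumulate each delta.content yielded by client.chat_stream into a Streamlit placeholder. A minimal sketch of that pattern, again assuming the legacy mistralai 0.x SDK; the single-message history is illustrative, not the app's real session state:

# Sketch of the Streamlit token-streaming pattern used in app.py
# (assumptions: legacy mistralai 0.x SDK; model name taken from the new file).
import streamlit as st
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

client = MistralClient(api_key=st.secrets["MISTRAL_API_KEY"])
history = [ChatMessage(role="user", content="What is the best French cheese?")]

with st.chat_message("assistant"):
    placeholder = st.empty()
    full_response = ""
    for chunk in client.chat_stream(model="mistral-large-latest", messages=history):
        # Each streamed chunk carries an incremental text delta (possibly None).
        full_response += chunk.choices[0].delta.content or ""
        placeholder.markdown(full_response + "|")  # cursor effect while streaming
    placeholder.markdown(full_response)  # final render without the cursor

The st.write_stream call in the commented-out demo is the newer Streamlit shortcut for the same idea: it accepts a generator of text fragments and renders them as they arrive.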