import streamlit as st
import time
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

# Read the Mistral API key from Streamlit secrets and set up the client.
api_key = st.secrets["MISTRAL_API_KEY"]
model = "mistral-large-latest"
client = MistralClient(api_key=api_key)
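# Note (version assumption): this file targets the mistralai 0.x Python client
# (MistralClient / ChatMessage). In mistralai >= 1.0 the entry point changed to
# `from mistralai import Mistral` with `client.chat.complete(...)` and
# `client.chat.stream(...)`, so pin `mistralai<1.0` to run this file as written.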
# Streamed response emulator: fetch a complete answer from the API, then
# yield it word by word so st.write_stream can render it progressively.
def response_generator():
    response = client.chat(
        model=model,
        messages=[ChatMessage(role="user", content="What is the best French cheese?")]
    )
    for word in response.choices[0].message.content.split():
        yield word + " "
        time.sleep(0.05)
st.title("Personality test") | |
# Initialize chat history | |
if "messages" not in st.session_state: | |
st.session_state.messages = [] | |
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("What is up?"):
    # Add user message to both histories
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.session_state.History.append(ChatMessage(role="user", content=prompt))
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # # Alternative: display assistant response via st.write_stream
    # with st.chat_message("assistant"):
    #     response = st.write_stream(response_generator())
    # # Add assistant response to chat history
    # st.session_state.messages.append({"role": "assistant", "content": response})
with st.chat_message("assistant"): | |
message_placeholder = st.empty() | |
full_response = "" | |
for response in client.chat_stream( | |
model=model, | |
messages=st.session_state.History[1:] | |
): | |
full_response += (response.choices[0].delta.content or "") | |
message_placeholder.markdown(full_response + "|") | |
message_placeholder.markdown(full_response) | |
st.session_state.History.append(ChatMessage(role="assistant", content=full_response)) | |
st.session_state.messages.append({"role": "assistant", "content": full_response}) | |
# --- Earlier version of the app, kept commented out: a RAG chatbot that
# --- answers in French from a local FAISS index.
# from mistralai.client import MistralClient
# from mistralai.models.chat_completion import ChatMessage
# import streamlit as st
# import json
# import faiss
# import numpy as np

# model = "open-mixtral-8x7b"
# mistral_api_key = st.secrets["MISTRAL_API_KEY"]
# client = MistralClient(api_key=mistral_api_key)

# st.title("Assistant ChatBot")
# def split_chunk(data, chunk_size):
#     # Serialize each entry to JSON and group the strings into fixed-size chunks.
#     data_str = [json.dumps(entry) for entry in data]
#     chunks = [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]
#     print(f"Nb. chunks = {len(chunks)}")
#     return chunks
# def get_text_embedding(input):
#     embeddings_batch_response = client.embeddings(
#         model='mistral-embed',
#         input=input
#     )
#     return embeddings_batch_response.data[0].embedding

# def load_vector_db(text_embedded):
#     d = text_embedded.shape[1]
#     index = faiss.IndexFlatL2(d)
#     index.add(text_embedded)
#     return index

# def find_similar_chunk(index, question_embeddings, chunks):
#     D, I = index.search(question_embeddings, k=2)  # distances, indices
#     return [chunks[i] for i in I.tolist()[0]]
# def prompt_chat(retrieved_chunk, question):
#     return f"""
#     Context information is below.
#     ---------------------
#     {retrieved_chunk}
#     ---------------------
#     Given the context information and without prior knowledge,
#     answer the following question concisely, in French.
#     Use lists for readability.
#     Question: {question}
#     Answer:
#     """
# # Data loading and session-state initialization
# if "messages" not in st.session_state:
#     st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
#     st.session_state["History"] = []
#     st.session_state.History.append(ChatMessage(role="assistant", content="How can I help you?"))

# for msg in st.session_state.messages:
#     st.chat_message(msg["role"]).write(msg["content"])
# if prompt := st.chat_input():
#     question_embeddings = np.array([get_text_embedding(prompt)])
#     retrieved_chunk = find_similar_chunk(index, question_embeddings, chunks)
#     p = prompt_chat(retrieved_chunk=retrieved_chunk, question=prompt)
#     st.session_state.messages.append({"role": "user", "content": prompt})
#     st.session_state.History.append(ChatMessage(role="user", content=p))
#     st.chat_message("user").write(prompt)

#     with st.chat_message("assistant"):
#         message_placeholder = st.empty()
#         full_response = ""
#         for response in client.chat_stream(
#             model=model,
#             messages=st.session_state.History[1:]
#         ):
#             full_response += (response.choices[0].delta.content or "")
#             message_placeholder.markdown(full_response + "|")
#         message_placeholder.markdown(full_response)
#         st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
#         st.session_state.messages.append({"role": "assistant", "content": full_response})
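# The commented-out version above references `index` and `chunks` without ever
# building them; the loading step was evidently dropped. A minimal sketch of
# that step, assuming the source data sits in a local JSON file (the name
# `data.json` is hypothetical) and reusing the helpers defined above:
#
# with open("data.json") as f:
#     data = json.load(f)
# chunks = split_chunk(data, chunk_size=10)
# # Embed one vector per chunk; FAISS expects float32 input.
# text_embedded = np.array(
#     [get_text_embedding(" ".join(chunk)) for chunk in chunks],
#     dtype=np.float32,
# )
# index = load_vector_db(text_embedded)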