import streamlit as st
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
api_key = st.secrets["MISTRAL_API_KEY"]
model = "mistral-large-latest"
client = MistralClient(api_key=api_key)
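# st.secrets reads from the Space's configured secrets or, when running
# locally, from a .streamlit/secrets.toml file containing a line such as:
#   MISTRAL_API_KEY = "your-api-key"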
# Streamed response generator: yields text chunks from the Mistral chat stream
def response_generator(messages):
    for chunk in client.chat_stream(model=model, messages=messages):
        yield chunk.choices[0].delta.content or ""
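# st.write_stream consumes such a generator and returns the concatenated
# text, e.g. response = st.write_stream(response_generator(history)).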
st.title("Personality test")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
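# `messages` holds display dicts for re-rendering the transcript on rerun;
# `History` holds the ChatMessage objects sent to the Mistral API.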
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("What is up?"):
    # Add user message to both histories
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.session_state.History.append(ChatMessage(role="user", content=prompt))
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # # Alternative: display the assistant response via st.write_stream
    # with st.chat_message("assistant"):
    #     response = st.write_stream(response_generator(st.session_state.History))
    #     # Add assistant response to chat history
    #     st.session_state.messages.append({"role": "assistant", "content": response})
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in client.chat_stream(
model=model,
messages=st.session_state.History[1:]
):
full_response += (response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "|")
message_placeholder.markdown(full_response)
st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
st.session_state.messages.append({"role": "assistant", "content": full_response})
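
# ----------------------------------------------------------------------
# Earlier RAG draft, kept commented out: chunks a JSON dataset, embeds the
# chunks with mistral-embed, indexes them with FAISS, and answers questions
# in French from the retrieved context.
# ----------------------------------------------------------------------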
# from mistralai.client import MistralClient
# from mistralai.models.chat_completion import ChatMessage
# import streamlit as st
# import json
# import faiss
# import numpy as np
# model = "open-mixtral-8x7b"
# mistral_api_key = st.secrets["MISTRAL_API_KEY"]
# client = MistralClient(api_key=mistral_api_key)
# st.title("Assistant ChatBot")
# def split_chunk(data, chunk_size):
#     """Serialize each entry to JSON and group the strings into chunks."""
#     data_str = [json.dumps(entry) for entry in data]
#     chunks = [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]
#     print(f"Nb. chunks = {len(chunks)}")
#     return chunks
# def get_text_embedding(input):
#     embeddings_batch_response = client.embeddings(
#         model='mistral-embed',
#         input=input
#     )
#     return embeddings_batch_response.data[0].embedding
# def load_vector_db(text_embedded):
#     d = text_embedded.shape[1]
#     index = faiss.IndexFlatL2(d)
#     index.add(text_embedded)
#     return index
# def find_similar_chunk(index, question_embeddings, chunks):
#     D, I = index.search(question_embeddings, k=2)  # distances, indices
#     return [chunks[i] for i in I.tolist()[0]]
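# # prompt_chat builds the French RAG prompt below; in English it reads:
# # "The contextual information is as follows. [context] Given the context
# # and no prior knowledge, answer the following question concisely in
# # French. Use lists for readability."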
# def prompt_chat(retrieved_chunk, question):
#     return f"""
#     Les informations contextuelles sont les suivantes.
#     ---------------------
#     {retrieved_chunk}
#     ---------------------
#     Compte tenu des informations contextuelles et sans connaissances préalables,
#     réponds en français à la question suivante de manière concise.
#     Utilise des listes pour plus de lisibilité.
#     Question: {question}
#     Réponse:
#     """
# # Load the data and initialize the chat state
# if "messages" not in st.session_state:
#     st.session_state["messages"] = [{"role": "assistant", "content": "Comment puis-je vous aider?"}]
#     st.session_state["History"] = []
#     st.session_state.History.append(ChatMessage(role="assistant", content="Comment puis-je vous aider?"))
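# # The step that actually builds `chunks` and `index` never made it into
# # this draft; a minimal sketch, assuming a hypothetical local data.json
# # file and a chunk size of 5:
# with open("data.json") as f:
#     data = json.load(f)
# chunks = split_chunk(data, 5)
# text_embedded = np.array(
#     [get_text_embedding(" ".join(chunk)) for chunk in chunks],
#     dtype="float32",  # FAISS expects float32 vectors
# )
# index = load_vector_db(text_embedded)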
# for msg in st.session_state.messages:
#     st.chat_message(msg["role"]).write(msg["content"])
# if prompt := st.chat_input():
#     question_embeddings = np.array([get_text_embedding(prompt)])
#     retrieved_chunk = find_similar_chunk(index, question_embeddings, chunks)
#     p = prompt_chat(retrieved_chunk=retrieved_chunk, question=prompt)
#     st.session_state.messages.append({"role": "user", "content": prompt})
#     st.session_state.History.append(ChatMessage(role="user", content=p))
#     st.chat_message("user").write(prompt)
#     with st.chat_message("assistant"):
#         message_placeholder = st.empty()
#         full_response = ""
#         for response in client.chat_stream(
#             model=model,
#             messages=st.session_state.History[1:]
#         ):
#             full_response += (response.choices[0].delta.content or "")
#             message_placeholder.markdown(full_response + "|")
#         message_placeholder.markdown(full_response)
#     st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
#     st.session_state.messages.append({"role": "assistant", "content": full_response})