BjarneBepaData committed
Commit 98f0633
1 Parent(s): 9957766

Steven is furious

Files changed (6)
  1. .dockerignore +0 -0
  2. Dockerfile +17 -0
  3. README.md +1 -1
  4. app.py +0 -153
  5. app/main.py +0 -0
  6. requirements.txt +2 -2
.dockerignore ADDED
(empty file)
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ # Base image with Python 3.9
+ FROM python:3.9
+
+ # Work inside /code in the container
+ WORKDIR /code
+
+ # Copy the dependency list first so this layer is cached by Docker
+ COPY ./requirements.txt /code/requirements.txt
+
+ # Install the Python dependencies
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ # Copy the application code into the image
+ COPY ./app /code/app
+
+ # Serve the FastAPI app with uvicorn on port 80
+ CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "80"]
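
To smoke-test the image locally, the standard Docker CLI workflow applies; the tag name team3 below is only an example, not part of the commit:

    docker build -t team3 .
    docker run -p 80:80 team3

The -p 80:80 mapping exposes the port that uvicorn listens on per the CMD above.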
README.md CHANGED
@@ -3,7 +3,7 @@ title: Team3
  emoji: 🦀
  colorFrom: yellow
  colorTo: purple
- sdk: streamlit
+ sdk: docker
  sdk_version: 1.33.0
  app_file: app.py
  pinned: false
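
With sdk: docker, Hugging Face Spaces builds the Space from the repository's Dockerfile rather than a managed Streamlit runtime, so the remaining sdk_version: 1.33.0 and app_file: app.py fields are Streamlit-era leftovers that a Docker Space ignores (and app.py itself is deleted in this commit).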
app.py DELETED
@@ -1,153 +0,0 @@
- import streamlit as st
- import random
- import time
- from mistralai.client import MistralClient
- from mistralai.models.chat_completion import ChatMessage
-
- api_key = st.secrets["MISTRAL_API_KEY"]
- model = "mistral-large-latest"
-
- client = MistralClient(api_key=api_key)
-
- chat_response = client.chat(
-     model=model,
-     messages=[ChatMessage(role="user", content="What is the best French cheese?")]
- )
-
-
-
- # Streamed response emulator
- def response_generator():
-     response = client.chat(
-         model=model,
-         messages=[ChatMessage(role="user", content="What is the best French cheese?")]
-     )
-
-
- st.title("Personality test")
-
- # Initialize chat history
- if "messages" not in st.session_state:
-     st.session_state.messages = []
-
- # Display chat messages from history on app rerun
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
- # Accept user input
- if prompt := st.chat_input("What is up?"):
-     # Add user message to chat history
-     st.session_state.messages.append({"role": "user", "content": prompt})
-     # Display user message in chat message container
-     with st.chat_message("user"):
-         st.markdown(prompt)
-
-     # # Display assistant response in chat message container
-     # with st.chat_message("assistant"):
-     #     response = st.write_stream(response_generator())
-     # # Add assistant response to chat history
-     # st.session_state.messages.append({"role": "assistant", "content": response})
-
-     with st.chat_message("assistant"):
-         message_placeholder = st.empty()
-         full_response = ""
-         for response in client.chat_stream(
-             model=model,
-             messages=st.session_state.History[1:]
-         ):
-             full_response += (response.choices[0].delta.content or "")
-             message_placeholder.markdown(full_response + "|")
-
-         message_placeholder.markdown(full_response)
-
-         st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
-         st.session_state.messages.append({"role": "assistant", "content": full_response})
-
-
-
- # from mistralai.client import MistralClient
- # from mistralai.models.chat_completion import ChatMessage
- # import streamlit as st
- # import json
- # import faiss
- # import numpy as np
-
- # model = "open-mixtral-8x7b"
- # mistral_api_key = st.secrets["MISTRAL_API_KEY"]
- # client = MistralClient(api_key=mistral_api_key)
-
- # st.title("Assistant ChatBot")
-
-
-
- # def split_chunk(data, chunk_size):
- #     data_str = [json.dumps(entry) for entry in data]
- #     chunk_size = chunk_size
- #     chunks = [data_str[i:i + chunk_size] for i in range(0, len(data_str), chunk_size)]
- #     print(f"Nb. chunks = {len(chunks)}")
- #     return chunks
-
- # def get_text_embedding(input):
- #     embeddings_batch_response = client.embeddings(
- #         model='mistral-embed',
- #         input=input
- #     )
- #     return embeddings_batch_response.data[0].embedding
-
- # def load_vector_db(text_embedded):
- #     d = text_embedded.shape[1]
- #     index = faiss.IndexFlatL2(d)
- #     index.add(text_embedded)
- #     return index
-
- # def find_similar_chunk(index, question_embeddings, chunks):
- #     D, I = index.search(question_embeddings, k=2)  # distance, index
- #     return [chunks[i] for i in I.tolist()[0]]
-
- # def prompt_chat(retrieved_chunk, question):
- #     return f"""
- #     The context information is below.
- #     ---------------------
- #     {retrieved_chunk}
- #     ---------------------
- #     Given the context information and without prior knowledge,
- #     answer the following question concisely, in French.
- #     Use lists for readability.
- #     Question: {question}
- #     Answer:
- #     """
-
- # # Load the data
-
- # if "messages" not in st.session_state:
- #     st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
- #     st.session_state["History"] = []
- #     st.session_state.History.append(ChatMessage(role="assistant", content="How can I help you?"))
-
- # for msg in st.session_state.messages:
- #     st.chat_message(msg["role"]).write(msg["content"])
-
- # if prompt := st.chat_input():
- #     question_embeddings = np.array([get_text_embedding(prompt)])
- #     retrieved_chunk = find_similar_chunk(index, question_embeddings, chunks)
- #     p = prompt_chat(retrieved_chunk=retrieved_chunk, question=prompt)
-
- #     st.session_state.messages.append({"role": "user", "content": prompt})
- #     st.session_state.History.append(ChatMessage(role="user", content=p))
- #     st.chat_message("user").write(prompt)
-
- #     with st.chat_message("assistant"):
- #         message_placeholder = st.empty()
- #         full_response = ""
- #         for response in client.chat_stream(
- #             model=model,
- #             messages=st.session_state.History[1:]
- #         ):
- #             full_response += (response.choices[0].delta.content or "")
- #             message_placeholder.markdown(full_response + "|")
-
- #         message_placeholder.markdown(full_response)
-
- #         st.session_state.History.append(ChatMessage(role="assistant", content=full_response))
- #         st.session_state.messages.append({"role": "assistant", "content": full_response})
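
Taken together with the Dockerfile, README, and requirements changes, this deletion retires the Streamlit/Mistral chat prototype in favor of a Docker-served FastAPI application.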
app/main.py ADDED
(empty file)
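
The committed app/main.py is empty, yet the Dockerfile's CMD points uvicorn at app.main:app, so the container has nothing to serve until the module defines that object. A minimal sketch of what it will need (the route and message are illustrative assumptions, not from the commit):

    from fastapi import FastAPI

    # The variable name "app" must match the "app.main:app" target in the Dockerfile CMD
    app = FastAPI()

    @app.get("/")
    def read_root():
        # Placeholder endpoint so the container answers requests
        return {"message": "Hello from Team3"}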
requirements.txt CHANGED
@@ -1,3 +1,3 @@
  # Add the other requirements here
- mistralai
- faiss-gpu
+ fastapi
+ elevenlabs
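
One caveat: the Dockerfile's CMD launches uvicorn, but the plain fastapi package does not install it, so the container will likely fail at startup until the server is added to the requirements, for example (package choice assumed, not part of the commit):

    fastapi
    uvicorn[standard]
    elevenlabs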