Spaces: MattiaSangermano
committed 823b9f5
Parent(s): f3a325b
Added app code

Files changed:
- EurLexChat.py +272 -0
- app.py +144 -0
- chat_utils.py +100 -0
- config.yaml +40 -0
- output/OaaEABb.json +0 -0
EurLexChat.py
ADDED
@@ -0,0 +1,272 @@
import json  # used to parse tool-call arguments

from langchain_community.vectorstores import Qdrant
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.base import RunnableLambda
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import StructuredTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.messages import AIMessage
from typing import List
from chat_utils import get_init_modules, SYSTEM_PROMPT, SYSTEM_PROMPT_LOOP, ContextInput, Answer
from langchain_core.documents.base import Document


class EurLexChat:
    def __init__(self, config: dict):
        self.config = config
        self.max_history_messages = self.config["max_history_messages"]
        self.use_functions = (
            'use_context_function' in config["llm"] and
            config["llm"]["use_context_function"] and
            config["llm"]["class"] == "ChatOpenAI")

        self.embedder, self.llm, self.chatDB_class, self.retriever = get_init_modules(config)
        self.max_context_size = config["llm"]["max_context_size"]

        self.prompt = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_PROMPT),
            MessagesPlaceholder(variable_name="history"),
            ("human", "{question}"),
        ])

        self.prompt_loop = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_PROMPT_LOOP),
            ("human", "History: {history}. Message:"),
        ])

        self.chain_loop_answer = self.prompt_loop | self.llm

        if self.use_functions:
            GET_CONTEXT_TOOL = StructuredTool.from_function(
                func=self.get_context,
                name="get_context",
                description="To be used whenever the provided context is empty or the user changes the topic of the conversation and you need the context for the topic. " +
                            "This function must be called only when it is strictly necessary. " +
                            "This function must not be called if you already have the information to answer the user.",
                args_schema=ContextInput
            )

            self.llm_with_functions = self.llm.bind(tools=[convert_to_openai_tool(GET_CONTEXT_TOOL)])
            chain = self.prompt | RunnableLambda(self._resize_history) | self.llm_with_functions
        else:
            chain = self.prompt | RunnableLambda(self._resize_history) | self.llm

        self.chain_with_history = RunnableWithMessageHistory(
            chain,
            self.get_chat_history,
            input_messages_key="question",
            history_messages_key="history",
        )

        self.relevant_documents_pipeline = self.retriever | self._parse_documents

    def _resize_history(self, input_dict):
        """
        Resize the message history.

        Args:
            input_dict: The LLM input containing the message history.

        Returns:
            dict: The resized version of input_dict.
        """
        messages = input_dict.messages
        if (len(messages) - 2) > self.max_history_messages:
            # Keep the system message plus the most recent turns.
            messages = [messages[0]] + messages[-(self.max_history_messages + 1):]
            input_dict.messages = messages
        return input_dict

    def get_chat_history(self, session_id: str):
        """
        Retrieve the chat history instance for a specific session ID.

        Args:
            session_id (str): The unique identifier for the session.

        Returns:
            Chat history object: An instance of the appropriate chat history class.
        """
        kwargs = self.config["chatDB"]["kwargs"]
        if self.config["chatDB"]["class"] == 'FileChatMessageHistory':
            file_path = f"{kwargs['output_path']}/{session_id}.json"
            return self.chatDB_class(file_path=file_path)
        else:
            return self.chatDB_class(session_id=session_id, **kwargs)

    def _parse_documents(self, docs: List[Document]) -> List[dict]:
        """
        Parse a list of documents into a standardized format.

        Args:
            docs (List[Document]): A list of documents to parse.

        Returns:
            List[dict]: A list of dictionaries, each containing parsed information from the input documents.
        """
        parsed_documents = []
        for doc in docs:
            parsed_documents.append({
                'text': doc.page_content,
                'source': doc.metadata["source"],
                '_id': doc.metadata["_id"]
            })
        return parsed_documents

    def _format_context_docs(self, context_docs: List[dict]) -> str:
        """
        Format a list of documents into a single string.

        Args:
            context_docs (List[dict]): A list of dictionaries containing the text of the context documents.

        Returns:
            str: A string containing the concatenated text of all context documents.
        """
        context_str = ''
        for doc in context_docs:
            context_str += doc['text'] + "\n\n"
        return context_str

    def get_relevant_docs(self, question: str) -> List[dict]:
        """
        Retrieve documents relevant to a given question.

        Args:
            question (str): The question for which relevant documents are retrieved.

        Returns:
            List[dict]: A list of relevant documents.
        """
        docs = self.relevant_documents_pipeline.invoke(question)
        return docs

    def get_context(self, text: str) -> str:
        """
        Retrieve context for a given text.

        Args:
            text (str): The text for which context is retrieved.

        Returns:
            str: A formatted string containing the texts of the relevant documents.
        """
        docs = self.get_relevant_docs(text)
        return self._format_context_docs(docs)

    def _remove_last_messages(self, session_id: str, n: int) -> None:
        """
        Remove the last n messages from the chat history of a specific session.

        Args:
            session_id (str): The session ID for which messages are removed.
            n (int): The number of trailing messages to remove.
        """
        chat_history = self.get_chat_history(session_id=session_id)
        message_history = chat_history.messages
        chat_history.clear()
        message_history = message_history[:-n]
        for message in message_history:
            chat_history.add_message(message)

    def _format_history(self, session_id: str) -> str:
        """
        Format the chat history of a specific session into a string.

        Args:
            session_id (str): The session ID for which the chat history is formatted.

        Returns:
            str: A formatted string containing the chat history of the specified session.
        """
        chat_history = self.get_chat_history(session_id).messages
        formatted_history = ""
        for message in chat_history:
            formatted_history += f"{message.type}: {message.content}\n\n"
        return formatted_history

    def _resize_context(self, context_docs: List[dict]) -> List[dict]:
        """
        Resize the context to stay within the token limit.
        Documents whose text would push the total past max_context_size
        are skipped.

        Args:
            context_docs (List[dict]): List of formatted documents.

        Returns:
            List[dict]: The list of resized documents.
        """
        lengths = [self.llm.get_num_tokens(doc['text']) for doc in context_docs]
        resized_contexts = []
        total_len = 0
        for i, l in enumerate(lengths):
            if l + total_len <= self.max_context_size:
                resized_contexts.append(context_docs[i])
                total_len += l
        return resized_contexts

    def get_answer(self, session_id: str, question: str, context_docs: List[dict], from_tool: bool = False) -> Answer:
        """
        Get an answer to a question for a specific session, considering the context documents and the message history.

        Args:
            session_id (str): The session ID for which the answer is retrieved.
            question (str): The new user message.
            context_docs (List[dict]): A list of documents used as context to answer the user message.
            from_tool (bool, optional): Whether the question originates from a tool call. Defaults to False.

        Returns:
            Answer: An object containing the answer, along with a new list of context documents
                if those provided were insufficient to answer the question.
        """
        resized_docs = self._resize_context(context_docs)
        context = self._format_context_docs(resized_docs)

        result = self.chain_with_history.invoke(
            {"context": context, "question": question},
            config={"configurable": {"session_id": session_id}}
        )

        if self.use_functions and len(result.additional_kwargs) > 0:
            if from_tool:
                # The model asked for new context twice in a row: break the
                # loop and ask the user for more information instead.
                self._remove_last_messages(session_id=session_id, n=1)
                history = self._format_history(session_id)
                result = self.chain_loop_answer.invoke({'history': history})
                self.get_chat_history(session_id=session_id).add_message(AIMessage(result.content))
                return Answer(answer=result.content, status=-1)
            # Parse the tool-call arguments (a JSON string) instead of eval-ing them.
            text = json.loads(result.additional_kwargs['tool_calls'][0]['function']['arguments'])['text']
            new_docs = self.get_relevant_docs(text)
            # Drop the user message and the tool-call reply before retrying
            # with the freshly retrieved context.
            self._remove_last_messages(session_id=session_id, n=2)

            result = self.get_answer(
                session_id=session_id,
                question=question,
                context_docs=new_docs,
                from_tool=True
            )
            if result.status == 1:
                return Answer(answer=result.answer, new_documents=new_docs)
            else:
                return Answer(answer=result.answer)
        return Answer(answer=result.content)
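The class above is driven in two steps: fetch an initial pool of context documents, then answer each turn against that pool, swapping in fresh documents whenever a tool call triggers a new retrieval. A minimal driver sketch, assuming a filled-in config.yaml and valid OpenAI/Qdrant credentials (the session ID and question are illustrative, not part of this commit):

import yaml
from EurLexChat import EurLexChat

with open("config.yaml") as f:
    config = yaml.safe_load(f)

chat = EurLexChat(config=config)

question = "What does EU law require for data-processing consent?"
docs = chat.get_relevant_docs(question=question)   # initial context pool
answer = chat.get_answer(
    session_id="demo1234",   # any unique string; app.py generates a random one
    question=question,
    context_docs=docs,
)
print(answer.answer)
if answer.new_documents:     # the model re-retrieved context via the tool
    docs = answer.new_documents

This mirrors how app.py below consumes the class: it keeps the document pool between turns and replaces it only when Answer.new_documents is set.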
app.py
ADDED
@@ -0,0 +1,144 @@
import gradio as gr
from EurLexChat import EurLexChat
import yaml
import random
import string
import argparse
import os

# Credentials and endpoints are injected via environment variables,
# overriding the placeholder values in config.yaml.
openai_org_key = os.getenv("OPENAI_ORG_KEY")
openai_key = os.getenv("OPENAI_KEY")
ui_pwd = os.getenv("pwd")
ui_user = os.getenv("user")
qdrant_url = os.getenv("url")
qdrant_key = os.getenv("qdrant_key")


def generate_random_string(length):
    # Generate a random string of the specified length using letters and digits
    characters = string.ascii_letters + string.digits
    random_string = ''.join(random.choice(characters) for _ in range(length))
    return random_string


class Documents():
    # Mutable holder for the context documents shown in the sidebar.
    def __init__(self) -> None:
        self.documents = []


parser = argparse.ArgumentParser(description="Chat-eur-lex ui")
parser.add_argument('--config_path',
                    dest='config_path',
                    metavar='config_path',
                    type=str,
                    help='The path to the config file that contains all the settings for the chat engine',
                    default='config.yaml')
args = parser.parse_args()

# Read the config file
with open(args.config_path, 'r') as file:
    config = yaml.safe_load(file)

config["embeddings"]["kwargs"]["openai_api_key"] = openai_key
config["embeddings"]["kwargs"]["openai_organization"] = openai_org_key
config["llm"]["kwargs"]["openai_api_key"] = openai_key
config["llm"]["kwargs"]["openai_organization"] = openai_org_key
config["vectorDB"]["kwargs"]["url"] = qdrant_url
config["vectorDB"]["kwargs"]["api_key"] = qdrant_key

chat = EurLexChat(config=config)
docs = Documents()


def remove_doc(btn):
    # Remove the selected document and refresh the sidebar.
    docs.documents.pop(btn)
    new_accordions, new_texts = set_new_docs_ui(docs.documents)
    return [*new_accordions, *new_texts]


def get_answer(message, history, session_id):
    s = session_id
    if len(history) == 0:
        # First turn: retrieve the initial context and start a new session.
        docs.documents = chat.get_relevant_docs(question=message)
        s = generate_random_string(7)
    result = chat.get_answer(s, message, docs.documents)
    history.append((message, result.answer))
    if result.new_documents:
        docs.documents = result.new_documents
    accordions, list_texts = set_new_docs_ui(docs.documents)
    return ['', history, gr.Column(scale=1, visible=True), *accordions, *list_texts, s]


def set_new_docs_ui(documents):
    # Rebuild the fixed pool of accordion/textbox slots: one per retrieved
    # document, hiding any unused slots.
    new_accordions = []
    new_texts = []
    for i in range(len(accordions)):
        if i < len(documents):
            new_accordions.append(gr.update(accordions[i].elem_id, label=f"{documents[i]['text'][:45]}...", visible=True, open=False))
            new_texts.append(gr.update(list_texts[i].elem_id, value=f"{documents[i]['text']}...", visible=True))
        else:
            new_accordions.append(gr.update(accordions[i].elem_id, label="", visible=False))
            new_texts.append(gr.update(list_texts[i].elem_id, value="", visible=False))
    return new_accordions, new_texts


def clean_page():
    docs.documents = []
    accordions, list_texts = set_new_docs_ui(docs.documents)
    return ["", [], None, *accordions, *list_texts]


list_texts = []
accordions = []
states = []
delete_buttons = []

block = gr.Blocks()
with block:
    gr.Markdown("""
    <h1><center>Chat-EUR-Lex prototype - Alpha version</center></h1>
    """)
    state = gr.State(value=None)
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot()
            with gr.Row():
                message = gr.Textbox(scale=10)
                submit = gr.Button("Send", scale=1)
                clear = gr.Button("Clear", scale=1)

        with gr.Column(scale=1, visible=False) as col:
            gr.Markdown("""<h3><center>Context documents</center></h3>""")
            for i in range(config['vectorDB']['retriever_args']['search_kwargs']['k']):
                with gr.Accordion(label="", elem_id=f'accordion_{i}', open=False) as acc:
                    list_texts.append(gr.Textbox("", elem_id=f'text_{i}', show_label=False, lines=10))
                    btn = gr.Button("Remove document")
                    delete_buttons.append(btn)
                    states.append(gr.State(i))
                accordions.append(acc)

    with gr.Row():
        with gr.Column(scale=3):
            gr.HTML("""""")
            gr.HTML("""<div>
            <h3>Disclaimer</h3>
            <p><a href="https://github.com/Aptus-AI/chat-eur-lex/">Chat-EUR-Lex prototype</a> is a limited-risk AI system developed by the
            <a href="https://www.igsg.cnr.it/en/">Institute of Legal Informatics and Judicial Systems (IGSG-CNR)</a> and <a href="https://www.aptus.ai/">Aptus.AI</a>.
            The prototype is an AI chatbot, so you are interacting with a machine, not with a human person. The prototype uses the OpenAI GPT-4 language model.</p>

            <p>The <a href="https://github.com/Aptus-AI/chat-eur-lex/">Chat-EUR-Lex project</a> is funded by the European Union within the framework of the NGI Search project under grant agreement No 101069364.
            Views and opinions expressed are however those of the author(s) only and do not necessarily reflect those of the European Union or the European Commission.
            Contact us: <a href="mailto:chat-eur-lex@igsg.cnr.it">chat-eur-lex@igsg.cnr.it</a>.</p>
            </div>""")

    clear.click(clean_page, outputs=[message, chatbot, state, *accordions, *list_texts])
    message.submit(get_answer, inputs=[message, chatbot, state], outputs=[message, chatbot, col, *accordions, *list_texts, state])
    submit.click(get_answer, inputs=[message, chatbot, state], outputs=[message, chatbot, col, *accordions, *list_texts, state])
    for i, b in enumerate(delete_buttons):
        b.click(remove_doc, inputs=states[i], outputs=[*accordions, *list_texts])

block.launch(debug=True, auth=(ui_user, ui_pwd))
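Note that app.py reads every credential from environment variables and overwrites the matching config entries at startup, so nothing secret needs to live in config.yaml. A sketch of the variables a local run would need (names exactly as read by the os.getenv calls above; all values are placeholders):

import os

# Placeholder values only; in the Space these come from repository secrets.
os.environ["OPENAI_KEY"] = "sk-..."            # OpenAI API key
os.environ["OPENAI_ORG_KEY"] = "org-..."       # OpenAI organization ID
os.environ["user"] = "admin"                   # Gradio basic-auth username
os.environ["pwd"] = "change-me"                # Gradio basic-auth password
os.environ["url"] = "https://localhost:6333"   # Qdrant endpoint
os.environ["qdrant_key"] = "..."               # Qdrant API key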
chat_utils.py
ADDED
@@ -0,0 +1,100 @@
from dataclasses import dataclass
from typing import Optional, List
from langchain.pydantic_v1 import BaseModel, Field

SYSTEM_PROMPT = (
    "You are an assistant specialized in the legal and compliance field who must answer and converse with the user using the context provided. "
    "When you answer the user, if it is relevant, cite the laws and articles you are referring to. NEVER mention the use of context in your answers. "
    "If you believe the question cannot be answered from the given context, do not make up an answer. Answer in the same language the user is speaking.\n\n ### Context:\n {context}"
)

SYSTEM_PROMPT_LOOP = (
    "You are an assistant who must inform the user that you do not have enough information to answer and ask if the user can provide you with additional information. "
    "This answer must be adapted to the conversation with the user that is provided to you. Just write down the answer."
)


@dataclass
class Answer():
    answer: str
    new_documents: Optional[List] = None
    status: Optional[int] = 1


class ContextInput(BaseModel):
    text: str = Field(
        title="Text",
        description="Self-explanatory summary describing what the user is asking for"
    )


def get_instance_dynamic_class(lib_path: str, class_name: str, **kwargs):
    """
    Instantiate a dynamically imported class from a given library path and class name.

    Args:
        lib_path (str): The path to the library/module containing the class.
        class_name (str): The name of the class to instantiate.
        **kwargs: Additional keyword arguments to pass to the class constructor.

    Returns:
        An instance of the dynamically imported class, initialized with the provided arguments.
    """
    mod = __import__(lib_path, fromlist=[class_name])
    dynamic_class = getattr(mod, class_name)
    return dynamic_class(**kwargs)


def get_init_modules(config):
    embedder = get_instance_dynamic_class(
        lib_path='langchain_community.embeddings',
        class_name=config["embeddings"]["class"],
        **config["embeddings"]["kwargs"]
    )

    llm = get_instance_dynamic_class(
        lib_path='langchain_community.chat_models',
        class_name=config["llm"]["class"],
        **config["llm"]["kwargs"]
    )

    mod_chat = __import__("langchain_community.chat_message_histories",
                          fromlist=[config["chatDB"]["class"]])
    chatDB_class = getattr(mod_chat, config["chatDB"]["class"])
    retriever = get_vectorDB_module(config['vectorDB'], embedder)

    return embedder, llm, chatDB_class, retriever


def get_vectorDB_module(db_config, embedder):
    mod_chat = __import__("langchain_community.vectorstores",
                          fromlist=[db_config["class"]])
    vectorDB_class = getattr(mod_chat, db_config["class"])

    if db_config["class"] == 'Qdrant':
        from qdrant_client import QdrantClient
        import inspect

        # Get the QdrantClient init parameter names from its signature,
        # so client kwargs can be separated from vector-store kwargs.
        signature_params = inspect.signature(QdrantClient.__init__).parameters.values()
        params_to_exclude = ['self', 'kwargs']
        client_args = [el.name for el in list(signature_params) if el.name not in params_to_exclude]

        client_kwargs = {k: v for k, v in db_config['kwargs'].items() if k in client_args}
        db_kwargs = {k: v for k, v in db_config['kwargs'].items() if k not in client_kwargs}

        client = QdrantClient(**client_kwargs)

        retriever = vectorDB_class(client, embeddings=embedder, **db_kwargs).as_retriever(
            search_type=db_config["retriever_args"]["search_type"],
            search_kwargs=db_config["retriever_args"]["search_kwargs"]
        )
    else:
        retriever = vectorDB_class(embeddings=embedder, **db_config["kwargs"]).as_retriever(
            search_type=db_config["retriever_args"]["search_type"],
            search_kwargs=db_config["retriever_args"]["search_kwargs"]
        )

    return retriever
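get_instance_dynamic_class is the small factory the whole configuration scheme rests on: the YAML names a class, and the helper imports and instantiates it at runtime. An illustration using the chat-history class configured elsewhere in this commit (the file path is an example):

from chat_utils import get_instance_dynamic_class

# Equivalent to:
#   from langchain_community.chat_message_histories import FileChatMessageHistory
#   history = FileChatMessageHistory(file_path="./output/demo.json")
history = get_instance_dynamic_class(
    lib_path="langchain_community.chat_message_histories",
    class_name="FileChatMessageHistory",
    file_path="./output/demo.json",
)
history.add_user_message("Hello")   # standard LangChain message-history API
print(history.messages)

Swapping the embeddings, LLM, history store, or vector store is then a one-line YAML change, as long as the new class's constructor accepts the kwargs given in the config.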
config.yaml
ADDED
@@ -0,0 +1,40 @@
vectorDB:
  class: Qdrant
  kwargs:
    url: ""
    api_key: ""
    collection_name: chat-eur-lex

  retriever_args:
    search_type: mmr
    search_kwargs:
      k: 15
      fetch_k: 300
      score_threshold: 0.0
      lambda_mult: 0.8

embeddings:
  class: OpenAIEmbeddings
  kwargs:
    openai_api_key: ""
    openai_organization: ""
    model: text-embedding-ada-002

llm:
  class: ChatOpenAI
  use_context_function: True
  max_context_size: 6000
  kwargs:
    openai_organization: ""
    openai_api_key: ""
    model_name: gpt-4
    temperature: 0.8

chatDB:
  class: FileChatMessageHistory
  kwargs:
    output_path: ./output

max_history_messages: 5
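One coupling worth flagging: search_kwargs is passed verbatim to the vector store's as_retriever in chat_utils.py, and its k value also fixes the number of context-document slots app.py builds in the sidebar. A quick way to check what a given config will produce (paths exactly as in this file):

import yaml

with open("config.yaml") as f:
    config = yaml.safe_load(f)

retriever_args = config["vectorDB"]["retriever_args"]
print(retriever_args["search_type"])             # mmr
print(retriever_args["search_kwargs"]["k"])      # 15 docs retrieved / UI slots
print(config["llm"]["max_context_size"])         # 6000-token context budget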
output/OaaEABb.json
ADDED
File without changes