seanpedrickcase committed • Commit 4a190c2 • 1 Parent(s): 232a079

Updated packages, models, preparing for use with AWS (in background)
- README.md +1 -1
- app.py +101 -37
- chatfuncs/auth.py +48 -0
- chatfuncs/aws_functions.py +205 -0
- chatfuncs/chatfuncs.py +242 -100
- chatfuncs/helper_functions.py +228 -0
- chatfuncs/ingest.py +10 -4
- chatfuncs/llm_api_call.py +925 -0
- chatfuncs/prompts.py +10 -1
- requirements.txt +11 -8
- requirements_cpu.txt +11 -7
README.md
CHANGED

@@ -4,7 +4,7 @@ emoji: π
 colorFrom: yellow
 colorTo: yellow
 sdk: gradio
-sdk_version:
+sdk_version: 5.8.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py
CHANGED

@@ -1,9 +1,10 @@
 # Load in packages
 
 import os
+import socket
 
 from typing import Type
-from
+from langchain_huggingface.embeddings import HuggingFaceEmbeddings#, HuggingFaceInstructEmbeddings
 from langchain_community.vectorstores import FAISS
 import gradio as gr
 import pandas as pd
@@ -12,17 +13,37 @@ from transformers import AutoTokenizer
 import torch
 
 from llama_cpp import Llama
-from huggingface_hub import hf_hub_download
+from huggingface_hub import hf_hub_download
+from chatfuncs.ingest import embed_faiss_save_to_zip
+from chatfuncs.helper_functions import get_or_create_env_var
+
+from chatfuncs.helper_functions import ensure_output_folder_exists, get_connection_params, output_folder, get_or_create_env_var, reveal_feedback_buttons, wipe_logs
+from chatfuncs.aws_functions import upload_file_to_s3
+#from chatfuncs.llm_api_call import llm_query
+from chatfuncs.auth import authenticate_user
 
 PandasDataFrame = Type[pd.DataFrame]
 
+from datetime import datetime
+today_rev = datetime.now().strftime("%Y%m%d")
+
+ensure_output_folder_exists()
+
+host_name = socket.gethostname()
+
+access_logs_data_folder = 'logs/' + today_rev + '/' + host_name + '/'
+feedback_data_folder = 'feedback/' + today_rev + '/' + host_name + '/'
+usage_data_folder = 'usage/' + today_rev + '/' + host_name + '/'
+
 # Disable cuda devices if necessary
 #os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
 
 #from chatfuncs.chatfuncs import *
 import chatfuncs.ingest as ing
 
-
+###
+# Load preset embeddings, vectorstore, and model
+###
 
 embeddings_name = "BAAI/bge-base-en-v1.5"
 
@@ -68,7 +89,7 @@ def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_d
     if torch_device is None:
         torch_device = chatf.torch_device
 
-    if model_type == "Phi 3 Mini (larger, slow)":
+    if model_type == "Phi 3.5 Mini (larger, slow)":
         if torch_device == "cuda":
             gpu_config.update_gpu(gpu_layers)
         print("Loading with", gpu_config.n_gpu_layers, "model layers sent to GPU.")
@@ -84,8 +105,8 @@ def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_d
         try:
             model = Llama(
                 model_path=hf_hub_download(
-                    repo_id=os.environ.get("REPO_ID", "QuantFactory/Phi-3-mini-128k-instruct-GGUF"),
-                    filename=os.environ.get("MODEL_FILE", "Phi-3-mini-128k-instruct.Q4_K_M.gguf")
+                    repo_id=os.environ.get("REPO_ID", "QuantFactory/Phi-3.5-mini-instruct-GGUF"),# "QuantFactory/Phi-3-mini-128k-instruct-GGUF"), # "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF-v2"), #"microsoft/Phi-3-mini-4k-instruct-gguf"),#"TheBloke/Mistral-7B-OpenOrca-GGUF"),
+                    filename=os.environ.get("MODEL_FILE", "Phi-3.5-mini-instruct.Q4_K_M.gguf") #"Phi-3-mini-128k-instruct.Q4_K_M.gguf") #"Meta-Llama-3-8B-Instruct-v2.Q6_K.gguf") #"Phi-3-mini-4k-instruct-q4.gguf")#"mistral-7b-openorca.Q4_K_M.gguf"),
                 ),
                 **vars(gpu_config) # change n_gpu_layers if you have more or less VRAM
             )
@@ -95,17 +116,17 @@ def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_d
             print(e)
             model = Llama(
                 model_path=hf_hub_download(
-                    repo_id=os.environ.get("REPO_ID", "QuantFactory/Phi-3-mini-128k-instruct-GGUF"),
-                    filename=os.environ.get("MODEL_FILE", "Phi-3-mini-128k-instruct.Q4_K_M.gguf"),
+                    repo_id=os.environ.get("REPO_ID", "QuantFactory/Phi-3.5-mini-instruct-GGUF"), #"QuantFactory/Phi-3-mini-128k-instruct-GGUF"), #, "microsoft/Phi-3-mini-4k-instruct-gguf"),#"QuantFactory/Meta-Llama-3-8B-Instruct-GGUF-v2"), #"microsoft/Phi-3-mini-4k-instruct-gguf"),#"TheBloke/Mistral-7B-OpenOrca-GGUF"),
+                    filename=os.environ.get("MODEL_FILE", "Phi-3.5-mini-instruct.Q4_K_M.gguf"), # "Phi-3-mini-128k-instruct.Q4_K_M.gguf") # , #"Meta-Llama-3-8B-Instruct-v2.Q6_K.gguf") #"Phi-3-mini-4k-instruct-q4.gguf"),#"mistral-7b-openorca.Q4_K_M.gguf"),
                 ),
                 **vars(cpu_config)
             )
 
         tokenizer = []
 
-    if model_type == "Flan Alpaca (small, fast)":
+    if model_type == "Qwen 2 0.5B (small, fast)":
         # Huggingface chat model
-        hf_checkpoint = 'declare-lab/flan-alpaca-large'#'declare-lab/flan-alpaca-base' # # #
+        hf_checkpoint = 'Qwen/Qwen2-0.5B-Instruct'# 'declare-lab/flan-alpaca-large'#'declare-lab/flan-alpaca-base' # # # 'Qwen/Qwen1.5-0.5B-Chat' #
 
         def create_hf_model(model_name):
 
@@ -113,14 +134,14 @@ def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_d
 
             if torch_device == "cuda":
                 if "flan" in model_name:
-                    model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto"
+                    model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto")#, torch_dtype=torch.float16)
                 else:
-                    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto"
+                    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")#, torch_dtype=torch.float16)
             else:
                 if "flan" in model_name:
-                    model = AutoModelForSeq2SeqLM.from_pretrained(model_name
+                    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)#, torch_dtype=torch.float16)
                 else:
-                    model = AutoModelForCausalLM.from_pretrained(model_name
+                    model = AutoModelForCausalLM.from_pretrained(model_name)#, trust_remote_code=True)#, torch_dtype=torch.float16)
 
             tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length = chatf.context_length)
 
@@ -138,10 +159,10 @@ def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_d
     return model_type, load_confirmation, model_type
 
 # Both models are loaded on app initialisation so that users don't have to wait for the models to be downloaded
-model_type = "Phi 3 Mini (larger, slow)"
-load_model(model_type, chatf.gpu_layers, chatf.gpu_config, chatf.cpu_config, chatf.torch_device)
+#model_type = "Phi 3.5 Mini (larger, slow)"
+#load_model(model_type, chatf.gpu_layers, chatf.gpu_config, chatf.cpu_config, chatf.torch_device)
 
-model_type = "Flan Alpaca (small, fast)"
+model_type = "Qwen 2 0.5B (small, fast)"
 load_model(model_type, 0, chatf.gpu_config, chatf.cpu_config, chatf.torch_device)
 
 def docs_to_faiss_save(docs_out:PandasDataFrame, embeddings=embeddings):
@@ -152,25 +173,30 @@ def docs_to_faiss_save(docs_out:PandasDataFrame, embeddings=embeddings):
 
     vectorstore_func = FAISS.from_documents(documents=docs_out, embedding=embeddings)
 
-
     chatf.vectorstore = vectorstore_func
 
     out_message = "Document processing complete"
 
     return out_message, vectorstore_func
-
 # Gradio chat
 
-block = gr.Blocks(theme = gr.themes.Base())#css=".gradio-container {background-color: black}")
 
-with block:
+###
+# RUN UI
+###
+
+app = gr.Blocks(theme = gr.themes.Base())#css=".gradio-container {background-color: black}")
+
+with app:
     ingest_text = gr.State()
     ingest_metadata = gr.State()
     ingest_docs = gr.State()
 
     model_type_state = gr.State(model_type)
    embeddings_state = gr.State(chatf.embeddings)#globals()["embeddings"])
-    vectorstore_state = gr.State(chatf.vectorstore)#globals()["vectorstore"])
+    vectorstore_state = gr.State(chatf.vectorstore)#globals()["vectorstore"])
+
+    relevant_query_state = gr.Checkbox(value=True, visible=False)
 
     model_state = gr.State() # chatf.model (gives error)
     tokenizer_state = gr.State() # chatf.tokenizer (gives error)
@@ -178,9 +204,26 @@ with block:
     chat_history_state = gr.State()
     instruction_prompt_out = gr.State()
 
+    session_hash_state = gr.State()
+    s3_output_folder_state = gr.State()
+
+    session_hash_textbox = gr.Textbox(value="", visible=False)
+    s3_logs_output_textbox = gr.Textbox(label="S3 logs", visible=False)
+
+    access_logs_state = gr.State(access_logs_data_folder + 'dataset1.csv')
+    access_s3_logs_loc_state = gr.State(access_logs_data_folder)
+    usage_logs_state = gr.State(usage_data_folder + 'dataset1.csv')
+    usage_s3_logs_loc_state = gr.State(usage_data_folder)
+    feedback_logs_state = gr.State(feedback_data_folder + 'dataset1.csv')
+    feedback_s3_logs_loc_state = gr.State(feedback_data_folder)
+
     gr.Markdown("<h1><center>Lightweight PDF / web page QA bot</center></h1>")
 
-    gr.Markdown("Chat with PDF, web page or (new) csv/Excel documents. The default is a small model (
+    gr.Markdown("Chat with PDF, web page or (new) csv/Excel documents. The default is a small model (Qwen 2 0.5B), that can only answer specific questions that are answered in the text. It cannot give overall impressions of, or summarise the document. The alternative (Phi 3.5 Mini (larger, slow)), can reason a little better, but is much slower (See Advanced tab).\n\nBy default the Lambeth Borough Plan '[Lambeth 2030 : Our Future, Our Lambeth](https://www.lambeth.gov.uk/better-fairer-lambeth/projects/lambeth-2030-our-future-our-lambeth)' is loaded. If you want to talk about another document or web page, please select from the second tab. If switching topic, please click the 'Clear chat' button.\n\nCaution: This is a public app. Please ensure that the document you upload is not sensitive is any way as other users may see it! Also, please note that LLM chatbots may give incomplete or incorrect information, so please use with care.")
+
+    with gr.Accordion(label="Use Gemini or AWS Claude model", open=False, visible=False):
+        api_model_choice = gr.Dropdown(value = "None", choices = ["gemini-1.5-flash-002", "gemini-1.5-pro-002", "anthropic.claude-3-haiku-20240307-v1:0", "anthropic.claude-3-sonnet-20240229-v1:0", "None"], label="LLM model to use", multiselect=False, interactive=True, visible=False)
+        in_api_key = gr.Textbox(value = "", label="Enter Gemini API key (only if using Google API models)", lines=1, type="password",interactive=True, visible=False)
 
     with gr.Row():
         current_source = gr.Textbox(label="Current data source(s)", value="Lambeth_2030-Our_Future_Our_Lambeth.pdf", scale = 10)
@@ -190,7 +233,7 @@
 
     with gr.Row():
         #chat_height = 500
-        chatbot = gr.Chatbot(avatar_images=('user.jfif', 'bot.jpg'),bubble_full_width = False, scale = 1) # , height=chat_height
+        chatbot = gr.Chatbot(avatar_images=('user.jfif', 'bot.jpg'),bubble_full_width = False, scale = 1, type='tuples') # , height=chat_height
         with gr.Accordion("Open this tab to see the source paragraphs used to generate the answer", open = False):
             sources = gr.HTML(value = "Source paragraphs with the most relevant text will appear here") # , height=chat_height
 
@@ -210,7 +253,6 @@
             "What is the vision statement for Lambeth?",
             "What are the commitments for Lambeth?",
             "What are the 2030 outcomes for Lambeth?"])
-
 
     current_topic = gr.Textbox(label="Feature currently disabled - Keywords related to current conversation topic.", placeholder="Keywords related to the conversation topic will appear here")
 
@@ -230,14 +272,16 @@
             in_csv = gr.File(label="Upload CSV/Excel file", file_count="multiple", file_types=['.csv', '.xlsx'])
             in_text_column = gr.Textbox(label="Enter column name where text is stored")
             load_csv = gr.Button(value="Load in CSV/Excel file", variant="secondary", scale=0)
-
-
+
+        with gr.Row():
+            ingest_embed_out = gr.Textbox(label="File/web page preparation progress")
+            file_out_box = gr.File(file_count='single', file_types=['.zip'])
 
     with gr.Tab("Advanced features"):
         out_passages = gr.Slider(minimum=1, value = 2, maximum=10, step=1, label="Choose number of passages to retrieve from the document. Numbers greater than 2 may lead to increased hallucinations or input text being truncated.")
         temp_slide = gr.Slider(minimum=0.1, value = 0.5, maximum=1, step=0.1, label="Choose temperature setting for response generation.")
         with gr.Row():
-            model_choice = gr.Radio(label="Choose a chat model", value="
+            model_choice = gr.Radio(label="Choose a chat model", value="Qwen 2 0.5B (small, fast)", choices = ["Qwen 2 0.5B (small, fast)", "Phi 3.5 Mini (larger, slow)"])
             change_model_button = gr.Button(value="Load model", scale=0)
         with gr.Accordion("Choose number of model layers to send to GPU (WARNING: please don't modify unless you are sure you have a GPU).", open = False):
             gpu_layer_choice = gr.Slider(label="Choose number of model layers to send to GPU.", value=0, minimum=0, maximum=100, step = 1, visible=True)
@@ -246,7 +290,7 @@
 
 
     gr.HTML(
-        "<center>This app is based on the models
+        "<center>This app is based on the models Qwen 2 0.5B and Phi 3.5 Mini. It powered by Gradio, Transformers, and Llama.cpp.</a></center>"
     )
 
     examples_set.change(fn=chatf.update_message, inputs=[examples_set], outputs=[message])
@@ -260,34 +304,34 @@
     # Load in a pdf
     load_pdf_click = load_pdf.click(ing.parse_file, inputs=[in_pdf], outputs=[ingest_text, current_source]).\
        then(ing.text_to_docs, inputs=[ingest_text], outputs=[ingest_docs]).\
-       then(
+       then(embed_faiss_save_to_zip, inputs=[ingest_docs], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
       then(chatf.hide_block, outputs = [examples_set])
 
     # Load in a webpage
     load_web_click = load_web.click(ing.parse_html, inputs=[in_web, in_div], outputs=[ingest_text, ingest_metadata, current_source]).\
        then(ing.html_text_to_docs, inputs=[ingest_text, ingest_metadata], outputs=[ingest_docs]).\
-       then(
+       then(embed_faiss_save_to_zip, inputs=[ingest_docs], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
       then(chatf.hide_block, outputs = [examples_set])
 
     # Load in a csv/excel file
     load_csv_click = load_csv.click(ing.parse_csv_or_excel, inputs=[in_csv, in_text_column], outputs=[ingest_text, current_source]).\
        then(ing.csv_excel_text_to_docs, inputs=[ingest_text, in_text_column], outputs=[ingest_docs]).\
-       then(
+       then(embed_faiss_save_to_zip, inputs=[ingest_docs], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
       then(chatf.hide_block, outputs = [examples_set])
 
     # Load in a webpage
 
     # Click/enter to send message action
-    response_click = submit.click(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_state, model_type_state, out_passages], outputs=[chat_history_state, sources, instruction_prompt_out], queue=False, api_name="retrieval").\
+    response_click = submit.click(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_state, model_type_state, out_passages, api_model_choice, in_api_key], outputs=[chat_history_state, sources, instruction_prompt_out, relevant_query_state], queue=False, api_name="retrieval").\
        then(chatf.turn_off_interactivity, inputs=[message, chatbot], outputs=[message, chatbot], queue=False).\
-       then(chatf.produce_streaming_answer_chatbot, inputs=[chatbot, instruction_prompt_out, model_type_state, temp_slide], outputs=chatbot)
+       then(chatf.produce_streaming_answer_chatbot, inputs=[chatbot, instruction_prompt_out, model_type_state, temp_slide, relevant_query_state], outputs=chatbot)
     response_click.then(chatf.highlight_found_text, [chatbot, sources], [sources]).\
       then(chatf.add_inputs_answer_to_history,[message, chatbot, current_topic], [chat_history_state, current_topic]).\
      then(lambda: chatf.restore_interactivity(), None, [message], queue=False)
 
-    response_enter = message.submit(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_state, model_type_state, out_passages], outputs=[chat_history_state, sources, instruction_prompt_out], queue=False).\
+    response_enter = message.submit(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_state, model_type_state, out_passages, api_model_choice, in_api_key], outputs=[chat_history_state, sources, instruction_prompt_out, relevant_query_state], queue=False).\
       then(chatf.turn_off_interactivity, inputs=[message, chatbot], outputs=[message, chatbot], queue=False).\
-       then(chatf.produce_streaming_answer_chatbot, [chatbot, instruction_prompt_out, model_type_state, temp_slide], chatbot)
+       then(chatf.produce_streaming_answer_chatbot, [chatbot, instruction_prompt_out, model_type_state, temp_slide, relevant_query_state], chatbot)
     response_enter.then(chatf.highlight_found_text, [chatbot, sources], [sources]).\
      then(chatf.add_inputs_answer_to_history,[message, chatbot, current_topic], [chat_history_state, current_topic]).\
      then(lambda: chatf.restore_interactivity(), None, [message], queue=False)
 
@@ -302,4 +346,24 @@ with block:
     # Thumbs up or thumbs down voting function
     chatbot.like(chatf.vote, [chat_history_state, instruction_prompt_out, model_type_state], None)
 
-
+    ###
+    # LOGGING AND ON APP LOAD FUNCTIONS
+    ###
+    app.load(get_connection_params, inputs=None, outputs=[session_hash_state, s3_output_folder_state, session_hash_textbox])
+
+    # Log usernames and times of access to file (to know who is using the app when running on AWS)
+    access_callback = gr.CSVLogger()
+    access_callback.setup([session_hash_textbox], access_logs_data_folder)
+
+    session_hash_textbox.change(lambda *args: access_callback.flag(list(args)), [session_hash_textbox], None, preprocess=False).\
+        then(fn = upload_file_to_s3, inputs=[access_logs_state, access_s3_logs_loc_state], outputs=[s3_logs_output_textbox])
+
+# Launch the Gradio app
+COGNITO_AUTH = get_or_create_env_var('COGNITO_AUTH', '0')
+print(f'The value of COGNITO_AUTH is {COGNITO_AUTH}')
+
+if __name__ == "__main__":
+    if os.environ['COGNITO_AUTH'] == "1":
+        app.queue().launch(show_error=True, auth=authenticate_user, max_file_size='50mb')
+    else:
+        app.queue().launch(show_error=True, inbrowser=True, max_file_size='50mb')
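Note: much of the new app.py wiring relies on `get_or_create_env_var` from chatfuncs/helper_functions.py (+228 lines), whose diff is not expanded in this view. A minimal sketch of what such a helper might look like, inferred purely from the call sites above (an assumption, not the committed implementation):

import os

# Hypothetical sketch of chatfuncs.helper_functions.get_or_create_env_var;
# the committed implementation is not shown in this diff view.
def get_or_create_env_var(var_name: str, default_value: str) -> str:
    # Reuse the environment variable if it is already set...
    value = os.environ.get(var_name)
    if value is None:
        # ...otherwise create it with the supplied default and return that.
        os.environ[var_name] = default_value
        value = default_value
    return value

# Consistent with the call sites above, e.g.
# COGNITO_AUTH = get_or_create_env_var('COGNITO_AUTH', '0')  # -> "0" unless set externally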
chatfuncs/auth.py
ADDED

@@ -0,0 +1,48 @@
+
+import boto3
+from chatfuncs.helper_functions import get_or_create_env_var
+
+client_id = get_or_create_env_var('AWS_CLIENT_ID', '') # This client id is borrowed from async gradio app client
+print(f'The value of AWS_CLIENT_ID is {client_id}')
+
+user_pool_id = get_or_create_env_var('AWS_USER_POOL_ID', '')
+print(f'The value of AWS_USER_POOL_ID is {user_pool_id}')
+
+def authenticate_user(username, password, user_pool_id=user_pool_id, client_id=client_id):
+    """Authenticates a user against an AWS Cognito user pool.
+
+    Args:
+        user_pool_id (str): The ID of the Cognito user pool.
+        client_id (str): The ID of the Cognito user pool client.
+        username (str): The username of the user.
+        password (str): The password of the user.
+
+    Returns:
+        bool: True if the user is authenticated, False otherwise.
+    """
+
+    client = boto3.client('cognito-idp') # Cognito Identity Provider client
+
+    try:
+        response = client.initiate_auth(
+            AuthFlow='USER_PASSWORD_AUTH',
+            AuthParameters={
+                'USERNAME': username,
+                'PASSWORD': password,
+            },
+            ClientId=client_id
+        )
+
+        # If successful, you'll receive an AuthenticationResult in the response
+        if response.get('AuthenticationResult'):
+            return True
+        else:
+            return False
+
+    except client.exceptions.NotAuthorizedException:
+        return False
+    except client.exceptions.UserNotFoundException:
+        return False
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return False
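`authenticate_user` matches the `(username, password) -> bool` callable signature that Gradio accepts for its `auth` parameter, which is why app.py can pass it straight to `launch()` when COGNITO_AUTH is "1". A minimal wiring sketch (illustrative only; the Cognito app client must have the USER_PASSWORD_AUTH flow enabled for `initiate_auth` to succeed):

import gradio as gr
from chatfuncs.auth import authenticate_user

# Any Blocks app can be protected the same way app.py does above.
demo = gr.Blocks()
with demo:
    gr.Markdown("Cognito-protected app")

if __name__ == "__main__":
    # Gradio calls authenticate_user(username, password) on each login attempt.
    demo.queue().launch(auth=authenticate_user, show_error=True)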
chatfuncs/aws_functions.py
ADDED

@@ -0,0 +1,205 @@
+from typing import Type, List
+import pandas as pd
+import boto3
+import tempfile
+import os
+from chatfuncs.helper_functions import get_or_create_env_var
+
+PandasDataFrame = Type[pd.DataFrame]
+
+# Get AWS credentials if required
+bucket_name=""
+
+aws_var_val = get_or_create_env_var("RUN_AWS_FUNCTIONS", "1")
+print(f'The value of RUN_AWS_FUNCTIONS is {aws_var_val}')
+
+AWS_REGION = get_or_create_env_var('AWS_REGION', 'eu-west-2')
+print(f'The value of AWS_REGION is {AWS_REGION}')
+
+if aws_var_val == "1":
+    try:
+        bucket_name = os.environ['CONSULTATION_SUMMARY_BUCKET']
+        session = boto3.Session() # profile_name="default"
+    except Exception as e:
+        print(e)
+
+def get_assumed_role_info():
+    sts_endpoint = 'https://sts.' + AWS_REGION + '.amazonaws.com'
+    sts = boto3.client('sts', region_name=AWS_REGION, endpoint_url=sts_endpoint)
+    response = sts.get_caller_identity()
+
+    # Extract ARN of the assumed role
+    assumed_role_arn = response['Arn']
+
+    # Extract the name of the assumed role from the ARN
+    assumed_role_name = assumed_role_arn.split('/')[-1]
+
+    return assumed_role_arn, assumed_role_name
+
+try:
+    assumed_role_arn, assumed_role_name = get_assumed_role_info()
+
+    print("Assumed Role ARN:", assumed_role_arn)
+    print("Assumed Role Name:", assumed_role_name)
+
+except Exception as e:
+
+    print(e)
+
+# Download direct from S3 - requires login credentials
+def download_file_from_s3(bucket_name, key, local_file_path):
+
+    s3 = boto3.client('s3')
+    s3.download_file(bucket_name, key, local_file_path)
+    print(f"File downloaded from S3: s3://{bucket_name}/{key} to {local_file_path}")
+
+def download_folder_from_s3(bucket_name, s3_folder, local_folder):
+    """
+    Download all files from an S3 folder to a local folder.
+    """
+    s3 = boto3.client('s3')
+
+    # List objects in the specified S3 folder
+    response = s3.list_objects_v2(Bucket=bucket_name, Prefix=s3_folder)
+
+    # Download each object
+    for obj in response.get('Contents', []):
+        # Extract object key and construct local file path
+        object_key = obj['Key']
+        local_file_path = os.path.join(local_folder, os.path.relpath(object_key, s3_folder))
+
+        # Create directories if necessary
+        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
+
+        # Download the object
+        try:
+            s3.download_file(bucket_name, object_key, local_file_path)
+            print(f"Downloaded 's3://{bucket_name}/{object_key}' to '{local_file_path}'")
+        except Exception as e:
+            print(f"Error downloading 's3://{bucket_name}/{object_key}':", e)
+
+def download_files_from_s3(bucket_name, s3_folder, local_folder, filenames):
+    """
+    Download specific files from an S3 folder to a local folder.
+    """
+    s3 = boto3.client('s3')
+
+    print("Trying to download file: ", filenames)
+
+    if filenames == '*':
+        # List all objects in the S3 folder
+        print("Trying to download all files in AWS folder: ", s3_folder)
+        response = s3.list_objects_v2(Bucket=bucket_name, Prefix=s3_folder)
+
+        print("Found files in AWS folder: ", response.get('Contents', []))
+
+        filenames = [obj['Key'].split('/')[-1] for obj in response.get('Contents', [])]
+
+        print("Found filenames in AWS folder: ", filenames)
+
+    for filename in filenames:
+        object_key = os.path.join(s3_folder, filename)
+        local_file_path = os.path.join(local_folder, filename)
+
+        # Create directories if necessary
+        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
+
+        # Download the object
+        try:
+            s3.download_file(bucket_name, object_key, local_file_path)
+            print(f"Downloaded 's3://{bucket_name}/{object_key}' to '{local_file_path}'")
+        except Exception as e:
+            print(f"Error downloading 's3://{bucket_name}/{object_key}':", e)
+
+def load_data_from_aws(in_aws_keyword_file, aws_password="", bucket_name=bucket_name):
+
+    temp_dir = tempfile.mkdtemp()
+    local_address_stub = temp_dir + '/doc-redaction/'
+    files = []
+
+    if not 'LAMBETH_BOROUGH_PLAN_PASSWORD' in os.environ:
+        out_message = "Can't verify password for dataset access. Do you have a valid AWS connection? Data not loaded."
+        return files, out_message
+
+    if aws_password:
+        if "Lambeth borough plan" in in_aws_keyword_file and aws_password == os.environ['LAMBETH_BOROUGH_PLAN_PASSWORD']:
+
+            s3_folder_stub = 'example-data/lambeth-borough-plan/latest/'
+
+            local_folder_path = local_address_stub
+
+            # Check if folder exists
+            if not os.path.exists(local_folder_path):
+                print(f"Folder {local_folder_path} does not exist! Making folder.")
+
+                os.mkdir(local_folder_path)
+
+            # Check if folder is empty
+            if len(os.listdir(local_folder_path)) == 0:
+                print(f"Folder {local_folder_path} is empty")
+                # Download data
+                download_files_from_s3(bucket_name, s3_folder_stub, local_folder_path, filenames='*')
+
+                print("AWS data downloaded")
+
+            else:
+                print(f"Folder {local_folder_path} is not empty")
+
+            #files = os.listdir(local_folder_stub)
+            #print(files)
+
+            files = [os.path.join(local_folder_path, f) for f in os.listdir(local_folder_path) if os.path.isfile(os.path.join(local_folder_path, f))]
+
+            out_message = "Data successfully loaded from AWS"
+            print(out_message)
+
+        else:
+            out_message = "Data not loaded from AWS"
+            print(out_message)
+    else:
+        out_message = "No password provided. Please ask the data team for access if you need this."
+        print(out_message)
+
+    return files, out_message
+
+def upload_file_to_s3(local_file_paths:List[str], s3_key:str, s3_bucket:str=bucket_name):
+    """
+    Uploads a file from local machine to Amazon S3.
+
+    Args:
+    - local_file_path: Local file path(s) of the file(s) to upload.
+    - s3_key: Key (path) to the file in the S3 bucket.
+    - s3_bucket: Name of the S3 bucket.
+
+    Returns:
+    - Message as variable/printed to console
+    """
+    final_out_message = []
+
+    s3_client = boto3.client('s3')
+
+    if isinstance(local_file_paths, str):
+        local_file_paths = [local_file_paths]
+
+    for file in local_file_paths:
+        try:
+            # Get file name off file path
+            file_name = os.path.basename(file)
+
+            s3_key_full = s3_key + file_name
+            print("S3 key: ", s3_key_full)
+
+            s3_client.upload_file(file, s3_bucket, s3_key_full)
+            out_message = "File " + file_name + " uploaded successfully!"
+            print(out_message)
+
+        except Exception as e:
+            out_message = f"Error uploading file(s): {e}"
+            print(out_message)
+
+        final_out_message.append(out_message)
+        final_out_message_str = '\n'.join(final_out_message)
+
+    return final_out_message_str
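Since `upload_file_to_s3` builds the object key by plain string concatenation (`s3_key + file_name`), the key prefix should end with a trailing slash. An illustrative call, mirroring how app.py pushes access logs to S3 (the paths and bucket below are hypothetical):

from chatfuncs.aws_functions import upload_file_to_s3

# A single path string is accepted and wrapped into a list internally.
out_message = upload_file_to_s3(
    local_file_paths="logs/20241201/myhost/dataset1.csv",  # hypothetical local log file
    s3_key="logs/20241201/myhost/",                        # trailing slash: the file name is appended
    s3_bucket="my-example-bucket",                         # hypothetical bucket name
)
print(out_message)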
chatfuncs/chatfuncs.py
CHANGED
@@ -28,20 +28,25 @@ from langchain.docstore.document import Document
|
|
28 |
from nltk.corpus import stopwords
|
29 |
from nltk.tokenize import RegexpTokenizer
|
30 |
from nltk.stem import WordNetLemmatizer
|
|
|
31 |
from keybert import KeyBERT
|
32 |
|
33 |
# For Name Entity Recognition model
|
34 |
#from span_marker import SpanMarkerModel # Not currently used
|
35 |
|
|
|
36 |
# For BM25 retrieval
|
37 |
-
|
38 |
-
|
39 |
-
|
|
|
|
|
|
|
40 |
|
41 |
from llama_cpp import Llama
|
42 |
from huggingface_hub import hf_hub_download
|
43 |
|
44 |
-
from chatfuncs.prompts import instruction_prompt_template_alpaca, instruction_prompt_mistral_orca, instruction_prompt_phi3, instruction_prompt_llama3
|
45 |
|
46 |
import gradio as gr
|
47 |
|
@@ -84,7 +89,7 @@ print("Running on device:", torch_device)
|
|
84 |
threads = 8 #torch.get_num_threads()
|
85 |
print("CPU threads:", threads)
|
86 |
|
87 |
-
#
|
88 |
temperature: float = 0.1
|
89 |
top_k: int = 3
|
90 |
top_p: float = 1
|
@@ -182,7 +187,7 @@ def docs_to_faiss_save(docs_out:PandasDataFrame, embeddings=embeddings):
|
|
182 |
|
183 |
# Prompt functions
|
184 |
|
185 |
-
def base_prompt_templates(model_type = "
|
186 |
|
187 |
#EXAMPLE_PROMPT = PromptTemplate(
|
188 |
# template="\nCONTENT:\n\n{page_content}\n\nSOURCE: {source}\n\n",
|
@@ -196,9 +201,9 @@ def base_prompt_templates(model_type = "Flan Alpaca (small, fast)"):
|
|
196 |
|
197 |
# The main prompt:
|
198 |
|
199 |
-
if model_type == "
|
200 |
-
INSTRUCTION_PROMPT=PromptTemplate(template=
|
201 |
-
elif model_type == "Phi 3 Mini (larger, slow)":
|
202 |
INSTRUCTION_PROMPT=PromptTemplate(template=instruction_prompt_phi3, input_variables=['question', 'summaries'])
|
203 |
|
204 |
return INSTRUCTION_PROMPT, CONTENT_PROMPT
|
@@ -207,89 +212,175 @@ def write_out_metadata_as_string(metadata_in):
|
|
207 |
metadata_string = [f"{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}" for d in metadata_in] # ['metadata']
|
208 |
return metadata_string
|
209 |
|
210 |
-
def generate_expanded_prompt(inputs: Dict[str, str], instruction_prompt, content_prompt, extracted_memory, vectorstore, embeddings, out_passages = 2): # ,
|
211 |
-
|
212 |
-
question = inputs["question"]
|
213 |
-
chat_history = inputs["chat_history"]
|
214 |
|
|
|
|
|
215 |
|
|
|
|
|
|
|
|
|
216 |
new_question_kworded = adapt_q_from_chat_history(question, chat_history, extracted_memory) # new_question_keywords,
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
# Expand the found passages to the neighbouring context
|
231 |
-
file_type = determine_file_type(doc_df['meta_url'][0])
|
232 |
|
233 |
-
|
234 |
-
|
235 |
-
|
|
|
236 |
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
doc_df['meta_clean'] = write_out_metadata_as_string(doc_df["metadata"]) # [f"<b>{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}</b>" for d in doc_df['metadata']]
|
241 |
-
|
242 |
-
# Remove meta text from the page content if it already exists there
|
243 |
-
doc_df['page_content_no_meta'] = doc_df.apply(lambda row: row['page_content'].replace(row['meta_clean'] + ". ", ""), axis=1)
|
244 |
-
doc_df['content_meta'] = doc_df['meta_clean'].astype(str) + ".<br><br>" + doc_df['page_content_no_meta'].astype(str)
|
245 |
|
246 |
-
|
247 |
-
|
248 |
-
|
|
|
|
|
|
|
249 |
|
250 |
-
|
251 |
-
|
252 |
-
|
253 |
-
|
254 |
-
print('Final prompt is: ')
|
255 |
-
print(instruction_prompt_out)
|
256 |
-
|
257 |
-
return instruction_prompt_out, sources_docs_content_string, new_question_kworded
|
258 |
|
259 |
-
|
260 |
|
261 |
-
|
262 |
-
|
|
|
|
|
|
|
|
|
263 |
|
|
|
|
|
264 |
#if chain_agent is None:
|
265 |
# history.append((user_input, "Please click the button to submit the Huggingface API key before using the chatbot (top right)"))
|
266 |
# return history, history, "", ""
|
267 |
print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
|
268 |
-
print("User input: " + user_input)
|
269 |
|
270 |
-
history = history or []
|
271 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
272 |
# Create instruction prompt
|
273 |
instruction_prompt, content_prompt = base_prompt_templates(model_type=model_type)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
274 |
instruction_prompt_out, docs_content_string, new_question_kworded =\
|
275 |
generate_expanded_prompt({"question": user_input, "chat_history": history}, #vectorstore,
|
276 |
-
instruction_prompt, content_prompt, extracted_memory, vectorstore, embeddings, out_passages)
|
277 |
-
|
278 |
|
279 |
history.append(user_input)
|
280 |
|
281 |
-
print("Output history is:")
|
282 |
-
print(
|
283 |
-
|
284 |
-
print("Final prompt to model is:")
|
285 |
-
print(instruction_prompt_out)
|
286 |
|
287 |
-
return history, docs_content_string, instruction_prompt_out
|
288 |
|
289 |
# Chat functions
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
290 |
|
291 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
292 |
temperature=temperature,
|
|
|
293 |
max_new_tokens=max_new_tokens,
|
294 |
sample=sample,
|
295 |
repetition_penalty=repetition_penalty,
|
@@ -304,7 +395,16 @@ def produce_streaming_answer_chatbot(history, full_prompt, model_type,
|
|
304 |
|
305 |
# return history
|
306 |
|
307 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
308 |
# Get the model and tokenizer, and tokenize the user text.
|
309 |
model_inputs = tokenizer(text=full_prompt, return_tensors="pt", return_attention_mask=False).to(torch_device)
|
310 |
|
@@ -322,7 +422,7 @@ def produce_streaming_answer_chatbot(history, full_prompt, model_type,
|
|
322 |
top_k=top_k
|
323 |
)
|
324 |
|
325 |
-
print(generate_kwargs)
|
326 |
|
327 |
t = Thread(target=model.generate, kwargs=generate_kwargs)
|
328 |
t.start()
|
@@ -350,7 +450,7 @@ def produce_streaming_answer_chatbot(history, full_prompt, model_type,
|
|
350 |
print(f'Tokens per secound: {NUM_TOKENS/time_generate}')
|
351 |
print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
|
352 |
|
353 |
-
elif model_type == "Phi 3 Mini (larger, slow)":
|
354 |
#tokens = model.tokenize(full_prompt)
|
355 |
|
356 |
gen_config = CtransGenGenerationConfig()
|
@@ -384,6 +484,33 @@ def produce_streaming_answer_chatbot(history, full_prompt, model_type,
|
|
384 |
print(f'Tokens per secound: {NUM_TOKENS/time_generate}')
|
385 |
print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
|
386 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
387 |
|
388 |
# Chat helper functions
|
389 |
|
@@ -507,7 +634,7 @@ def hybrid_retrieval(new_question_kworded, vectorstore, embeddings, k_val, out_p
|
|
507 |
docs_content = doc_df['page_content'].astype(str)
|
508 |
docs_url = doc_df['meta_url']
|
509 |
|
510 |
-
return docs_keep_as_doc, docs_content, docs_url
|
511 |
|
512 |
# Check for if more docs are removed than the desired output
|
513 |
if out_passages > docs_keep_length:
|
@@ -519,47 +646,58 @@ def hybrid_retrieval(new_question_kworded, vectorstore, embeddings, k_val, out_p
|
|
519 |
|
520 |
print("Number of documents remaining: ", docs_keep_length)
|
521 |
|
522 |
-
# 2nd level check on retrieved
|
523 |
-
|
524 |
content_keep=[]
|
525 |
for item in docs_keep:
|
526 |
content_keep.append(item[0].page_content)
|
527 |
|
528 |
-
|
529 |
-
|
530 |
-
|
531 |
-
|
532 |
-
|
533 |
-
|
534 |
-
|
535 |
-
|
536 |
-
|
537 |
-
|
538 |
-
#
|
539 |
-
|
540 |
-
|
541 |
-
|
542 |
-
|
543 |
-
|
544 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
545 |
pairs.sort()
|
546 |
-
|
547 |
-
bm25_result = [value for ranks, value in pairs]
|
548 |
|
549 |
-
bm25_rank=[]
|
550 |
-
bm25_score = []
|
551 |
-
|
552 |
-
for vec_item in docs_keep:
|
553 |
-
x = 0
|
554 |
-
for bm25_item in bm25_result:
|
555 |
-
x = x + 1
|
556 |
-
if bm25_item.page_content == vec_item[0].page_content:
|
557 |
-
bm25_rank.append(x)
|
558 |
-
bm25_score.append((docs_keep_length/x)*bm25_weight)
|
559 |
|
560 |
# 3rd level check on retrieved docs with SVM retriever
|
|
|
|
|
|
|
561 |
|
562 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
563 |
svm_result = svm_retriever.invoke(new_question_kworded)
|
564 |
|
565 |
|
@@ -605,6 +743,10 @@ def hybrid_retrieval(new_question_kworded, vectorstore, embeddings, k_val, out_p
|
|
605 |
# Make df of best options
|
606 |
doc_df = create_doc_df(docs_keep_out)
|
607 |
|
|
|
|
|
|
|
|
|
608 |
return docs_keep_as_doc, doc_df, docs_keep_out
|
609 |
|
610 |
def get_expanded_passages(vectorstore, docs, width):
|
|
|
28 |
from nltk.corpus import stopwords
|
29 |
from nltk.tokenize import RegexpTokenizer
|
30 |
from nltk.stem import WordNetLemmatizer
|
31 |
+
#from nltk.stem.snowball import SnowballStemmer
|
32 |
from keybert import KeyBERT
|
33 |
|
34 |
# For Name Entity Recognition model
|
35 |
#from span_marker import SpanMarkerModel # Not currently used
|
36 |
|
37 |
+
|
38 |
# For BM25 retrieval
|
39 |
+
import bm25s
|
40 |
+
import Stemmer
|
41 |
+
|
42 |
+
#from gensim.corpora import Dictionary
|
43 |
+
#from gensim.models import TfidfModel, OkapiBM25Model
|
44 |
+
#from gensim.similarities import SparseMatrixSimilarity
|
45 |
|
46 |
from llama_cpp import Llama
|
47 |
from huggingface_hub import hf_hub_download
|
48 |
|
49 |
+
from chatfuncs.prompts import instruction_prompt_template_alpaca, instruction_prompt_mistral_orca, instruction_prompt_phi3, instruction_prompt_llama3, instruction_prompt_qwen
|
50 |
|
51 |
import gradio as gr
|
52 |
|
|
|
89 |
threads = 8 #torch.get_num_threads()
|
90 |
print("CPU threads:", threads)
|
91 |
|
92 |
+
# Qwen 2 0.5B (small, fast) Model parameters
|
93 |
temperature: float = 0.1
|
94 |
top_k: int = 3
|
95 |
top_p: float = 1
|
|
|
187 |
|
188 |
# Prompt functions
|
189 |
|
190 |
+
def base_prompt_templates(model_type = "Qwen 2 0.5B (small, fast)"):
|
191 |
|
192 |
#EXAMPLE_PROMPT = PromptTemplate(
|
193 |
# template="\nCONTENT:\n\n{page_content}\n\nSOURCE: {source}\n\n",
|
|
|
201 |
|
202 |
# The main prompt:
|
203 |
|
204 |
+
if model_type == "Qwen 2 0.5B (small, fast)":
|
205 |
+
INSTRUCTION_PROMPT=PromptTemplate(template=instruction_prompt_qwen, input_variables=['question', 'summaries'])
|
206 |
+
elif model_type == "Phi 3.5 Mini (larger, slow)":
|
207 |
INSTRUCTION_PROMPT=PromptTemplate(template=instruction_prompt_phi3, input_variables=['question', 'summaries'])
|
208 |
|
209 |
return INSTRUCTION_PROMPT, CONTENT_PROMPT
|
|
|
212 |
metadata_string = [f"{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}" for d in metadata_in] # ['metadata']
|
213 |
return metadata_string
|
214 |
|
215 |
+
def generate_expanded_prompt(inputs: Dict[str, str], instruction_prompt, content_prompt, extracted_memory, vectorstore, embeddings, relevant_flag = True, out_passages = 2): # ,
|
|
|
|
|
|
|
216 |
|
217 |
+
question = inputs["question"]
|
218 |
+
chat_history = inputs["chat_history"]
|
219 |
|
220 |
+
print("relevant_flag in generate_expanded_prompt:", relevant_flag)
|
221 |
+
|
222 |
+
|
223 |
+
if relevant_flag == True:
|
224 |
new_question_kworded = adapt_q_from_chat_history(question, chat_history, extracted_memory) # new_question_keywords,
|
225 |
+
docs_keep_as_doc, doc_df, docs_keep_out = hybrid_retrieval(new_question_kworded, vectorstore, embeddings, k_val = 25, out_passages = out_passages, vec_score_cut_off = 0.85, vec_weight = 1, bm25_weight = 1, svm_weight = 1)
|
226 |
+
else:
|
227 |
+
new_question_kworded = question
|
228 |
+
doc_df = pd.DataFrame()
|
229 |
+
docs_keep_as_doc = []
|
230 |
+
docs_keep_out = []
|
231 |
+
|
232 |
+
if (not docs_keep_as_doc) | (doc_df.empty):
|
233 |
+
sorry_prompt = """Say 'Sorry, there is no relevant information to answer this question.'"""
|
234 |
+
return sorry_prompt, "No relevant sources found.", new_question_kworded
|
235 |
+
|
236 |
+
# Expand the found passages to the neighbouring context
|
237 |
+
print("Doc_df columns:", doc_df.columns)
|
|
|
|
|
238 |
|
239 |
+
if 'meta_url' in doc_df.columns:
|
240 |
+
file_type = determine_file_type(doc_df['meta_url'][0])
|
241 |
+
else:
|
242 |
+
file_type = determine_file_type(doc_df['source'][0])
|
243 |
|
244 |
+
# Only expand passages if not tabular data
|
245 |
+
if (file_type != ".csv") & (file_type != ".xlsx"):
|
246 |
+
docs_keep_as_doc, doc_df = get_expanded_passages(vectorstore, docs_keep_out, width=3)
|
|
|
|
|
|
|
|
|
|
|
247 |
|
248 |
+
# Build up sources content to add to user display
|
249 |
+
doc_df['meta_clean'] = write_out_metadata_as_string(doc_df["metadata"]) # [f"<b>{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}</b>" for d in doc_df['metadata']]
|
250 |
+
|
251 |
+
# Remove meta text from the page content if it already exists there
|
252 |
+
doc_df['page_content_no_meta'] = doc_df.apply(lambda row: row['page_content'].replace(row['meta_clean'] + ". ", ""), axis=1)
|
253 |
+
doc_df['content_meta'] = doc_df['meta_clean'].astype(str) + ".<br><br>" + doc_df['page_content_no_meta'].astype(str)
|
254 |
|
255 |
+
#modified_page_content = [f" Document {i+1} - {word}" for i, word in enumerate(doc_df['page_content'])]
|
256 |
+
modified_page_content = [f" Document {i+1} - {word}" for i, word in enumerate(doc_df['content_meta'])]
|
257 |
+
docs_content_string = '<br><br>'.join(modified_page_content)
|
|
|
|
|
|
|
|
|
|
|
258 |
|
259 |
+
sources_docs_content_string = '<br><br>'.join(doc_df['content_meta'])#.replace(" "," ")#.strip()
|
260 |
|
261 |
+
instruction_prompt_out = instruction_prompt.format(question=new_question_kworded, summaries=docs_content_string)
|
262 |
+
|
263 |
+
print('Final prompt is: ')
|
264 |
+
print(instruction_prompt_out)
|
265 |
+
|
266 |
+
return instruction_prompt_out, sources_docs_content_string, new_question_kworded
|
267 |
|
268 |
+
def create_full_prompt(user_input, history, extracted_memory, vectorstore, embeddings, model_type, out_passages, api_model_choice=None, api_key=None, relevant_flag = True):
|
269 |
+
|
270 |
#if chain_agent is None:
|
271 |
# history.append((user_input, "Please click the button to submit the Huggingface API key before using the chatbot (top right)"))
|
272 |
# return history, history, "", ""
|
273 |
print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
|
|
|
274 |
|
|
|
275 |
|
276 |
+
history = history or []
|
277 |
+
|
278 |
+
if api_model_choice and api_model_choice != "None":
|
279 |
+
print("API model choice detected")
|
280 |
+
if api_key:
|
281 |
+
print("API key detected")
|
282 |
+
return history, "", None, relevant_flag
|
283 |
+
else:
|
284 |
+
return history, "", None, relevant_flag
|
285 |
+
|
286 |
# Create instruction prompt
|
287 |
instruction_prompt, content_prompt = base_prompt_templates(model_type=model_type)
|
288 |
+
|
289 |
+
if not user_input.strip():
|
290 |
+
user_input = "No user input found"
|
291 |
+
relevant_flag = False
|
292 |
+
else:
|
293 |
+
relevant_flag = True
|
294 |
+
|
295 |
+
print("User input:", user_input)
|
296 |
+
|
297 |
instruction_prompt_out, docs_content_string, new_question_kworded =\
|
298 |
generate_expanded_prompt({"question": user_input, "chat_history": history}, #vectorstore,
|
299 |
+
instruction_prompt, content_prompt, extracted_memory, vectorstore, embeddings, relevant_flag, out_passages)
|
|
|
300 |
|
301 |
history.append(user_input)
|
302 |
|
303 |
+
print("Output history is:", history)
|
304 |
+
print("Final prompt to model is:",instruction_prompt_out)
|
|
|
|
|
|
|
305 |
|
306 |
+
return history, docs_content_string, instruction_prompt_out, relevant_flag
|
307 |
|
308 |
# Chat functions
|
309 |
+
import boto3
|
310 |
+
import json
|
311 |
+
from chatfuncs.helper_functions import get_or_create_env_var
|
312 |
+
|
313 |
+
# ResponseObject class for AWS Bedrock calls
|
314 |
+
class ResponseObject:
|
315 |
+
def __init__(self, text, usage_metadata):
|
316 |
+
self.text = text
|
317 |
+
self.usage_metadata = usage_metadata
|
318 |
+
|
319 |
+
max_tokens = 4096
|
320 |
+
|
321 |
+
AWS_DEFAULT_REGION = get_or_create_env_var('AWS_DEFAULT_REGION', 'eu-west-2')
|
322 |
+
print(f'The value of AWS_DEFAULT_REGION is {AWS_DEFAULT_REGION}')
|
323 |
|
324 |
+
bedrock_runtime = boto3.client('bedrock-runtime', region_name=AWS_DEFAULT_REGION)
|
325 |
+
|
326 |
+
def call_aws_claude(prompt: str, system_prompt: str, temperature: float, max_tokens: int, model_choice: str) -> ResponseObject:
|
327 |
+
"""
|
328 |
+
This function sends a request to AWS Claude with the following parameters:
|
329 |
+
- prompt: The user's input prompt to be processed by the model.
|
330 |
+
- system_prompt: A system-defined prompt that provides context or instructions for the model.
|
331 |
+
- temperature: A value that controls the randomness of the model's output, with higher values resulting in more diverse responses.
|
332 |
+
- max_tokens: The maximum number of tokens (words or characters) in the model's response.
|
333 |
+
- model_choice: The specific model to use for processing the request.
|
334 |
+
|
335 |
+
The function constructs the request configuration, invokes the model, extracts the response text, and returns a ResponseObject containing the text and metadata.
|
336 |
+
"""
|
337 |
+
|
338 |
+
prompt_config = {
|
339 |
+
"anthropic_version": "bedrock-2023-05-31",
|
340 |
+
"max_tokens": max_tokens,
|
341 |
+
"top_p": 0.999,
|
342 |
+
"temperature":temperature,
|
343 |
+
"system": system_prompt,
|
344 |
+
"messages": [
|
345 |
+
{
|
346 |
+
"role": "user",
|
347 |
+
"content": [
|
348 |
+
{"type": "text", "text": prompt},
|
349 |
+
],
|
350 |
+
}
|
351 |
+
],
|
352 |
+
}
|
353 |
+
|
354 |
+
body = json.dumps(prompt_config)
|
355 |
+
|
356 |
+
modelId = model_choice
|
357 |
+
accept = "application/json"
|
358 |
+
contentType = "application/json"
|
359 |
+
|
360 |
+
request = bedrock_runtime.invoke_model(
|
361 |
+
body=body, modelId=modelId, accept=accept, contentType=contentType
|
362 |
+
)
|
363 |
+
|
364 |
+
# Extract text from request
|
365 |
+
response_body = json.loads(request.get("body").read())
|
366 |
+
text = response_body.get("content")[0].get("text")
|
367 |
+
|
368 |
+
response = ResponseObject(
|
369 |
+
text=text,
|
370 |
+
usage_metadata=request['ResponseMetadata']
|
371 |
+
)
|
372 |
+
|
373 |
+
# Now you can access both the text and metadata
|
374 |
+
#print("Text:", response.text)
|
375 |
+
print("Metadata:", response.usage_metadata)
|
376 |
+
|
377 |
+
return response
|
378 |
+
|
379 |
+
def produce_streaming_answer_chatbot(history,
|
380 |
+
full_prompt,
|
381 |
+
model_type,
|
382 |
temperature=temperature,
|
383 |
+
relevant_query_bool=True,
|
384 |
max_new_tokens=max_new_tokens,
|
385 |
sample=sample,
|
386 |
repetition_penalty=repetition_penalty,
|
|
|
395 |
|
396 |
# return history
|
397 |
|
398 |
+
|
399 |
+
|
400 |
+
if relevant_query_bool == False:
|
401 |
+
out_message = [("","No relevant query found. Please retry your question")]
|
402 |
+
history.append(out_message)
|
403 |
+
|
404 |
+
yield history
|
405 |
+
return
|
406 |
+
|
407 |
+
if model_type == "Qwen 2 0.5B (small, fast)":
|
408 |
# Get the model and tokenizer, and tokenize the user text.
|
409 |
model_inputs = tokenizer(text=full_prompt, return_tensors="pt", return_attention_mask=False).to(torch_device)
|
410 |
|
|
|
422 |
top_k=top_k
|
423 |
)
|
424 |
|
425 |
+
#print(generate_kwargs)
|
426 |
|
427 |
t = Thread(target=model.generate, kwargs=generate_kwargs)
|
428 |
t.start()
|
|
|
450 |
print(f'Tokens per secound: {NUM_TOKENS/time_generate}')
|
451 |
print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
|
452 |
|
453 |
+
elif model_type == "Phi 3.5 Mini (larger, slow)":
|
454 |
#tokens = model.tokenize(full_prompt)
|
455 |
|
456 |
gen_config = CtransGenGenerationConfig()
|
|
|
484 |
print(f'Tokens per secound: {NUM_TOKENS/time_generate}')
|
485 |
print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
|
+    elif model_type == "anthropic.claude-3-haiku-20240307-v1:0" or model_type == "anthropic.claude-3-sonnet-20240229-v1:0":
+        system_prompt = "You are answering questions from the user based on source material. Respond with short, factually correct answers."
+
+        try:
+            print("Calling AWS Claude model")
+            response = call_aws_claude(full_prompt, system_prompt, temperature, max_tokens, model_type)
+        except Exception as e:
+            # If the call fails, try again after 30 seconds in case a throttle limit was hit
+            print(e)
+            try:
+                out_message = "API limit hit - waiting 30 seconds to retry."
+                print(out_message)
+
+                time.sleep(30)
+                response = call_aws_claude(full_prompt, system_prompt, temperature, max_tokens, model_type)
+
+            except Exception as e:
+                print(e)
+                return "", history
+        # Update the conversation history with the new prompt and response
+        history.append({'role': 'user', 'parts': [full_prompt]})
+        history.append({'role': 'assistant', 'parts': [response.text]})
+
+        # Print the updated conversation history
+        #print("conversation_history:", conversation_history)
+
+        return response, history
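
The fixed 30-second retry above could also be expressed as a small reusable wrapper; a minimal sketch (an alternative, not code from this commit):

import time

def call_with_retry(fn, attempts=3, delay=30):
    # Call fn(), retrying after a pause if a throttle or transient error occurs
    for attempt in range(attempts):
        try:
            return fn()
        except Exception as e:
            print(e)
            if attempt == attempts - 1:
                raise
            time.sleep(delay)

Usage would then be call_with_retry(lambda: call_aws_claude(full_prompt, system_prompt, temperature, max_tokens, model_type)).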
# Chat helper functions

     docs_content = doc_df['page_content'].astype(str)
     docs_url = doc_df['meta_url']

+    return docs_keep_as_doc, doc_df, docs_content, docs_url

 # Check whether more docs are removed than the desired output
 if out_passages > docs_keep_length:
...
     print("Number of documents remaining: ", docs_keep_length)

+    # 2nd level check using the BM25s package to do keyword search on retrieved passages.
+
     content_keep = []
     for item in docs_keep:
         content_keep.append(item[0].page_content)

+    # Prepare the corpus (tokenised, with optional stemming)
+    corpus = [doc.lower() for doc in content_keep]
+    #stemmer = SnowballStemmer("english", ignore_stopwords=True)  # NLTK stemming not compatible
+    stemmer = Stemmer.Stemmer("english")
+    corpus_tokens = bm25s.tokenize(corpus, stopwords="en", stemmer=stemmer)
+
+    # Create and index with BM25s
+    retriever = bm25s.BM25()
+    retriever.index(corpus_tokens)
+
+    # Query processing (stemming applied consistently if used above)
+    query_tokens = bm25s.tokenize(new_question_kworded.lower(), stemmer=stemmer)
+    results, scores = retriever.retrieve(query_tokens, corpus=corpus, k=len(corpus))  # Retrieve all docs
+
+    for i in range(results.shape[1]):
+        doc, score = results[0, i], scores[0, i]
+        print(f"Rank {i+1} (score: {score:.2f}): {doc}")
+
+    #print("BM25 results:", results)
+    #print("BM25 scores:", scores)
+
+    # Rank calculation (custom logic for the BM25 score)
+    bm25_rank = list(range(1, len(results[0]) + 1))
+    #bm25_rank = results[0]#.tolist()[0]  # Since there is a single query
+    bm25_score = [(docs_keep_length / (rank + 1)) * bm25_weight for rank in bm25_rank]
+    # +1 to avoid division by 0 for rank 0
+
+    # Result ordering (using the calculated ranks)
+    pairs = list(zip(bm25_rank, docs_keep_as_doc))
     pairs.sort()
+    bm25_result = [value for rank, value in pairs]
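
For reference, the tokenize/index/retrieve calls above follow the standard bm25s pattern; a minimal, self-contained sketch with a toy corpus (not the app's documents):

import bm25s

corpus = ["the cat sat on the mat", "dogs chase cats", "funding for local libraries"]
retriever = bm25s.BM25()
retriever.index(bm25s.tokenize(corpus, stopwords="en"))

# Retrieve the two best-matching documents for a query
results, scores = retriever.retrieve(bm25s.tokenize("cats"), corpus=corpus, k=2)
print(results[0], scores[0])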

     # 3rd level check on retrieved docs with SVM retriever
+    # Check the type of the embeddings object
+    embeddings_type = type(embeddings)
+    print("Type of embeddings object:", embeddings_type)
+
+    print("embeddings:", embeddings)
+
+    from langchain_huggingface import HuggingFaceEmbeddings
+    #hf_embeddings = HuggingFaceEmbeddings(**embeddings)
+    hf_embeddings = embeddings
+
+    svm_retriever = SVMRetriever.from_texts(content_keep, hf_embeddings, k = k_val)
     svm_result = svm_retriever.invoke(new_question_kworded)
...
     # Make df of best options
     doc_df = create_doc_df(docs_keep_out)

+    print("doc_df:", doc_df)
+    print("docs_keep_as_doc:", docs_keep_as_doc)
+    print("docs_keep_out:", docs_keep_out)

     return docs_keep_as_doc, doc_df, docs_keep_out

 def get_expanded_passages(vectorstore, docs, width):
chatfuncs/helper_functions.py
ADDED
@@ -0,0 +1,228 @@
import os
import gradio as gr
import pandas as pd

def get_or_create_env_var(var_name, default_value):
    # Get the environment variable if it exists
    value = os.environ.get(var_name)

    # If it doesn't exist, set it to the default value
    if value is None:
        os.environ[var_name] = default_value
        value = default_value

    return value
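
As a quick illustration (not part of the file), the helper both reads and seeds the process environment, so later imports see the same value:

# Hypothetical usage: the first call seeds the variable, later reads return it
region = get_or_create_env_var('AWS_DEFAULT_REGION', 'eu-west-2')
assert os.environ['AWS_DEFAULT_REGION'] == region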

# Retrieve or set the output folder
env_var_name = 'GRADIO_OUTPUT_FOLDER'
default_value = 'output/'

output_folder = get_or_create_env_var(env_var_name, default_value)
print(f'The value of {env_var_name} is {output_folder}')

def get_file_path_with_extension(file_path):
    # Get the basename of the file (e.g. "example.txt" from "/path/to/example.txt")
    basename = os.path.basename(file_path)

    # Return the basename with its extension
    return basename

def get_file_path_end(file_path):
    # Get the basename of the file (e.g. "example.txt" from "/path/to/example.txt")
    basename = os.path.basename(file_path)

    # Split the basename and its extension and return only the basename without the extension
    filename_without_extension, _ = os.path.splitext(basename)

    #print(filename_without_extension)

    return filename_without_extension

def detect_file_type(filename):
    """Detect the file type based on its extension."""
    if (filename.endswith('.csv')) | (filename.endswith('.csv.gz')) | (filename.endswith('.zip')):
        return 'csv'
    elif filename.endswith('.xlsx'):
        return 'xlsx'
    elif filename.endswith('.parquet'):
        return 'parquet'
    elif filename.endswith('.pdf'):
        return 'pdf'
    elif filename.endswith('.jpg'):
        return 'jpg'
    elif filename.endswith('.jpeg'):
        return 'jpeg'
    elif filename.endswith('.png'):
        return 'png'
    else:
        raise ValueError("Unsupported file type.")

def read_file(filename):
    """Read the file based on its detected type."""
    file_type = detect_file_type(filename)

    if file_type == 'csv':
        return pd.read_csv(filename, low_memory=False)
    elif file_type == 'xlsx':
        return pd.read_excel(filename)
    elif file_type == 'parquet':
        return pd.read_parquet(filename)
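
A quick illustration of how the two helpers dispatch on the file extension (the file paths are hypothetical):

# Hypothetical usage of the dispatch above
print(detect_file_type("responses.xlsx"))     # 'xlsx'
df = read_file("consultation_responses.csv")  # dispatches to pd.read_csv(..., low_memory=False)
print(df.head())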

def ensure_output_folder_exists():
    """Check if the 'output/' folder exists and create it if not."""
    folder_name = "output/"

    if not os.path.exists(folder_name):
        # Create the folder if it doesn't exist
        os.makedirs(folder_name)
        print("Created the 'output/' folder.")
    else:
        print("The 'output/' folder already exists.")

def put_columns_in_df(in_file):
    new_choices = []
    concat_choices = []
    all_sheet_names = []
    number_of_excel_files = 0

    for file in in_file:
        file_name = file.name
        file_type = detect_file_type(file_name)
        #print("File type is:", file_type)

        file_end = get_file_path_with_extension(file_name)

        if file_type == 'xlsx':
            number_of_excel_files += 1
            new_choices = []
            print("Running through all xlsx sheets")
            anon_xlsx = pd.ExcelFile(file_name)
            new_sheet_names = anon_xlsx.sheet_names
            # Iterate through the sheet names
            for sheet_name in new_sheet_names:
                # Read each sheet into a DataFrame
                df = pd.read_excel(file_name, sheet_name=sheet_name)

                # Process the DataFrame (e.g. print its contents)
                print(f"Sheet Name: {sheet_name}")
                print(df.head())  # Print the first few rows

                new_choices.extend(list(df.columns))

            all_sheet_names.extend(new_sheet_names)

        else:
            df = read_file(file_name)
            new_choices = list(df.columns)

        concat_choices.extend(new_choices)

    # Drop duplicate columns
    concat_choices = list(set(concat_choices))

    if number_of_excel_files > 0:
        return gr.Dropdown(choices=concat_choices, value=concat_choices[0]), gr.Dropdown(choices=all_sheet_names, value=all_sheet_names[0], visible=True), file_end
    else:
        return gr.Dropdown(choices=concat_choices, value=concat_choices[0]), gr.Dropdown(visible=False), file_end

# The following function is only relevant for locally-created executables based on this app (pyinstaller creates an _internal folder containing tesseract and poppler, which needs adding to the system path for the app to run)
def add_folder_to_path(folder_path: str):
    '''
    Check if a folder exists on the system. If so, get the absolute path and add it to the system PATH variable if it isn't already there.
    '''
    if os.path.exists(folder_path) and os.path.isdir(folder_path):
        print(folder_path, "folder exists.")

        # Resolve the relative path to an absolute path
        absolute_path = os.path.abspath(folder_path)

        current_path = os.environ['PATH']
        if absolute_path not in current_path.split(os.pathsep):
            full_path_extension = absolute_path + os.pathsep + current_path
            os.environ['PATH'] = full_path_extension
            #print("Updated PATH with:", full_path_extension)
        else:
            print(f"Directory {folder_path} already exists in PATH.")
    else:
        print(f"Folder not found at {folder_path} - not added to PATH")

# Upon running a process, the feedback buttons are revealed
def reveal_feedback_buttons():
    return gr.Radio(visible=True), gr.Textbox(visible=True), gr.Button(visible=True), gr.Markdown(visible=True)

def wipe_logs(feedback_logs_loc, usage_logs_loc):
    try:
        os.remove(feedback_logs_loc)
    except Exception as e:
        print("Could not remove feedback logs file", e)
    try:
        os.remove(usage_logs_loc)
    except Exception as e:
        print("Could not remove usage logs file", e)

async def get_connection_params(request: gr.Request):
    base_folder = ""

    if request:
        #print("request user:", request.username)

        #request_data = await request.json()  # Parse JSON body
        #print("All request data:", request_data)
        #context_value = request_data.get('context')
        #if 'context' in request_data:
        #    print("Request context dictionary:", request_data['context'])

        #print("Request headers dictionary:", request.headers)
        #print("All host elements", request.client)
        #print("IP address:", request.client.host)
        #print("Query parameters:", dict(request.query_params))
        # To get the underlying FastAPI items you would need to use await and a live query: https://fastapi.tiangolo.com/vi/reference/request/
        #print("Request dictionary to object:", request.request.body())
        print("Session hash:", request.session_hash)

        # Retrieve or set CUSTOM_CLOUDFRONT_HEADER
        CUSTOM_CLOUDFRONT_HEADER_var = get_or_create_env_var('CUSTOM_CLOUDFRONT_HEADER', '')
        #print(f'The value of CUSTOM_CLOUDFRONT_HEADER is {CUSTOM_CLOUDFRONT_HEADER_var}')

        # Retrieve or set CUSTOM_CLOUDFRONT_HEADER_VALUE
        CUSTOM_CLOUDFRONT_HEADER_VALUE_var = get_or_create_env_var('CUSTOM_CLOUDFRONT_HEADER_VALUE', '')
        #print(f'The value of CUSTOM_CLOUDFRONT_HEADER_VALUE_var is {CUSTOM_CLOUDFRONT_HEADER_VALUE_var}')

        if CUSTOM_CLOUDFRONT_HEADER_var and CUSTOM_CLOUDFRONT_HEADER_VALUE_var:
            if CUSTOM_CLOUDFRONT_HEADER_var in request.headers:
                supplied_cloudfront_custom_value = request.headers[CUSTOM_CLOUDFRONT_HEADER_var]
                if supplied_cloudfront_custom_value == CUSTOM_CLOUDFRONT_HEADER_VALUE_var:
                    print("Custom Cloudfront header found:", supplied_cloudfront_custom_value)
                else:
                    raise ValueError("Custom Cloudfront header value does not match expected value.")

        # Get the output save folder from 1) a username passed in from a direct Cognito login, 2) a Cognito ID header passed through a Lambda authenticator, or 3) the session hash.
        if request.username:
            out_session_hash = request.username
            base_folder = "user-files/"
            print("Request username found:", out_session_hash)

        elif 'x-cognito-id' in request.headers:
            out_session_hash = request.headers['x-cognito-id']
            base_folder = "user-files/"
            print("Cognito ID found:", out_session_hash)

        else:
            out_session_hash = request.session_hash
            base_folder = "temp-files/"
            #print("Cognito ID not found. Using session hash as save folder:", out_session_hash)

        output_folder = base_folder + out_session_hash + "/"
        #if bucket_name:
        #    print("S3 output folder is: " + "s3://" + bucket_name + "/" + output_folder)

        return out_session_hash, output_folder, out_session_hash
    else:
        print("No session parameters found.")
        return "", "", ""
chatfuncs/ingest.py
CHANGED
@@ -573,7 +573,7 @@ def load_embeddings(model_name = "BAAI/bge-base-en-v1.5"):

     return embeddings_func

-def embed_faiss_save_to_zip(docs_out, save_to="faiss_lambeth_census_embedding", model_name = "BAAI/bge-base-en-v1.5"):
+def embed_faiss_save_to_zip(docs_out, save_to="output", model_name = "BAAI/bge-base-en-v1.5"):

     load_embeddings(model_name=model_name)

@@ -582,7 +582,9 @@ def embed_faiss_save_to_zip(docs_out, save_to="faiss_lambeth_census_embedding",
     print(f"> Total split documents: {len(docs_out)}")

     vectorstore = FAISS.from_documents(documents=docs_out, embedding=embeddings)

+    if not Path(save_to).exists():
+        os.mkdir(save_to)

     if Path(save_to).exists():
         vectorstore.save_local(folder_path=save_to)

@@ -599,9 +601,13 @@
     os.remove(save_to + "/index.faiss")
     os.remove(save_to + "/index.pkl")

-    return vectorstore
+    save_zip_out = save_to + "/" + save_to + '.zip'
+
+    shutil.move(save_to + '.zip', save_zip_out)
+
+    out_message = "Document processing complete"
+
+    return out_message, vectorstore, save_zip_out

 def docs_to_chroma_save(embeddings, docs_out:PandasDataFrame, save_to:str):
     print(f"> Total split documents: {len(docs_out)}")
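
As a usage sketch (docs_out and embeddings are assumed to exist, as in the surrounding module), the saved index can later be reloaded with LangChain's FAISS.load_local:

# Hypothetical round trip for the function above
out_message, vectorstore, save_zip_out = embed_faiss_save_to_zip(docs_out, save_to="output")

from langchain_community.vectorstores import FAISS
# allow_dangerous_deserialization is required by recent LangChain versions for pickle-backed indexes
vectorstore = FAISS.load_local("output", embeddings, allow_dangerous_deserialization=True)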
chatfuncs/llm_api_call.py
ADDED
@@ -0,0 +1,925 @@
import os
import google.generativeai as ai
import pandas as pd
import gradio as gr
import markdown
import time
import boto3
import json
import string
import re
from gradio import Progress
from typing import List, Tuple
from io import StringIO

from chatfuncs.prompts import prompt1, prompt2, prompt3, system_prompt, summarise_system_prompt, summarise_prompt
from chatfuncs.helper_functions import output_folder, detect_file_type, get_file_path_end, read_file, get_or_create_env_var

# ResponseObject class for AWS Bedrock calls
class ResponseObject:
    def __init__(self, text, usage_metadata):
        self.text = text
        self.usage_metadata = usage_metadata

max_tokens = 4096

AWS_DEFAULT_REGION = get_or_create_env_var('AWS_DEFAULT_REGION', 'eu-west-2')
print(f'The value of AWS_DEFAULT_REGION is {AWS_DEFAULT_REGION}')

bedrock_runtime = boto3.client('bedrock-runtime', region_name=AWS_DEFAULT_REGION)

def normalise_string(text):
    # Replace two or more dashes with a single dash
    text = re.sub(r'-{2,}', '-', text)

    # Replace two or more spaces with a single space
    text = re.sub(r'\s{2,}', ' ', text)

    return text

def load_in_file(file_path: str, colname: str):
    """
    Loads in a tabular data file and returns the data and file name.

    Parameters:
    - file_path (str): The path to the file to be processed.
    - colname (str): The name of the column containing the response text.
    """
    file_type = detect_file_type(file_path)
    print("File type is:", file_type)

    out_file_part = get_file_path_end(file_path)
    file_data = read_file(file_path)

    file_data[colname].fillna("", inplace=True)

    file_data[colname] = file_data[colname].astype(str).str.replace(r"\bnan\b", "", regex=True)

    print(file_data[colname])

    return file_data, out_file_part

def load_in_data_file(file_paths: List[str], in_colnames: List[str], batch_size: int = 50):
    '''Load in a data table and work out how many batches are needed.'''
    try:
        file_data, file_name = load_in_file(file_paths[0], colname=in_colnames)
        num_batches = (len(file_data) // batch_size) + 1

    except Exception as e:
        print(e)
        file_data = pd.DataFrame()
        file_name = ""
        num_batches = 1

    return file_data, file_name, num_batches

def data_file_to_markdown_table(file_data: pd.DataFrame, file_name: str, chosen_cols: List[str], output_folder: str, batch_number: int, batch_size: int) -> Tuple[str, str, str, int, int]:
    """
    Simplifies a data file to the chosen response column, writes the current batch out as a CSV, and returns the batch as a markdown table.

    Parameters:
    - file_data (pd.DataFrame): Tabular data file with responses.
    - file_name (str): File name with extension.
    - chosen_cols (List[str]): A list of column names to include in the simplified file.
    - output_folder (str): The directory where the simplified file will be saved.
    - batch_number (int): The current batch number for processing.
    - batch_size (int): The number of rows to process in each batch.

    Returns:
    - Tuple[str, str, str, int, int]: A tuple containing the path to the simplified CSV file, the simplified markdown table as a string, the file name (used for naming the output file), and the start and end rows of the batch.
    """
    #print("\nfile_data_in_markdown func:", file_data)
    #print("\nBatch size in markdown func:", str(batch_size))

    normalised_simple_markdown_table = ""
    simplified_csv_table_path = ""

    # Simplify the table to just the responses column and the response reference number
    simple_file = file_data[[chosen_cols]].reset_index(names="Reference")
    simple_file["Reference"] = simple_file["Reference"].astype(int) + 1
    simple_file = simple_file.rename(columns={chosen_cols: "Response"})
    simple_file["Response"] = simple_file["Response"].str.strip()
    file_len = len(simple_file["Reference"])

    # Subset the data for the current batch
    start_row = batch_number * batch_size
    if start_row > file_len + 1:
        print("Start row greater than file row length")
        return simplified_csv_table_path, normalised_simple_markdown_table, file_name, start_row, start_row  # empty batch

    if (start_row + batch_size) <= file_len + 1:
        end_row = start_row + batch_size
    else:
        end_row = file_len + 1

    simple_file = simple_file[start_row:end_row]  # Select the current batch

    # Remove problematic characters: control characters, special characters and quote marks, and excessive whitespace
    simple_file["Response"] = simple_file["Response"].str.replace(r'[\x00-\x1F\x7F]|["“”‘’<>]|\\', '', regex=True)  # Remove control and special characters
    simple_file["Response"] = simple_file["Response"].str.strip()  # Remove leading and trailing whitespace
    simple_file["Response"] = simple_file["Response"].str.replace(r'\s+', ' ', regex=True)  # Replace multiple spaces with a single space

    # Remove blank and extremely short responses
    simple_file = simple_file.loc[~(simple_file["Response"].isnull()) & ~(simple_file["Response"].str.len() < 5), :]

    simplified_csv_table_path = output_folder + 'simple_markdown_table_' + file_name + '_row_' + str(start_row) + '_to_' + str(end_row) + '.csv'
    simple_file.to_csv(simplified_csv_table_path, index=None)

    simple_markdown_table = simple_file.to_markdown(index=None)

    normalised_simple_markdown_table = normalise_string(simple_markdown_table)

    return simplified_csv_table_path, normalised_simple_markdown_table, file_name, start_row, end_row
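
For reference (a toy illustration, not code from the commit), the DataFrame.to_markdown call above renders a pipe-delimited table like this:

import pandas as pd

df = pd.DataFrame({"Reference": [1, 2], "Response": ["Good service", "Too slow"]})
print(df.to_markdown(index=False))
# Prints roughly:
# |   Reference | Response     |
# |------------:|:-------------|
# |           1 | Good service |
# |           2 | Too slow     |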

def construct_gemini_generative_model(in_api_key: str, temperature: float, model_choice: str, system_prompt: str, max_tokens: int) -> Tuple[object, dict]:
    """
    Constructs a GenerativeModel for Gemini API calls.

    Parameters:
    - in_api_key (str): The API key for authentication.
    - temperature (float): The temperature parameter for the model, controlling the randomness of the output.
    - model_choice (str): The choice of model to use for generation.
    - system_prompt (str): The system prompt to guide the generation.
    - max_tokens (int): The maximum number of tokens to generate.

    Returns:
    - Tuple[object, dict]: A tuple containing the constructed GenerativeModel and its configuration.
    """
    # Construct a GenerativeModel
    try:
        if in_api_key:
            #print("Getting API key from textbox")
            api_key = in_api_key
            ai.configure(api_key=api_key)
        elif "GOOGLE_API_KEY" in os.environ:
            #print("Searching for API key in environment variables")
            api_key = os.environ["GOOGLE_API_KEY"]
            ai.configure(api_key=api_key)
        else:
            print("No API key found")
            raise gr.Error("No API key found.")
    except Exception as e:
        print(e)

    config = ai.GenerationConfig(temperature=temperature, max_output_tokens=max_tokens)

    #model = ai.GenerativeModel.from_cached_content(cached_content=cache, generation_config=config)
    model = ai.GenerativeModel(model_name='models/' + model_choice, system_instruction=system_prompt, generation_config=config)

    # Upload CSV file (replace with your actual file path)
    #file_id = ai.upload_file(upload_file_path)

    # if file_type == 'xlsx':
    #     print("Running through all xlsx sheets")
    #     #anon_xlsx = pd.ExcelFile(upload_file_path)
    #     if not in_excel_sheets:
    #         out_message.append("No Excel sheets selected. Please select at least one to anonymise.")
    #         continue

    #     anon_xlsx = pd.ExcelFile(upload_file_path)

    #     # Create xlsx file:
    #     anon_xlsx_export_file_name = output_folder + out_file_part + "_redacted.xlsx"

    ### QUERYING THE LARGE LANGUAGE MODEL ###
    # Prompt caching of the table and system prompt is possible. See here: https://ai.google.dev/gemini-api/docs/caching?lang=python
    # Create a cache with a 5 minute TTL. ONLY FOR CACHES OF AT LEAST 32k TOKENS!
    # cache = ai.caching.CachedContent.create(
    #     model='models/' + model_choice,
    #     display_name=out_file_part,  # used to identify the cache
    #     system_instruction=system_prompt_with_table,
    #     ttl=datetime.timedelta(minutes=5),
    # )

    return model, config
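
As a usage sketch (hypothetical prompt text; assumes GOOGLE_API_KEY is set), the returned model and config can be queried directly:

# Hypothetical usage of the helper above
model, config = construct_gemini_generative_model(
    in_api_key="",                        # fall back to the GOOGLE_API_KEY environment variable
    temperature=0.2,
    model_choice="gemini-1.5-flash-002",  # one of the model choices checked in send_request
    system_prompt="You summarise open text survey responses.",
    max_tokens=max_tokens,
)
response = model.generate_content(contents="Summarise: the service was slow but friendly.", generation_config=config)
print(response.text)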

def call_aws_claude(prompt: str, system_prompt: str, temperature: float, max_tokens: int, model_choice: str) -> ResponseObject:
    """
    Sends a request to AWS Claude via Bedrock with the following parameters:
    - prompt: The user's input prompt to be processed by the model.
    - system_prompt: A system-defined prompt that provides context or instructions for the model.
    - temperature: A value that controls the randomness of the model's output, with higher values resulting in more diverse responses.
    - max_tokens: The maximum number of tokens in the model's response.
    - model_choice: The specific model to use for processing the request.

    The function constructs the request configuration, invokes the model, extracts the response text, and returns a ResponseObject containing the text and metadata.
    """
    prompt_config = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": max_tokens,
        "top_p": 0.999,
        "temperature": temperature,
        "system": system_prompt,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                ],
            }
        ],
    }

    body = json.dumps(prompt_config)

    modelId = model_choice
    accept = "application/json"
    contentType = "application/json"

    request = bedrock_runtime.invoke_model(
        body=body, modelId=modelId, accept=accept, contentType=contentType
    )

    # Extract text from the response
    response_body = json.loads(request.get("body").read())
    text = response_body.get("content")[0].get("text")

    response = ResponseObject(
        text=text,
        usage_metadata=request['ResponseMetadata']
    )

    # Both the text and metadata are now accessible
    #print("Text:", response.text)
    print("Metadata:", response.usage_metadata)

    return response

# Function to send a request and update the history
def send_request(prompt: str, conversation_history: List[dict], model: object, config: dict, model_choice: str, system_prompt: str, temperature: float, progress=Progress(track_tqdm=True)) -> Tuple[str, List[dict]]:
    """
    Sends a request to a language model with the given prompt, conversation history, model configuration, model choice, system prompt, and temperature.
    It constructs the full prompt by appending the new user prompt to the conversation history, generates a response from the model, and updates the conversation history with the new prompt and response.
    If the model choice is an AWS Claude model, it calls the `call_aws_claude` function; otherwise, it uses the `model.generate_content` method.
    The function returns the response and the updated conversation history.
    """
    # Construct the full prompt from the conversation history
    full_prompt = "Conversation history:\n"

    for entry in conversation_history:
        role = entry['role'].capitalize()  # The history is stored with 'role' and 'parts'
        message = ' '.join(entry['parts'])  # Combine all parts of the message
        full_prompt += f"{role}: {message}\n"

    # Add the new user prompt
    full_prompt += f"\nUser: {prompt}"

    # Print the full prompt for debugging purposes
    #print("full_prompt:", full_prompt)

    # Generate the model's response
    if model_choice in ["gemini-1.5-flash-002", "gemini-1.5-pro-002"]:
        try:
            response = model.generate_content(contents=full_prompt, generation_config=config)
        except Exception as e:
            # If the call fails, try again after 30 seconds in case a throttle limit was hit
            print(e)
            try:
                print("Calling Gemini model")
                out_message = "API limit hit - waiting 30 seconds to retry."
                print(out_message)
                progress(0.5, desc=out_message)
                time.sleep(30)
                response = model.generate_content(contents=full_prompt, generation_config=config)
            except Exception as e:
                print(e)
                return "", conversation_history
    else:
        try:
            print("Calling AWS Claude model")
            response = call_aws_claude(prompt, system_prompt, temperature, max_tokens, model_choice)
        except Exception as e:
            # If the call fails, try again after 30 seconds in case a throttle limit was hit
            print(e)
            try:
                out_message = "API limit hit - waiting 30 seconds to retry."
                print(out_message)
                progress(0.5, desc=out_message)
                time.sleep(30)
                response = call_aws_claude(prompt, system_prompt, temperature, max_tokens, model_choice)

            except Exception as e:
                print(e)
                return "", conversation_history

    # Update the conversation history with the new prompt and response
    conversation_history.append({'role': 'user', 'parts': [prompt]})
    conversation_history.append({'role': 'assistant', 'parts': [response.text]})

    # Print the updated conversation history
    #print("conversation_history:", conversation_history)

    return response, conversation_history
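
For clarity (an illustration rather than code from the commit), the conversation history uses the Gemini-style 'role'/'parts' structure, so a completed exchange looks like:

conversation_history = [
    {'role': 'user', 'parts': ['What topics appear in these responses?']},
    {'role': 'assistant', 'parts': ['Mainly service speed and staff friendliness.']},
]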

def process_requests(prompts: List[str], system_prompt_with_table: str, conversation_history: List[dict], whole_conversation: List[str], whole_conversation_metadata: List[str], model: object, config: dict, model_choice: str, temperature: float, batch_no: int = 1, master: bool = False) -> Tuple[List[ResponseObject], List[dict], List[str], List[str]]:
    """
    Processes a list of prompts by sending them to the model, appending the responses to the conversation history, and updating the whole conversation and metadata.

    Args:
        prompts (List[str]): A list of prompts to be processed.
        system_prompt_with_table (str): The system prompt including a table.
        conversation_history (List[dict]): The history of the conversation.
        whole_conversation (List[str]): The complete conversation including prompts and responses.
        whole_conversation_metadata (List[str]): Metadata about the whole conversation.
        model (object): The model to use for processing the prompts.
        config (dict): Configuration for the model.
        model_choice (str): The choice of model to use.
        temperature (float): The temperature parameter for the model.
        batch_no (int): Batch number of the large language model request.
        master (bool): Whether this request is for the master table.

    Returns:
        Tuple[List[ResponseObject], List[dict], List[str], List[str]]: A tuple containing the list of responses, the updated conversation history, the updated whole conversation, and the updated whole conversation metadata.
    """
    responses = []
    for prompt in prompts:

        response, conversation_history = send_request(prompt, conversation_history, model=model, config=config, model_choice=model_choice, system_prompt=system_prompt_with_table, temperature=temperature)

        #print(response.text)
        print(response.usage_metadata)
        responses.append(response)

        # Create conversation txt object
        whole_conversation.append(prompt)
        whole_conversation.append(response.text)

        # Create conversation metadata
        if master == False:
            whole_conversation_metadata.append(f"Query batch {batch_no} prompt {len(responses)} metadata:")
        else:
            whole_conversation_metadata.append("Query summary metadata:")

        whole_conversation_metadata.append(str(response.usage_metadata))

    return responses, conversation_history, whole_conversation, whole_conversation_metadata

def replace_punctuation_with_underscore(input_string):
    # Create a translation table where each punctuation character maps to '_'
    translation_table = str.maketrans(string.punctuation, '_' * len(string.punctuation))

    # Translate the input string using the translation table
    return input_string.translate(translation_table)

def clean_markdown_table(text: str):
    lines = text.splitlines()

    # Remove any empty rows or rows with only pipes
    cleaned_lines = [line for line in lines if not re.match(r'^\s*\|?\s*\|?\s*$', line)]

    # Merge lines that belong to the same row (i.e. don't start with |)
    merged_lines = []
    buffer = ""

    for line in cleaned_lines:
        if line.lstrip().startswith('|'):  # If the line starts with |, it's a new row
            if buffer:
                merged_lines.append(buffer)  # Append the buffered content
            buffer = line  # Start a new buffer with this row
        else:
            # Continuation of the previous row
            buffer += ' ' + line.strip()  # Add content to the current buffer

    # Don't forget to append the last buffer
    if buffer:
        merged_lines.append(buffer)

    # Ensure a consistent number of pipes in each row based on the header
    header_pipes = merged_lines[0].count('|')  # Use the first row to count the number of pipes
    result = []

    for line in merged_lines:
        # Strip excessive whitespace around pipes
        line = re.sub(r'\s*\|\s*', '|', line.strip())

        # Replace numbers between pipes with commas and a space
        line = re.sub(r'(?<=\|)(\s*\d+)(,\s*\d+)+(?=\|)', lambda m: ', '.join(m.group(0).split(',')), line)

        # Replace groups of numbers separated by spaces with commas and a space
        line = re.sub(r'(?<=\|)(\s*\d+)(\s+\d+)+(?=\|)', lambda m: ', '.join(m.group(0).split()), line)

        # Fix an inconsistent number of pipes by adjusting them to match the header
        pipe_count = line.count('|')
        if pipe_count < header_pipes:
            line += '|' * (header_pipes - pipe_count)  # Add missing pipes
        elif pipe_count > header_pipes:
            # If there are too many pipes, split the line and keep the first `header_pipes` columns
            columns = line.split('|')[:header_pipes + 1]  # +1 to keep the last pipe at the end
            line = '|'.join(columns)

        result.append(line)

    # Join the lines back into the cleaned markdown text
    cleaned_text = '\n'.join(result)

    return cleaned_text
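
A toy illustration (not from the commit) of what the cleaner does to a table whose row was wrapped across lines:

messy = '| General Topic | Subtopic |\n| Service\nspeed | Waiting times |'
print(clean_markdown_table(messy))
# |General Topic|Subtopic|
# |Service speed|Waiting times|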

def write_llm_output_and_logs(responses: List[ResponseObject], whole_conversation: List[str], whole_conversation_metadata: List[str], out_file_part: str, latest_batch_completed: int, start_row: int, end_row: int, model_choice_clean: str, temperature: float, log_files_output_paths: List[str], existing_reference_df: pd.DataFrame, existing_topics_df: pd.DataFrame, first_run: bool = False) -> Tuple[str, str, str, pd.DataFrame, str, pd.DataFrame, pd.DataFrame, str, bool]:
    """
    Writes the output of the large language model requests and logs to files.

    Parameters:
    - responses (List[ResponseObject]): A list of ResponseObject instances containing the text and usage metadata of the responses.
    - whole_conversation (List[str]): A list of strings representing the complete conversation including prompts and responses.
    - whole_conversation_metadata (List[str]): A list of strings representing metadata about the whole conversation.
    - out_file_part (str): The base part of the output file name.
    - latest_batch_completed (int): The index of the current batch.
    - start_row (int): Start row of the current batch.
    - end_row (int): End row of the current batch.
    - model_choice_clean (str): The cleaned model choice string.
    - temperature (float): The temperature parameter used in the model.
    - log_files_output_paths (List[str]): A list of paths to the log files.
    - existing_reference_df (pd.DataFrame): The existing reference dataframe mapping response numbers to topics.
    - existing_topics_df (pd.DataFrame): The existing unique topics dataframe.
    - first_run (bool): A boolean indicating if this is the first run through this function in this process. Defaults to False.
    """
    topic_table_out_path = "topic_table_error.csv"
    reference_table_out_path = "reference_table_error.csv"
    unique_topics_df_out_path = "unique_topic_table_error.csv"
    topic_with_response_df = pd.DataFrame()
    markdown_table = ""
    out_reference_df = pd.DataFrame()
    out_unique_topics_df = pd.DataFrame()
    batch_out_file_part = "error"

    # If there was an error in parsing, return a boolean saying so
    is_error = False

    # Convert the conversation to a string and add it to the log outputs
    whole_conversation_str = '\n'.join(whole_conversation)
    whole_conversation_metadata_str = '\n'.join(whole_conversation_metadata)

    start_row_reported = start_row + 1

    # Save outputs for each batch. If a master file is created, label the file as combined
    if first_run == True:
        batch_out_file_part = f"{out_file_part}_batch_{latest_batch_completed + 1}"
    else:
        batch_out_file_part = f"{out_file_part}_combined_batch_{latest_batch_completed + 1}"
    batch_part = f"Rows {start_row_reported} to {end_row}: "

    whole_conversation_path = output_folder + batch_out_file_part + "_full_conversation_" + model_choice_clean + "_temp_" + str(temperature) + ".txt"
    whole_conversation_path_meta = output_folder + batch_out_file_part + "_metadata_" + model_choice_clean + "_temp_" + str(temperature) + ".txt"

    # print("whole_conversation:", whole_conversation_str)

    with open(whole_conversation_path, "w", encoding='utf-8', errors='replace') as f:
        f.write(whole_conversation_str)

    with open(whole_conversation_path_meta, "w", encoding='utf-8', errors='replace') as f:
        f.write(whole_conversation_metadata_str)

    log_files_output_paths.append(whole_conversation_path)
    log_files_output_paths.append(whole_conversation_path_meta)

    # Convert the output table to markdown and then to a pandas dataframe and csv
    cleaned_response = clean_markdown_table(responses[-1].text)

    markdown_table = markdown.markdown(cleaned_response, extensions=['tables'])

    #print("markdown_table:", markdown_table)

    # Remove <p> tags and make sure the result has a valid HTML structure
    html_table = re.sub(r'<p>(.*?)</p>', r'\1', markdown_table)
    html_table = html_table.replace('<p>', '').replace('</p>', '').strip()

    # Now ensure that the HTML structure is correct
    if "<table>" not in html_table:
        html_table = f"""
        <table>
        {html_table}
        </table>
        """

    # print("Markdown table as HTML:", html_table)

    html_buffer = StringIO(html_table)
    #print("html_buffer:", html_buffer)

    try:
        topic_with_response_df = pd.read_html(html_buffer)[0]  # Assuming the first table in the HTML is the one wanted
    except Exception as e:
        print("Error when trying to parse table:", e)
        is_error = True
        return topic_table_out_path, reference_table_out_path, unique_topics_df_out_path, topic_with_response_df, markdown_table, out_reference_df, out_unique_topics_df, batch_out_file_part, is_error

    # Rename columns to ensure consistent use of data frames later in the code
    topic_with_response_df.columns = ["General Topic", "Subtopic", "Sentiment", "Summary", "Response References"]

    # Fill in NA rows with values from above (topics seem to be included only on one row):
    topic_with_response_df = topic_with_response_df.ffill()

    topic_table_out_path = output_folder + batch_out_file_part + "_topic_table_" + model_choice_clean + "_temp_" + str(temperature) + ".csv"

    # Table to map references to topics
    reference_data = []

    # Iterate through each row in the original DataFrame
    for index, row in topic_with_response_df.iterrows():
        references = re.split(r',\s*|\s+', str(row.iloc[4]))  # Split the reference numbers
        topic = row.iloc[0]
        subtopic = row.iloc[1]
        sentiment = row.iloc[2]
        summary = row.iloc[3]

        summary = batch_part + summary

        # Create a new entry for each reference number
        for ref in references:
            reference_data.append({
                'Response References': ref,
                'General Topic': topic,
                'Subtopic': subtopic,
                'Sentiment': sentiment,
                'Summary': summary,
                "Start row of group": start_row_reported
            })

    # Create a new DataFrame from the reference data
    new_reference_df = pd.DataFrame(reference_data)

    # Append the old reference data
    out_reference_df = pd.concat([new_reference_df, existing_reference_df])

    # Remove duplicate response references for the same topic
    out_reference_df.drop_duplicates(["Response References", "General Topic", "Subtopic", "Sentiment"], inplace=True)

    out_reference_df.sort_values(["Start row of group", "Response References", "General Topic", "Subtopic", "Sentiment"], inplace=True)

    reference_counts = out_reference_df.groupby(["General Topic", "Subtopic", "Sentiment"]).agg({
        'Response References': 'size',  # Count the number of references
        'Summary': lambda x: '<br>'.join(
            sorted(set(x), key=lambda summary: out_reference_df.loc[out_reference_df['Summary'] == summary, 'Start row of group'].min())
        )
    }).reset_index()

    # Save the new DataFrame to CSV
    reference_table_out_path = output_folder + batch_out_file_part + "_reference_table_" + model_choice_clean + "_temp_" + str(temperature) + ".csv"

    # Table of all unique topics with descriptions
    new_unique_topics_df = topic_with_response_df[["General Topic", "Subtopic", "Sentiment"]]  # , "Summary"

    # Join existing and new unique topics
    out_unique_topics_df = pd.concat([new_unique_topics_df, existing_topics_df]).drop_duplicates(["Subtopic"]).drop(["Response References", "Summary"], axis=1, errors="ignore")

    # Join the counts to the existing unique topics dataframe
    out_unique_topics_df = out_unique_topics_df.merge(reference_counts, how='left', on=["General Topic", "Subtopic", "Sentiment"]).sort_values("Response References", ascending=False)

    unique_topics_df_out_path = output_folder + batch_out_file_part + "_unique_topics_" + model_choice_clean + "_temp_" + str(temperature) + ".csv"

    return topic_table_out_path, reference_table_out_path, unique_topics_df_out_path, topic_with_response_df, markdown_table, out_reference_df, out_unique_topics_df, batch_out_file_part, is_error
|
589 |
+
|
590 |
+
def llm_query(file_data:pd.DataFrame, existing_topics_w_references_table:pd.DataFrame, existing_reference_df:pd.DataFrame, existing_unique_topics_df:pd.DataFrame, display_table:str, file_name:str, num_batches:int, in_api_key:str, temperature:float, chosen_cols:List[str], model_choice:str, candidate_topics: List=[],latest_batch_completed:int=0, out_message:List=[], out_file_paths:List = [], log_files_output_paths:List = [], first_loop_state:bool=False, whole_conversation_metadata_str:str="", prompt1:str=prompt1, prompt2:str=prompt2, prompt3:str=prompt3, system_prompt:str=system_prompt, summarise_system_prompt:str=summarise_system_prompt, summarise_prompt:str=summarise_prompt, number_of_requests:int=1, batch_size:int=50, max_tokens:int=max_tokens, progress=Progress(track_tqdm=True)):
|
591 |
+
|
592 |
+
'''
|
593 |
+
Query an LLM (Gemini or AWS Anthropic-based) with up to three prompts about a table of open text data. Up to 'batch_size' rows will be queried at a time.
|
594 |
+
|
595 |
+
Parameters:
|
596 |
+
- file_data (pd.DataFrame): Pandas dataframe containing the consultation response data.
|
597 |
+
- existing_topics_w_references_table (pd.DataFrame): Pandas dataframe containing the latest master topic table that has been iterated through batches.
|
598 |
+
- existing_reference_df (pd.DataFrame): Pandas dataframe containing the list of Response reference numbers alongside the derived topics and subtopics.
|
599 |
+
- existing_unique_topics_df (pd.DataFrame): Pandas dataframe containing the unique list of topics, subtopics, sentiment and summaries until this point.
|
600 |
+
- display_table (str): Table for display in markdown format.
|
601 |
+
- file_name (str): File name of the data file.
|
602 |
+
- num_batches (int): Number of batches required to go through all the response rows.
|
603 |
+
- in_api_key (str): The API key for authentication.
|
604 |
+
- temperature (float): The temperature parameter for the model.
|
605 |
+
- chosen_cols (List[str]): A list of chosen columns to process.
|
606 |
+
- candidate_topics (List): A list of existing candidate topics submitted by the user.
|
607 |
+
- model_choice (str): The choice of model to use.
|
608 |
+
- latest_batch_completed (int): The index of the latest file completed.
|
609 |
+
- out_message (list): A list to store output messages.
|
610 |
+
- out_file_paths (list): A list to store output file paths.
|
611 |
+
- log_files_output_paths (list): A list to store log file output paths.
|
612 |
+
- first_loop_state (bool): A flag indicating the first loop state.
|
613 |
+
- whole_conversation_metadata_str (str): A string to store whole conversation metadata.
|
614 |
+
- prompt1 (str): The first prompt for the model.
|
615 |
+
- prompt2 (str): The second prompt for the model.
|
616 |
+
- prompt3 (str): The third prompt for the model.
|
617 |
+
- system_prompt (str): The system prompt for the model.
|
618 |
+
- summarise_system_prompt (str): The system prompt for the summary part of the model.
|
619 |
+
- summarise_prompt (str): The prompt for the model summary.
|
620 |
+
- number of requests (int): The number of prompts to send to the model.
|
621 |
+
- batch_size (int): The number of data rows to consider in each request.
|
622 |
+
- max_tokens (int): The maximum number of tokens for the model.
|
623 |
+
- progress (Progress): A progress tracker.
|
624 |
+
'''
|
625 |
+
|
626 |
+
tic = time.perf_counter()
|
627 |
+
model = ""
|
628 |
+
config = ""
|
629 |
+
final_time = 0.0
|
630 |
+
whole_conversation_metadata = []
|
631 |
+
all_topic_tables_df = []
|
632 |
+
all_markdown_topic_tables = []
|
633 |
+
is_error = False
|
634 |
+
|
635 |
+
# Reset output files on each run:
|
636 |
+
# out_file_paths = []
|
637 |
+
|
638 |
+
model_choice_clean = replace_punctuation_with_underscore(model_choice)
|
639 |
+
|
640 |
+
# If this is the first time around, set variables to 0/blank
|
641 |
+
if first_loop_state==True:
|
642 |
+
latest_batch_completed = 0
|
643 |
+
out_message = []
|
644 |
+
out_file_paths = []
|
645 |
+
|
646 |
+
print("latest_batch_completed:", str(latest_batch_completed))
|
647 |
+
|
648 |
+
    if num_batches > 0:
        progress_measure = round(latest_batch_completed / num_batches, 1)
        progress(progress_measure, desc="Querying large language model")
    else:
        progress(0.1, desc="Querying large language model")

    # If out_message or out_file_paths are blank, change them to lists so they can be appended to
    if isinstance(out_message, str):
        out_message = [out_message]

    if not out_file_paths:
        out_file_paths = []

    # Check that there is data to process
    if file_data.empty:
        out_message = "Please enter text or a file to process."
        return out_message, existing_topics_w_references_table, existing_reference_df, out_file_paths, out_file_paths, latest_batch_completed, log_files_output_paths, log_files_output_paths, whole_conversation_metadata_str, final_time, out_message

    # shape[0] is the row count, matching the message below
    if model_choice == "anthropic.claude-3-sonnet-20240229-v1:0" and file_data.shape[0] > 300:
        out_message = "Your data has more than 300 rows, using the Sonnet model will be too expensive. Please choose the Haiku model instead."
        return out_message, existing_topics_w_references_table, existing_reference_df, out_file_paths, out_file_paths, latest_batch_completed, log_files_output_paths, log_files_output_paths, whole_conversation_metadata_str, final_time, out_message

    # If we have already processed the last batch, return the existing messages and file list to the relevant components
    if latest_batch_completed >= num_batches:
        print("Last batch reached, returning batch:", str(latest_batch_completed))
        # Set to a very high number so as not to mess with subsequent file processing by the user
        latest_batch_completed = 999

        toc = time.perf_counter()
        final_time = toc - tic

        final_out_message = '\n'.join(out_message)
        return display_table, existing_topics_w_references_table, existing_unique_topics_df, existing_reference_df, out_file_paths, out_file_paths, latest_batch_completed, log_files_output_paths, log_files_output_paths, whole_conversation_metadata_str, final_time, final_out_message

    reported_batch_no = latest_batch_completed + 1
    print("Running query batch", str(reported_batch_no))

    # Call the function to prepare the input table
    simplified_csv_table_path, normalised_simple_markdown_table, out_file_part, start_row, end_row = data_file_to_markdown_table(file_data, file_name, chosen_cols, output_folder, latest_batch_completed, batch_size)
    log_files_output_paths.append(simplified_csv_table_path)

    # Conversation history
    conversation_history = []

    # From the second batch onwards, the master table from previous batches is used when assigning topics to the new batch. This branch also runs if there is an existing list of topics supplied by the user
    if latest_batch_completed >= 1 or candidate_topics:

        # Prepare Gemini models before query
        if model_choice in ["gemini-1.5-flash-002", "gemini-1.5-pro-002"]:
            print("Using Gemini model:", model_choice)
            model, config = construct_gemini_generative_model(in_api_key=in_api_key, temperature=temperature, model_choice=model_choice, system_prompt=summarise_system_prompt, max_tokens=max_tokens)
        else:
            print("Using AWS Bedrock model:", model_choice)

        if candidate_topics:
            # 'Zero shot topics' are those supplied by the user
            zero_shot_topics = read_file(candidate_topics.name)
            zero_shot_topics_series = zero_shot_topics.iloc[:, 0]

            # Maximum of 120 topics allowed to fit within the model's context limits
            if len(zero_shot_topics_series) > 120:
                print("Maximum 120 topics allowed to fit within large language model context limits.")
                zero_shot_topics_series = zero_shot_topics_series.iloc[:120]

            zero_shot_topics_list = list(zero_shot_topics_series)

            print("Zero shot topics are:", zero_shot_topics_list)

            existing_unique_topics_df["Response References"] = ""

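        # The candidate topics read above are taken from the first column of the file,
        # e.g. (illustrative layout):
        #   Subtopic
        #   Bus frequency
        #   Ticket prices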
        # Create the most up-to-date list of topics and subtopics.
        # If there are candidate topics but existing_unique_topics_df hasn't yet been constructed, create it.
        if candidate_topics and existing_unique_topics_df.empty:
            existing_unique_topics_df = pd.DataFrame(data={'General Topic':'', 'Subtopic':zero_shot_topics_list, 'Sentiment':''})

        # Concatenate all zero shot and new topics together, so that for the next prompt the LLM will have the full list available
        elif candidate_topics and not existing_unique_topics_df.empty:
            zero_shot_topics_df = pd.DataFrame(data={'General Topic':'', 'Subtopic':zero_shot_topics_list, 'Sentiment':''})
            existing_unique_topics_df = pd.concat([existing_unique_topics_df, zero_shot_topics_df]).drop_duplicates("Subtopic")

            existing_unique_topics_df.to_csv(output_folder + "Existing topics with zero shot dropped.csv")

        unique_topics_markdown = existing_unique_topics_df[["General Topic", "Subtopic", "Sentiment"]].drop_duplicates(["General Topic", "Subtopic", "Sentiment"]).to_markdown(index=False)

        existing_unique_topics_df.to_csv(output_folder + f"{out_file_part}_master_all_topic_tables_df_merged_" + model_choice_clean + "_temp_" + str(temperature) + "_batch_" + str(latest_batch_completed) + ".csv")

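        # unique_topics_markdown renders as a markdown table along these (illustrative) lines:
        # | General Topic | Subtopic      | Sentiment |
        # |:--------------|:--------------|:----------|
        # | Transport     | Bus frequency | Negative  |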
        # Format the summary prompt with the response table and topics
        formatted_summary_prompt = summarise_prompt.format(response_table=normalised_simple_markdown_table, topics=unique_topics_markdown)

        # Define the output file path for the formatted prompt
        formatted_prompt_output_path = output_folder + out_file_part + "_full_prompt_" + model_choice_clean + "_temp_" + str(temperature) + ".txt"

        # Write the formatted prompt to the specified file
        try:
            with open(formatted_prompt_output_path, "w", encoding='utf-8', errors='replace') as f:
                f.write(formatted_summary_prompt)
        except Exception as e:
            print(f"Error writing prompt to file {formatted_prompt_output_path}: {e}")

        summary_prompt_list = [formatted_summary_prompt]

        print("master_summary_prompt_list:", summary_prompt_list[0])

        summary_conversation_history = []
        summary_whole_conversation = []

        # Process requests to large language model
        master_summary_response, summary_conversation_history, whole_summary_conversation, whole_conversation_metadata = process_requests(summary_prompt_list, summarise_system_prompt, summary_conversation_history, summary_whole_conversation, whole_conversation_metadata, model, config, model_choice, temperature, reported_batch_no, master=True)

        print("master_summary_response:", master_summary_response[-1].text)
        print("Whole conversation metadata:", whole_conversation_metadata)

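        # Parse the LLM response back into topic, reference and unique-topic tables, writing the raw output to the logs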
        new_topic_table_out_path, new_reference_table_out_path, new_unique_topics_df_out_path, new_topic_df, new_markdown_table, new_reference_df, new_unique_topics_df, master_batch_out_file_part, is_error = write_llm_output_and_logs(master_summary_response, whole_summary_conversation, whole_conversation_metadata, out_file_part, latest_batch_completed, start_row, end_row, model_choice_clean, temperature, log_files_output_paths, existing_reference_df, existing_unique_topics_df, first_run=False)

        # If error in table parsing, leave the function
        if is_error:
            final_message_out = "Could not complete summary, error in LLM output."
            return display_table, new_topic_df, new_unique_topics_df, new_reference_df, out_file_paths, out_file_paths, latest_batch_completed, log_files_output_paths, log_files_output_paths, whole_conversation_metadata_str, final_time, final_message_out

        # Write outputs to csv
        ## Topics with references
        new_topic_df.to_csv(new_topic_table_out_path, index=None)
        log_files_output_paths.append(new_topic_table_out_path)

        ## Reference table mapping response numbers to topics
        new_reference_df.to_csv(new_reference_table_out_path, index=None)
        log_files_output_paths.append(new_reference_table_out_path)

        ## Unique topic list
        new_unique_topics_df.to_csv(new_unique_topics_df_out_path, index=None)
        out_file_paths.append(new_unique_topics_df_out_path)

        all_topic_tables_df.append(new_topic_df)
        all_markdown_topic_tables.append(new_markdown_table)

        # Show unique topics alongside document counts as output
        display_table = new_unique_topics_df.to_markdown(index=False)

        whole_conversation_metadata.append(whole_conversation_metadata_str)
        whole_conversation_metadata_str = ' '.join(whole_conversation_metadata)

        # Write final output to text file also
        try:
            new_final_table_output_path = output_folder + master_batch_out_file_part + "_full_final_response_" + model_choice_clean + "_temp_" + str(temperature) + ".txt"

            with open(new_final_table_output_path, "w", encoding='utf-8', errors='replace') as f:
                f.write(display_table)

            log_files_output_paths.append(new_final_table_output_path)

        except Exception as e:
            print(e)

    # If this is the first batch, run this
    else:
        # Prepare Gemini models before query
        if model_choice in ["gemini-1.5-flash-002", "gemini-1.5-pro-002"]:
            print("Using Gemini model:", model_choice)
            model, config = construct_gemini_generative_model(in_api_key=in_api_key, temperature=temperature, model_choice=model_choice, system_prompt=system_prompt, max_tokens=max_tokens)
        else:
            print("Using AWS Bedrock model:", model_choice)

        formatted_prompt1 = prompt1.format(response_table=normalised_simple_markdown_table)

        if prompt2: formatted_prompt2 = prompt2.format(response_table=normalised_simple_markdown_table)
        else: formatted_prompt2 = prompt2

        if prompt3: formatted_prompt3 = prompt3.format(response_table=normalised_simple_markdown_table)
        else: formatted_prompt3 = prompt3

        batch_prompts = [formatted_prompt1, formatted_prompt2, formatted_prompt3][:number_of_requests] # Adjust this list to send fewer requests

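        # e.g. number_of_requests=1 sends only formatted_prompt1; a blank prompt2 or prompt3
        # passes through unchanged via the else branches above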
        whole_conversation = [system_prompt]

        # Process requests to large language model
        responses, conversation_history, whole_conversation, whole_conversation_metadata = process_requests(batch_prompts, system_prompt, conversation_history, whole_conversation, whole_conversation_metadata, model, config, model_choice, temperature, reported_batch_no)

        print("responses:", responses[-1].text)
        print("Whole conversation metadata:", whole_conversation_metadata)

        topic_table_out_path, reference_table_out_path, unique_topics_df_out_path, topic_table_df, markdown_table, reference_df, new_unique_topics_df, batch_out_file_part, is_error = write_llm_output_and_logs(responses, whole_conversation, whole_conversation_metadata, out_file_part, latest_batch_completed, start_row, end_row, model_choice_clean, temperature, log_files_output_paths, existing_reference_df, existing_unique_topics_df, first_run=True)

        # If error in table parsing, leave the function
        if is_error:
            final_message_out = "Could not complete topic extraction, error in LLM output."
            return display_table, topic_table_df, new_unique_topics_df, reference_df, out_file_paths, out_file_paths, latest_batch_completed, log_files_output_paths, log_files_output_paths, whole_conversation_metadata_str, final_time, final_message_out

        all_topic_tables_df.append(topic_table_df)

        topic_table_df.to_csv(topic_table_out_path, index=None)
        out_file_paths.append(topic_table_out_path)

        reference_df.to_csv(reference_table_out_path, index=None)
        log_files_output_paths.append(reference_table_out_path)

        ## Unique topic list
        new_unique_topics_df = pd.concat([new_unique_topics_df, existing_unique_topics_df]).drop_duplicates('Subtopic')

        print("new_unique_topics_df:", new_unique_topics_df)

        new_unique_topics_df.to_csv(unique_topics_df_out_path, index=None)
        out_file_paths.append(unique_topics_df_out_path)

        all_markdown_topic_tables.append(markdown_table)

        whole_conversation_metadata.append(whole_conversation_metadata_str)
        whole_conversation_metadata_str = ' '.join(whole_conversation_metadata)

        # Write final output to text file also
        try:
            final_table_output_path = output_folder + batch_out_file_part + "_full_final_response_" + model_choice_clean + "_temp_" + str(temperature) + ".txt"

            with open(final_table_output_path, "w", encoding='utf-8', errors='replace') as f:
                f.write(responses[-1].text)

            log_files_output_paths.append(final_table_output_path)

        except Exception as e:
            print(e)

        display_table = responses[-1].text
        new_topic_df = topic_table_df
        new_reference_df = reference_df

    # Increase the latest batch completed count unless we are at the last batch
    if latest_batch_completed != num_batches:
        print("Completed batch number:", str(latest_batch_completed))
        latest_batch_completed += 1

    toc = time.perf_counter()
    final_time = toc - tic
    out_time = f"in {final_time:0.1f} seconds."
    print(out_time)

    out_message.append('All queries successfully completed in')

    final_message_out = '\n'.join(out_message)
    final_message_out = final_message_out + " " + out_time

    final_message_out = final_message_out + "\n\nGo to the LLM settings tab to see processing logs. Please give feedback on the results below to help improve this app."

    return display_table, new_topic_df, new_unique_topics_df, new_reference_df, out_file_paths, out_file_paths, latest_batch_completed, log_files_output_paths, log_files_output_paths, whole_conversation_metadata_str, final_time, final_message_out
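
A rough, self-contained sketch of the batching arithmetic the function above relies on (the DataFrame contents and column name are invented for illustration; data_file_to_markdown_table itself is defined elsewhere in this module):

import pandas as pd

file_data = pd.DataFrame({"Response": ["Buses are late", "Fares too high", "Great drivers"]})
batch_size, latest_batch_completed = 2, 0
start_row = latest_batch_completed * batch_size
end_row = min(start_row + batch_size, len(file_data))
# This slice is what ends up embedded in the prompt as a markdown response table
batch_markdown = file_data.iloc[start_row:end_row].to_markdown(index=False)
print(batch_markdown)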
chatfuncs/prompts.py
CHANGED
@@ -64,4 +64,13 @@ You are an AI assistant that follows instruction extremely well. Help as much as you can.
 Answer the QUESTION using information from the following CONTENT. Respond with short answers that directly answer the question.\n
 CONTENT: {summaries}\n
 QUESTION: {question}\n
-Answer:<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"""
+Answer:<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n"""
+
+instruction_prompt_qwen = """<|im_start|>system\n
+You are an AI assistant that follows instruction extremely well. Help as much as you can.
+<|im_start|>user\n
+Answer the QUESTION using information from the following CONTENT. Respond with short answers that directly answer the question.
+CONTENT: {summaries}
+QUESTION: {question}\n
+Answer:<|im_end|>
+<|im_start|>assistant\n"""
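
A minimal sketch of how the new Qwen template might be filled at query time, assuming it is consumed with str.format like the other instruction prompts in this module (the context and question strings here are invented):

filled_prompt = instruction_prompt_qwen.format(
    summaries="Bus services were reduced in 2023; riders reported longer waits.",
    question="What happened to bus services?"
)
# filled_prompt is the chat-formatted string passed to the llama.cpp model
print(filled_prompt)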
requirements.txt
CHANGED
@@ -1,19 +1,22 @@
 langchain
 langchain-community
+langchain-huggingface
 beautifulsoup4
+google-generativeai==0.7.2
 pandas
-transformers==4.
+transformers==4.41.2
+torch --extra-index-url https://download.pytorch.org/whl/cu121
 llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121
---extra-index-url https://download.pytorch.org/whl/cu121
-sentence_transformers==2.2.2
+sentence_transformers==3.0.1
 faiss-cpu==1.7.4
 pypdf
 python-docx
 keybert
 span_marker
-gensim
-gradio
-gradio_client
+#gensim
+gradio
 nltk
+bm25s
+PyStemmer
+scipy<1.13
+numpy==1.26.4
requirements_cpu.txt
CHANGED
@@ -1,18 +1,22 @@
 langchain
+langchain-huggingface
 langchain-community
 beautifulsoup4
+google-generativeai==0.7.2
 pandas
-transformers==4.
+transformers==4.41.2
 llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
-torch
-sentence_transformers==
+torch==2.3.1
+sentence_transformers==3.0.1
 faiss-cpu==1.7.4
 pypdf
 python-docx
 keybert
 span_marker
-gensim
-gradio
-gradio_client
+#gensim
+gradio
 nltk
+bm25s
+PyStemmer
+scipy<1.13
+numpy==1.26.4