arslan-ahmed committed ed9ad5e
1 parent: 918a154
added mode type

Files changed:
- app.py +8 -11
- ttyd_consts.py +11 -13
- ttyd_functions.py +11 -8
app.py CHANGED

@@ -31,7 +31,7 @@ from ttyd_consts import *
 # selct the mode from ttyd_consts.py
 mode = mode_general
 
-if mode.
+if mode.type!='userInputDocs':
     # local vector store as opposed to gradio state vector store
     vsDict_hard = localData_vecStore(os.getenv("OPENAI_API_KEY"), inputDir=mode.inputDir, file_list=mode.file_list, url_list=mode.url_list)
 

@@ -74,13 +74,10 @@ def initializeChatbot(temp, k, modelName, stdlQs, api_key_st, vsDict_st, progres
 
 
 def setApiKey(api_key):
-
-    api_key=os.getenv("OPENAI_API_KEY")
+    api_key = transformApi(api_key)
     try:
-        api_key='Null' if api_key is None or api_key=='' else api_key
         openai.Model.list(api_key=api_key) # test the API key
         api_key_st = api_key
-
         return aKey_tb.update('API Key accepted', interactive=False, type='text'), aKey_btn.update(interactive=False), api_key_st
     except Exception as e:
         return aKey_tb.update(str(e), type='text'), *[x.update() for x in [aKey_btn, api_key_state]]

@@ -124,7 +121,7 @@ def uiData_vecStore(userFiles, userUrls, api_key_st, vsDict_st={}, progress=gr.P
 # just update the QA Chain, no updates to any UI
 def updateQaChain(temp, k, modelName, stdlQs, api_key_st, vsDict_st):
     # if we are not adding data from ui, then use vsDict_hard as vectorstore
-    if vsDict_st=={} and mode.
+    if vsDict_st=={} and mode.type!='userInputDocs': vsDict_st=vsDict_hard
     modelName = modelName.split('(')[0].strip() # so we can provide any info in brackets
     # check if the input model is chat model or legacy model
     try:

@@ -176,7 +173,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue='orange', secondary_hue='gray
     # Initialize state variables - stored in this browser session - these can only be used within input or output of .click/.submit etc, not as a python var coz they are not stored in backend, only as a frontend gradio component
     # but if you initialize it with a default value, that value will be stored in backend and accessible across all users. You can also change it with statear.value='newValue'
     qa_state = gr.State()
-    api_key_state = gr.State()
+    api_key_state = gr.State(os.getenv("OPENAI_API_KEY") if mode.type=='personalBot' else 'Null')
     chromaVS_state = gr.State({})
 
 

@@ -189,7 +186,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue='orange', secondary_hue='gray
         aKey_tb = gr.Textbox(label="OpenAI API Key", type='password'\
                     , info='You can find OpenAI API key at https://platform.openai.com/account/api-keys'\
                     , placeholder='Enter your API key here and hit enter to begin chatting')
-        aKey_btn = gr.Button("Submit API Key")
+        aKey_btn = gr.Button("Submit API Key")
     with gr.Row(visible=mode.uiAddDataVis):
         upload_fb = gr.Files(scale=5, label="Upload (multiple) Files - pdf/txt/docx supported", file_types=['.doc', '.docx', 'text', '.pdf', '.csv'])
         urls_tb = gr.Textbox(scale=5, label="Enter URLs starting with https (comma separated)"\

@@ -230,7 +227,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue='orange', secondary_hue='gray
     aKey_tb.submit(**aKey_btn_args)
 
     # Data Ingest Button
-    data_ingest_btn.click(uiData_vecStore, [upload_fb, urls_tb, api_key_state, chromaVS_state], [chromaVS_state, status_tb, data_ingest_btn, upload_fb, urls_tb])
+    data_ingest_event = data_ingest_btn.click(uiData_vecStore, [upload_fb, urls_tb, api_key_state, chromaVS_state], [chromaVS_state, status_tb, data_ingest_btn, upload_fb, urls_tb])
 
     # Adv Settings
     advSet_args = {'fn':updateQaChain, 'inputs':[temp_sld, k_sld, model_dd, stdlQs_rb, api_key_state, chromaVS_state], 'outputs':[qa_state]}

@@ -241,7 +238,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue='orange', secondary_hue='gray
 
     # Initialize button
     initCb_args = {'fn':initializeChatbot, 'inputs':[temp_sld, k_sld, model_dd, stdlQs_rb, api_key_state, chromaVS_state], 'outputs':[qa_state, btn, initChatbot_btn, aKey_tb, tabs, chatbot]}
-    if mode.
+    if mode.type=='personalBot':
        demo.load(**initCb_args) # load Chatbot UI directly on startup
    initChatbot_btn.click(**initCb_args)
 

@@ -250,5 +247,5 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue='orange', secondary_hue='gray
     btn.click(**chat_btn_args)
     msg.submit(**chat_btn_args)
 
-demo.queue()
+demo.queue(concurrency_count=10)
 demo.launch(show_error=True)
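Besides the mode-type branching, the last hunk bumps demo.queue() to concurrency_count=10 so several users can be served at once. A minimal standalone sketch of that call, assuming the Gradio 3.x API this Space appears to use (the function and labels are placeholders, not part of the commit):

import gradio as gr

def echo(message):
    # trivial stand-in for the Space's chat handler
    return message

with gr.Blocks() as demo:
    box = gr.Textbox(label="Say something")
    out = gr.Textbox(label="Echo")
    box.submit(echo, box, out)

demo.queue(concurrency_count=10)  # Gradio 3.x signature; later Gradio versions use default_concurrency_limit instead
demo.launch(show_error=True)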
ttyd_consts.py CHANGED

@@ -2,8 +2,8 @@ exp_query = 'Generate top 5 questions that I can ask about this data. Questions
 
 waitText_initialize = 'Preparing the documents, please wait...'
 
-initialize_prompt = 'Write a short welcome message to the user. Describe the
-If
+initialize_prompt = 'Write a short welcome message to the user. Describe the data with a comprehensive overview including short summary.\
+If this data is about a person, mention his name instead of using pronouns. After describing the overview, you should mention top 3 example questions that the user can ask about this data.\
 \n\nYour response should be short and precise. Format of your response should be Summary:\n{Description and Summary} \n\n Example Questions:\n{Example Questions}'
 
 nustian_exps = ['Tell me about NUSTIAN',

@@ -70,21 +70,19 @@ welcomeMsgArslan = """Summary: The document provides a comprehensive overview of
 
 
 class TtydMode():
-    def __init__(self, name='', title='',
+    def __init__(self, name='', title='', type='', dir=None, files=[], urls=[], vis=False, welMsg='', def_k=4):
         self.name = name
-        self.title = title
-        self.
-        self.type = type
+        self.title = title # markdown title for the top display
+        self.type = type # userInputDocs, fixedDocs, personalBot
         self.inputDir=dir
         self.file_list=files
         self.url_list=urls
-        self.uiAddDataVis = vis
-        self.welcomeMsg = welMsg
-        self.k = def_k
-
+        self.uiAddDataVis = vis # load data from user - this will be true for type = userInputDocs
+        self.welcomeMsg = welMsg #welcome msg constant - if not provided LLM will generate it
+        self.k = def_k # default k docs to retrieve
 
 
 
-mode_general = TtydMode(name='general', title=md_title_general, vis=True)
-mode_nustian = TtydMode(name='nustian', title=md_title_nustian,
-mode_arslan = TtydMode(name='arslan',
+mode_general = TtydMode(name='general', title=md_title_general, type='userInputDocs', vis=True)
+mode_nustian = TtydMode(name='nustian', title=md_title_nustian, type='fixedDocs', urls=['https://nustian.ca'])
+mode_arslan = TtydMode(name='arslan', title=md_title_arslan, type='personalBot', dir='./documents/', welMsg=welcomeMsgArslan, def_k=8)
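The new type field is what app.py branches on: 'userInputDocs' keeps the upload row visible and skips the startup vector store, 'fixedDocs' pre-loads a fixed corpus from urls/files, and 'personalBot' additionally uses the host's API key and initializes the chatbot on page load. A hypothetical mode definition, not part of this commit, showing how a new mode would be declared (the name, title string, and URL are placeholders):

from ttyd_consts import TtydMode   # assumes ttyd_consts.py and its constants import cleanly

mode_docs_demo = TtydMode(
    name='docsDemo',                      # internal mode name (assumed)
    title='# Chat with the Demo Docs',    # markdown title shown at the top of the UI
    type='fixedDocs',                     # one of: userInputDocs, fixedDocs, personalBot
    urls=['https://example.com'],         # fixed corpus crawled at startup
    vis=False,                            # hide the user upload row (corpus is fixed)
    def_k=4,                              # default number of chunks to retrieve
)

app.py would then select it the same way the diff above selects mode_general, by setting mode = mode_docs_demo near the top of the file.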
ttyd_functions.py CHANGED

@@ -1,17 +1,11 @@
 
 import datetime
-import openai
 import uuid
-import gradio as gr
 from langchain.embeddings import OpenAIEmbeddings
 from langchain.vectorstores import Chroma
-from langchain.text_splitter import
-from langchain.chains import ConversationalRetrievalChain
-from langchain.chains import RetrievalQA
+from langchain.text_splitter import RecursiveCharacterTextSplitter
 
 import os
-from langchain.chat_models import ChatOpenAI
-from langchain import OpenAI
 from langchain.document_loaders import WebBaseLoader, TextLoader, Docx2txtLoader, PyMuPDFLoader
 from whatsapp_chat_custom import WhatsAppChatLoader # use this instead of from langchain.document_loaders import WhatsAppChatLoader
 

@@ -31,6 +25,15 @@ mimetypes.init()
 media_files = tuple([x for x in mimetypes.types_map if mimetypes.types_map[x].split('/')[0] in ['image', 'video', 'audio']])
 filter_strings = ['/email-protection#']
 
+
+def transformApi(api_key=''):
+    if api_key==os.getenv("TEMP_PWD"):
+        return os.getenv("OPENAI_API_KEY")
+    elif api_key is None or api_key=='':
+        return 'Null'
+    else:
+        return api_key
+
 def get_hyperlinks(url):
     try:
         reqs = requests.get(url)

@@ -58,7 +61,7 @@ def get_domain_hyperlinks(local_domain, url):
         if re.search(HTTP_URL_PATTERN, link):
             # Parse the URL and check if the domain is the same
             url_obj = urlparse(link)
-            if url_obj.netloc == local_domain:
+            if url_obj.netloc.replace('www.','') == local_domain.replace('www.',''):
                 clean_link = link
 
         # If the link is not a URL, check if it is a relative link
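transformApi replaces the inline key handling that setApiKey used to do: a shared temporary password (TEMP_PWD) is swapped for the host's real key, empty or missing input becomes the 'Null' sentinel the app expects, and anything else is passed through untouched. A quick illustration, not part of the commit; importing ttyd_functions pulls in langchain and the other requirements of the Space, and the environment values below are placeholders:

import os
from ttyd_functions import transformApi

os.environ['TEMP_PWD'] = 'shared-demo-password'   # hypothetical shared password
os.environ['OPENAI_API_KEY'] = 'sk-placeholder'   # not a real key

print(transformApi('shared-demo-password'))  # -> 'sk-placeholder': temp password swapped for the host key
print(transformApi(''))                      # -> 'Null': empty input becomes the sentinel value
print(transformApi(None))                    # -> 'Null': missing input handled the same way
print(transformApi('sk-my-own-key'))         # -> 'sk-my-own-key': a real key passes through unchanged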