Update app.py
app.py CHANGED
@@ -11,18 +11,26 @@ from langchain.vectorstores import Chroma
 from langchain.chains import RetrievalQA
 import gradio as gr
 
-
-
-embeddings = HuggingFaceEmbeddings(
+def define_embeddings_llm(openai_key):
+    if openai_key != "":
+        embeddings = OpenAIEmbeddings(openai_api_key=openai_key)
+        llm = OpenAI(
+            temperature=0, model_name="gpt-3.5-turbo-16k", openai_api_key=openai_key, verbose=False
+        )
+    else:
+        HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
+        embeddings = HuggingFaceEmbeddings(
             model_name="sentence-transformers/all-MiniLM-L6-v2"
         )
-
-
-
-
-langchain.llm_cache = InMemoryCache()
+        llm = HuggingFaceHub(repo_id="MBZUAI/LaMini-Flan-T5-248M",
+                             model_kwargs={"max_length":2048,
+                                           "temperature":0.2}
+                             )
+    langchain.llm_cache = InMemoryCache()
+    return embeddings,llm
 
-def build_context(files,urls):
+def build_context(openai_key,files,urls):
+    embeddings, llm = define_embeddings_llm(openai_key)
     documents = []
     if files is not None:
         for idx, file in enumerate(files):
@@ -72,6 +80,8 @@ def clear_chromadb():
         vectordb._collection.delete(ids=id)
 
 with gr.Blocks() as demo:
+    with gr.Row():
+        openai_key = gr.Textbox(label="Enter your OpenAI API Key if you want to use the gpt-3.5-turbo-16k model. If not, the open source LaMini-Flan-T5-248M is used")
     with gr.Row():
         with gr.Column():
             pdf_docs = gr.Files(label="Load pdf/docx/ppt/pptx files", file_types=['.pdf','.docx','.ppt','.pptx'], type="file")
@@ -88,7 +98,7 @@ with gr.Blocks() as demo:
     clear = gr.ClearButton([msg, hg_chatbot])
     cleardb = gr.Button(value="Réinitialiser le contexte")
     load_context.click(loading, None, loading_status, queue=False)
-    load_context.click(build_context, inputs=[pdf_docs, urls], outputs=[loading_status], queue=False)
+    load_context.click(build_context, inputs=[openai_key,pdf_docs, urls], outputs=[loading_status], queue=False)
     msg.submit(respond, [msg, hg_chatbot], [msg, hg_chatbot])
     cleardb.click(clear_chromadb)
 
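For readers of the diff: the selection logic this commit introduces can be read as a standalone helper. The hunks do not show app.py's import block, so the sketch below is a minimal reconstruction that assumes the legacy `langchain` import paths (`langchain.embeddings`, `langchain.llms`, `langchain.cache`) together with `import os` and `import langchain`; it illustrates the fallback pattern rather than reproducing the exact file.

# Sketch of the embeddings/LLM fallback added in this commit (imports assumed,
# not shown in the hunks).
import os

import langchain
from langchain.cache import InMemoryCache
from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings
from langchain.llms import HuggingFaceHub, OpenAI


def define_embeddings_llm(openai_key):
    """Return (embeddings, llm): OpenAI models if a key is supplied, free Hugging Face models otherwise."""
    if openai_key != "":
        # Paid path: OpenAI embeddings + gpt-3.5-turbo-16k, billed to the user's key.
        embeddings = OpenAIEmbeddings(openai_api_key=openai_key)
        llm = OpenAI(
            temperature=0,
            model_name="gpt-3.5-turbo-16k",
            openai_api_key=openai_key,
            verbose=False,
        )
    else:
        # Free path: sentence-transformers embeddings + LaMini-Flan-T5-248M via the
        # Hugging Face Hub; HUGGINGFACEHUB_API_TOKEN must be set as a Space secret.
        HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2"
        )
        llm = HuggingFaceHub(
            repo_id="MBZUAI/LaMini-Flan-T5-248M",
            model_kwargs={"max_length": 2048, "temperature": 0.2},
        )
    # Cache repeated prompts in memory for the lifetime of the process.
    langchain.llm_cache = InMemoryCache()
    return embeddings, llm

On the Gradio side the commit only has to list the new `openai_key` Textbox as an extra input: `load_context.click(build_context, inputs=[openai_key, pdf_docs, urls], outputs=[loading_status], queue=False)` passes component values positionally, so `build_context(openai_key, files, urls)` receives the key string first and then picks the models through `define_embeddings_llm`.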