Update pages/ChatPDF_Reader.py
pages/ChatPDF_Reader.py CHANGED (+7 -5)
@@ -11,12 +11,14 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter

import streamlit as st
import sys,yaml,Utilities as ut
-
+import os
+print('HF_TOKEN',os.getenv('HF_TOKEN'))
def get_data(query):
    chat_history = []
    initdict={}
    initdict = ut.get_tokens()
-    hf_token = initdict["hf_token"]
+    hf_token = os.getenv('HF_TOKEN')
+    #hf_token = initdict["hf_token"]
    embedding_model_id = initdict["embedding_model"]
    chromadbpath = initdict["chatPDF_chroma_db"]
    llm_repo_id = initdict["llm_repoid"]
@@ -30,10 +32,10 @@ def get_data(query):
    db = Chroma(persist_directory=chromadbpath, embedding_function=embeddings)
    retriever = db.as_retriever(search_type="mmr", search_kwargs={'k': 2})

-    #llm = HuggingFaceHub(huggingfacehub_api_token=hf_token,
-    #    repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})
+    llm = HuggingFaceHub(huggingfacehub_api_token=hf_token,
+        repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})

-    llm = HuggingFaceHub(repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})
+    #llm = HuggingFaceHub(repo_id=llm_repo_id, model_kwargs={"temperature":0.2, "max_new_tokens":50})

    # Create the Conversational Retrieval Chain
    qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever,return_source_documents=True)
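
The change reads the Hugging Face API token from the HF_TOKEN environment variable (on Spaces, repository secrets are exposed to the running app as environment variables) and passes it to HuggingFaceHub explicitly, instead of loading it from the YAML config via ut.get_tokens(). A minimal sketch of how that lookup could fail fast rather than silently passing None; resolve_hf_token is a hypothetical helper, not part of this repo:

import os

def resolve_hf_token(initdict):
    # Hypothetical helper: prefer the Space secret, fall back to the
    # legacy YAML config entry that this commit comments out.
    token = os.getenv("HF_TOKEN") or initdict.get("hf_token")
    if not token:
        raise RuntimeError("HF_TOKEN is not set; add it as a Space secret.")
    return token

Note that the debug line print('HF_TOKEN',os.getenv('HF_TOKEN')) writes the raw token to the Space logs; once the wiring is confirmed, it is safer to log only whether the variable is set.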
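
For context, the chain built on the last line is typically invoked with the question and the running chat history; a short sketch under the legacy LangChain call convention this file uses:

result = qa_chain({"question": query, "chat_history": chat_history})
answer = result["answer"]
sources = result["source_documents"]  # present because return_source_documents=True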