Update app.py
change to ver1
app.py
CHANGED
@@ -6,28 +6,27 @@ import pathlib
 from tempfile import NamedTemporaryFile
 
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-from …
-from …
+from langchain.llms import HuggingFacePipeline
+from langchain.llms import LlamaCpp
 from langchain import PromptTemplate, LLMChain
 from langchain.callbacks.manager import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from …
+from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.chains import RetrievalQA
-from …
+from langchain.vectorstores import FAISS
 from PyPDF2 import PdfReader
 import os
 import time
 from langchain.chains.question_answering import load_qa_chain
 from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
 
-from …
-from …
-…
-…
-…
-…
-…
-# from langchain.document_loaders import TextLoader
+from langchain.document_loaders import TextLoader
+from langchain.document_loaders import PyPDFLoader
+from langchain.document_loaders import Docx2txtLoader
+from langchain.document_loaders.image import UnstructuredImageLoader
+from langchain.document_loaders import UnstructuredHTMLLoader
+from langchain.document_loaders import UnstructuredPowerPointLoader
+from langchain.document_loaders import TextLoader
 from langchain.memory import ConversationBufferWindowMemory
 
 from langchain.memory import ConversationBufferMemory
@@ -168,7 +167,7 @@ def split_docs(documents,chunk_size=1000):
 
 @st.cache_resource
 def load_llama2_llamaCpp():
-    core_model_name = "…"
+    core_model_name = "llama-2-7b-chat.Q4_0.gguf"
     #n_gpu_layers = 32
     n_batch = 512
     callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
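The second hunk shows only the first few lines of load_llama2_llamaCpp(). Since this commit also imports LlamaCpp from langchain.llms, the function plausibly finishes by constructing that wrapper around the GGUF file. The sketch below fills in that assumed continuation; everything after callback_manager, including the constructor arguments beyond model_path, is an assumption rather than code from the Space.

# Sketch only: the diff stops after callback_manager; the LlamaCpp(...) call
# below is an assumed continuation, not the Space's actual code.
import streamlit as st
from langchain.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

@st.cache_resource  # cache the model across Streamlit reruns, as in the diff
def load_llama2_llamaCpp():
    core_model_name = "llama-2-7b-chat.Q4_0.gguf"  # value set by this commit
    #n_gpu_layers = 32  # left commented out in the diff (CPU-only Space)
    n_batch = 512       # tokens per llama.cpp batch, from the diff
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
    # Assumed continuation: build the LangChain LlamaCpp LLM from the GGUF file,
    # streaming tokens to stdout via the callback manager.
    llm = LlamaCpp(
        model_path=core_model_name,
        n_batch=n_batch,
        callback_manager=callback_manager,
        verbose=True,
    )
    return llm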
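The first hunk's new imports (HuggingFaceEmbeddings, FAISS, and the document_loaders family) point at a standard ingest-and-retrieve pipeline, and the hunk header references split_docs(documents,chunk_size=1000), so some chunking step exists in the app. The sketch below shows one conventional way those pieces fit together; the function name build_retrieval_qa, the RecursiveCharacterTextSplitter, the chunk_overlap value, and the all-MiniLM-L6-v2 embedding model are illustrative assumptions, not taken from app.py.

# Sketch only: one conventional wiring of the imports this commit adds,
# under the assumptions named above.
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter  # assumed; splitter not shown in the diff
from langchain.chains import RetrievalQA

def build_retrieval_qa(llm, pdf_path: str):
    # Load the PDF and split it into chunks, mirroring the chunk_size=1000
    # that appears in the split_docs() signature from the hunk header.
    docs = PyPDFLoader(pdf_path).load()
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=100
    ).split_documents(docs)
    # Embed the chunks and index them in FAISS; the model name is an assumption.
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    index = FAISS.from_documents(chunks, embeddings)
    # RetrievalQA (imported in the diff) answers questions over the index.
    return RetrievalQA.from_chain_type(llm=llm, retriever=index.as_retriever())

The other loaders the commit imports (Docx2txtLoader, UnstructuredImageLoader, UnstructuredHTMLLoader, UnstructuredPowerPointLoader) would slot into the same load step, selected by the uploaded file's extension.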