Update query_data.py
query_data.py  CHANGED  (+4, -5)
@@ -7,7 +7,7 @@ from langchain.retrievers import EnsembleRetriever, BM25Retriever, ContextualCom
 from memory import memory3
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.vectorstores import FAISS
-from langchain.embeddings
+from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.retrievers.document_compressors import EmbeddingsFilter
 from langchain.document_transformers import EmbeddingsRedundantFilter
 from langchain.retrievers.document_compressors import DocumentCompressorPipeline
@@ -16,13 +16,12 @@ from pydantic import BaseModel, Field
 from typing import Any, Optional, Dict, List
 from huggingface_hub import InferenceClient
 from langchain.llms.base import LLM
-
 import os
-
+

 chat_model_name = "HuggingFaceH4/zephyr-7b-alpha"
 reform_model_name = "mistralai/Mistral-7B-Instruct-v0.1"
-hf_token = "
+hf_token = os.getenv("apiToken")
 kwargs = {"max_new_tokens":500, "temperature":0.9, "top_p":0.95, "repetition_penalty":1.0, "do_sample":True}
 reform_kwargs = {"max_new_tokens":50, "temperature":0.5, "top_p":0.9, "repetition_penalty":1.0, "do_sample":True}

@@ -76,7 +75,7 @@ PROMPT = PromptTemplate(

 chain_type_kwargs = {"prompt": PROMPT}

-embeddings =
+embeddings = HuggingFaceEmbeddings()
 vectorstore = FAISS.load_local("cima_faiss_index", embeddings)

 retriever=vectorstore.as_retriever(search_type="similarity", search_kwargs={"k":5})
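For context, here is a minimal sketch of how the changed lines fit together after this commit. The model id, generation kwargs, the "apiToken" secret name, and the "cima_faiss_index" folder come straight from the diff above; the example question and the InferenceClient/text_generation usage at the end are illustrative assumptions, not the rest of query_data.py.

import os

from huggingface_hub import InferenceClient
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Token is now read from the Space secret instead of being hardcoded in the file.
hf_token = os.getenv("apiToken")

chat_model_name = "HuggingFaceH4/zephyr-7b-alpha"
kwargs = {"max_new_tokens": 500, "temperature": 0.9, "top_p": 0.95,
          "repetition_penalty": 1.0, "do_sample": True}

# The default HuggingFaceEmbeddings model (sentence-transformers/all-MiniLM-L6-v2)
# should be the same model that was used to build cima_faiss_index.
embeddings = HuggingFaceEmbeddings()
vectorstore = FAISS.load_local("cima_faiss_index", embeddings)
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 5})

# Illustrative only: retrieve context for a hypothetical question and query the chat model.
question = "example question"
docs = retriever.get_relevant_documents(question)
context = "\n".join(d.page_content for d in docs)

client = InferenceClient(model=chat_model_name, token=hf_token)
answer = client.text_generation(f"Context:\n{context}\n\nQuestion: {question}", **kwargs)
print(answer)

Reading the token with os.getenv keeps the secret out of the repository; in a Hugging Face Space it is set under the Space's settings as a secret named apiToken.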