bug fix
app.py CHANGED
@@ -10,7 +10,6 @@ from langchain.schema.runnable import RunnablePassthrough
 from langchain_openai import ChatOpenAI
 from langchain.schema.runnable.config import RunnableConfig
 from langchain_core.output_parsers import StrOutputParser
-from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.document_loaders import UnstructuredPDFLoader


@@ -57,36 +56,9 @@ text_splitter = RecursiveCharacterTextSplitter(
     ],
 )

-# bnb_config = BitsAndBytesConfig(
-# load_in_4bit=True,
-# bnb_4bit_quant_type="nf4",
-# bnb_double_quant=True,
-# bnb_4bit_compute_dtype=torch.float16,
-# )
-
-
-# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct",
-# trust_remote_code=True,
-# quantization_config=bnb_config,
-# attn_implementation='eager',
-# device_map='auto',)
-# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
-
-
-# hf = HuggingFacePipeline.from_model_id(
-# model_id="microsoft/Phi-3-mini-4k-instruct",
-# task="text-generation",
-# device_map="auto",
-# pipeline_kwargs={"max_new_tokens": 10},
-# )

-
-# loader = PyPDFLoader("https://w2l.sbst.dk/file/502104/br_femogfirs.pdf")
 loader = UnstructuredPDFLoader("br_femogfirs.pdf", strategy="fast")
 data = loader.load_and_split(text_splitter)
-# data = loader.load()
-
-

 rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)

@@ -107,13 +79,12 @@ async def main():
     vector_store = Pinecone.from_documents(data, embedding_model, index_name="bygnings-regl-rag-1")
     retriever = vector_store.as_retriever()

-
     mecanic_qa_chain = (
         {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
         | RunnablePassthrough.assign(context=itemgetter("context"))
         | rag_prompt | model | StrOutputParser())

-
+    cl.user_session.set("runnable", mecanic_qa_chain)

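
The added line stores the composed RAG chain in the Chainlit user session so that later handlers can retrieve it per user. As a rough sketch of the consuming side, the usual Chainlit + LangChain pattern looks like the code below; the handler body is an assumption about how this Space's on_message hook is written, not code taken from the commit.

import chainlit as cl
from langchain.schema.runnable.config import RunnableConfig


@cl.on_message
async def on_message(message: cl.Message):
    # Fetch the chain that the on_chat_start handler stored under "runnable".
    runnable = cl.user_session.get("runnable")

    msg = cl.Message(content="")
    # The chain expects a {"question": ...} input, matching the
    # itemgetter("question") composition in the diff, and yields string
    # chunks because it ends in StrOutputParser.
    async for chunk in runnable.astream(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    await msg.send()

If nothing is stored under "runnable", cl.user_session.get("runnable") returns None and any handler like the sketch above fails on the first message, which is presumably the behavior the added set() call is meant to prevent.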