rewrite prompt
Browse files
langchain-streamlit-demo/app.py
CHANGED
@@ -519,7 +519,7 @@ if st.session_state.llm:
|
|
519 |
|
520 |
@tool("user-document-chat")
|
521 |
def doc_chain_tool(input_str: str, callbacks: Callbacks = None):
|
522 |
-
"""Usa sempre questo strumento almeno una volta. L'input
|
523 |
|
524 |
# """Always use this tool at least once. Input should be a question."""
|
525 |
response = st.session_state.doc_chain.invoke(
|
|
|
519 |
|
520 |
@tool("user-document-chat")
|
521 |
def doc_chain_tool(input_str: str, callbacks: Callbacks = None):
|
522 |
+
"""Usa sempre questo strumento almeno una volta. L'input deve essere una domanda. Non ti fidare di questo tool se ti risponde che non ha informazioni dirette"""
|
523 |
|
524 |
# """Always use this tool at least once. Input should be a question."""
|
525 |
response = st.session_state.doc_chain.invoke(
|
langchain-streamlit-demo/defaults.py
CHANGED
@@ -47,7 +47,7 @@ DEFAULT_MAX_TOKENS = int(os.environ.get("DEFAULT_MAX_TOKENS", 1000))
|
|
47 |
DEFAULT_LANGSMITH_PROJECT = os.environ.get("LANGCHAIN_PROJECT")
|
48 |
|
49 |
TEST_QUESTIONS = [
|
50 |
-
"
|
51 |
"Quali prestazioni presenti nel checkup?",
|
52 |
"La risonanza magnetica è coperta dalla polizza?",
|
53 |
"Le visite odontoiatriche sono coperte dalla polizza?",
|
|
|
47 |
DEFAULT_LANGSMITH_PROJECT = os.environ.get("LANGCHAIN_PROJECT")
|
48 |
|
49 |
TEST_QUESTIONS = [
|
50 |
+
"Come posso ottenerle le credenziali di accesso all'area riservata?",
|
51 |
"Quali prestazioni presenti nel checkup?",
|
52 |
"La risonanza magnetica è coperta dalla polizza?",
|
53 |
"Le visite odontoiatriche sono coperte dalla polizza?",
|
langchain-streamlit-demo/llm_resources.py
CHANGED
@@ -15,7 +15,7 @@ from langchain.callbacks.base import BaseCallbackHandler
|
|
15 |
from langchain.chains import RetrievalQA
|
16 |
from langchain.llms.base import BaseLLM
|
17 |
from langchain.memory import ConversationBufferMemory
|
18 |
-
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
|
19 |
from langchain.retrievers import EnsembleRetriever
|
20 |
from langchain.retrievers.multi_query import MultiQueryRetriever
|
21 |
from langchain.retrievers.multi_vector import MultiVectorRetriever
|
@@ -304,6 +304,15 @@ def get_texts_and_multiretriever(
|
|
304 |
multiquery_retriever = MultiQueryRetriever.from_llm(
|
305 |
retriever=multiquerystore.as_retriever(search_kwargs={"k": k}),
|
306 |
llm=ChatOpenAI(model=model, temperature=0.0),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
307 |
)
|
308 |
|
309 |
ensemble_retriever = EnsembleRetriever(
|
|
|
15 |
from langchain.chains import RetrievalQA
|
16 |
from langchain.llms.base import BaseLLM
|
17 |
from langchain.memory import ConversationBufferMemory
|
18 |
+
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
|
19 |
from langchain.retrievers import EnsembleRetriever
|
20 |
from langchain.retrievers.multi_query import MultiQueryRetriever
|
21 |
from langchain.retrievers.multi_vector import MultiVectorRetriever
|
|
|
304 |
multiquery_retriever = MultiQueryRetriever.from_llm(
|
305 |
retriever=multiquerystore.as_retriever(search_kwargs={"k": k}),
|
306 |
llm=ChatOpenAI(model=model, temperature=0.0),
|
307 |
+
prompt=PromptTemplate(
|
308 |
+
input_variables=["question"],
|
309 |
+
template="""You are an AI language model assistant. Your task is
|
310 |
+
to generate 3 different versions of the given user
|
311 |
+
question to retrieve relevant documents from a vector database.
|
312 |
+
By generating multiple perspectives on the user question,
|
313 |
+
your goal is to help the user overcome some of the limitations
|
314 |
+
of distance-based similarity search. ABSOLUTELY DO NOT OUTPUT EMPTY LINES. Original question: {question}""",
|
315 |
+
),
|
316 |
)
|
317 |
|
318 |
ensemble_retriever = EnsembleRetriever(
|