Added routing to avoid answering when we don't find docs
Browse files- app.py +7 -6
- climateqa/engine/prompts.py +18 -0
- climateqa/engine/rag.py +18 -4
app.py
CHANGED
|
@@ -347,6 +347,7 @@ with gr.Blocks(title="Climate Q&A", css="style.css", theme=theme,elem_id = "main
|
|
| 347 |
examples_per_page=8,
|
| 348 |
run_on_click=False,
|
| 349 |
elem_id=f"examples{i}",
|
|
|
|
| 350 |
# label = "Click on the example question or enter your own",
|
| 351 |
# cache_examples=True,
|
| 352 |
)
|
|
@@ -400,15 +401,15 @@ with gr.Blocks(title="Climate Q&A", css="style.css", theme=theme,elem_id = "main
|
|
| 400 |
return (gr.update(interactive = True,value = ""))
|
| 401 |
|
| 402 |
(textbox
|
| 403 |
-
.submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False)
|
| 404 |
-
.success(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery],concurrency_limit = 8)
|
| 405 |
-
.success(finish_chat, None, [textbox])
|
| 406 |
)
|
| 407 |
|
| 408 |
(examples_hidden
|
| 409 |
-
.change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False)
|
| 410 |
-
.success(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery],concurrency_limit = 8)
|
| 411 |
-
.success(finish_chat, None, [textbox])
|
| 412 |
)
|
| 413 |
|
| 414 |
|
|
|
|
| 347 |
examples_per_page=8,
|
| 348 |
run_on_click=False,
|
| 349 |
elem_id=f"examples{i}",
|
| 350 |
+
api_name=f"examples{i}",
|
| 351 |
# label = "Click on the example question or enter your own",
|
| 352 |
# cache_examples=True,
|
| 353 |
)
|
|
|
|
| 401 |
return (gr.update(interactive = True,value = ""))
|
| 402 |
|
| 403 |
(textbox
|
| 404 |
+
.submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_textbox")
|
| 405 |
+
.success(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery],concurrency_limit = 8,api_name = "chat_textbox")
|
| 406 |
+
.success(finish_chat, None, [textbox],api_name = "finish_chat_textbox")
|
| 407 |
)
|
| 408 |
|
| 409 |
(examples_hidden
|
| 410 |
+
.change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_examples")
|
| 411 |
+
.success(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery],concurrency_limit = 8,api_name = "chat_examples")
|
| 412 |
+
.success(finish_chat, None, [textbox],api_name = "finish_chat_examples")
|
| 413 |
)
|
| 414 |
|
| 415 |
|
climateqa/engine/prompts.py
CHANGED
|
@@ -48,6 +48,7 @@ Guidelines:
|
|
| 48 |
- If it makes sense, use bullet points and lists to make your answers easier to understand.
|
| 49 |
- You do not need to use every passage. Only use the ones that help answer the question.
|
| 50 |
- If the documents do not have the information needed to answer the question, just say you do not have enough information.
|
|
|
|
| 51 |
|
| 52 |
-----------------------
|
| 53 |
Passages:
|
|
@@ -59,6 +60,23 @@ Answer in {language} with the passages citations:
|
|
| 59 |
"""
|
| 60 |
|
| 61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
audience_prompts = {
|
| 63 |
"children": "6 year old children that don't know anything about science and climate change and need metaphors to learn",
|
| 64 |
"general": "the general public who know the basics in science and climate change and want to learn more about it without technical terms. Still use references to passages.",
|
|
|
|
| 48 |
- If it makes sense, use bullet points and lists to make your answers easier to understand.
|
| 49 |
- You do not need to use every passage. Only use the ones that help answer the question.
|
| 50 |
- If the documents do not have the information needed to answer the question, just say you do not have enough information.
|
| 51 |
+
- Consider by default that the question is about the past century unless it is specified otherwise.
|
| 52 |
|
| 53 |
-----------------------
|
| 54 |
Passages:
|
|
|
|
| 60 |
"""
|
| 61 |
|
| 62 |
|
| 63 |
+
answer_prompt_without_docs_template = """
|
| 64 |
+
You are ClimateQ&A, an AI Assistant created by Ekimetrics. Your role is to answer climate-related questions using information from the IPCC and/or IPBES reports.
|
| 65 |
+
Always stay true to climate science and do not make up information. If you do not know the answer, just say you do not know.
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
Guidelines:
|
| 69 |
+
- Start by explaining clearly that you could not find the answer in the IPCC/IPBES reports, so your answer is based on your own knowledge and must be taken with great caution because it is AI-generated.
|
| 70 |
+
- Also explain that the user should reformulate the question to be more specific to the IPCC/IPBES reports.
|
| 71 |
+
- Because your role is to explain climate science, you can never be skeptical about climate change.
|
| 72 |
+
- If the question is not related to environmental issues, never answer it. Say it's not your role.
|
| 73 |
+
|
| 74 |
+
Question: {question} - Explained to {audience}
|
| 75 |
+
Answer in {language}:
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
|
| 80 |
audience_prompts = {
|
| 81 |
"children": "6 year old children that don't know anything about science and climate change and need metaphors to learn",
|
| 82 |
"general": "the general public who know the basics in science and climate change and want to learn more about it without technical terms. Still use references to passages.",
|
climateqa/engine/rag.py
CHANGED
|
@@ -2,12 +2,12 @@ from operator import itemgetter
|
|
| 2 |
|
| 3 |
from langchain.prompts import ChatPromptTemplate
|
| 4 |
from langchain.schema.output_parser import StrOutputParser
|
| 5 |
-
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
|
| 6 |
from langchain.prompts.prompt import PromptTemplate
|
| 7 |
from langchain.schema import format_document
|
| 8 |
|
| 9 |
from climateqa.engine.reformulation import make_reformulation_chain
|
| 10 |
-
from climateqa.engine.prompts import answer_prompt_template
|
| 11 |
from climateqa.engine.utils import pass_values, flatten_dict
|
| 12 |
|
| 13 |
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
|
|
@@ -24,7 +24,7 @@ def make_rag_chain(retriever,llm):
|
|
| 24 |
|
| 25 |
# Construct the prompt
|
| 26 |
prompt = ChatPromptTemplate.from_template(answer_prompt_template)
|
| 27 |
-
|
| 28 |
|
| 29 |
# ------- CHAIN 0 - Reformulation
|
| 30 |
reformulation_chain = make_reformulation_chain(llm)
|
|
@@ -51,11 +51,25 @@ def make_rag_chain(retriever,llm):
|
|
| 51 |
}
|
| 52 |
|
| 53 |
# Generate the answer
|
| 54 |
-
|
|
|
|
|
|
|
|
|
|
| 55 |
"answer": input_documents | prompt | llm | StrOutputParser(),
|
| 56 |
**pass_values(["question","audience","language","query","docs"])
|
| 57 |
}
|
| 58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
# ------- FINAL CHAIN
|
| 60 |
# Build the final chain
|
| 61 |
rag_chain = reformulation | find_documents | answer
|
|
|
|
| 2 |
|
| 3 |
from langchain.prompts import ChatPromptTemplate
|
| 4 |
from langchain.schema.output_parser import StrOutputParser
|
| 5 |
+
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda, RunnableBranch
|
| 6 |
from langchain.prompts.prompt import PromptTemplate
|
| 7 |
from langchain.schema import format_document
|
| 8 |
|
| 9 |
from climateqa.engine.reformulation import make_reformulation_chain
|
| 10 |
+
from climateqa.engine.prompts import answer_prompt_template,answer_prompt_without_docs_template
|
| 11 |
from climateqa.engine.utils import pass_values, flatten_dict
|
| 12 |
|
| 13 |
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
|
|
|
|
| 24 |
|
| 25 |
# Construct the prompt
|
| 26 |
prompt = ChatPromptTemplate.from_template(answer_prompt_template)
|
| 27 |
+
prompt_without_docs = ChatPromptTemplate.from_template(answer_prompt_without_docs_template)
|
| 28 |
|
| 29 |
# ------- CHAIN 0 - Reformulation
|
| 30 |
reformulation_chain = make_reformulation_chain(llm)
|
|
|
|
| 51 |
}
|
| 52 |
|
| 53 |
# Generate the answer
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
answer_with_docs = {
|
| 58 |
"answer": input_documents | prompt | llm | StrOutputParser(),
|
| 59 |
**pass_values(["question","audience","language","query","docs"])
|
| 60 |
}
|
| 61 |
|
| 62 |
+
answer_without_docs = {
|
| 63 |
+
"answer": prompt_without_docs | llm | StrOutputParser(),
|
| 64 |
+
**pass_values(["question","audience","language","query","docs"])
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
answer = RunnableBranch(
|
| 68 |
+
(lambda x: len(x["docs"]) > 0, answer_with_docs),
|
| 69 |
+
answer_without_docs,
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
# ------- FINAL CHAIN
|
| 74 |
# Build the final chain
|
| 75 |
rag_chain = reformulation | find_documents | answer
|