WIP
- app.py +33 -26
- climateqa/engine/chains/answer_chitchat.py +2 -0
- climateqa/engine/chains/answer_rag.py +2 -0
- climateqa/engine/chains/intent_categorization.py +2 -0
- climateqa/engine/chains/query_transformation.py +2 -0
- climateqa/engine/chains/retrieve_documents.py +2 -0
- climateqa/engine/chains/translation.py +2 -0
- requirements.txt +1 -1
- sandbox/20240310 - CQA - Semantic Routing 1.ipynb +0 -0
app.py
CHANGED
@@ -472,7 +472,10 @@ def save_graph(saved_graphs_state, embedding, category):
 # current_graphs = gr.State([])
 # saved_graphs = gr.State({})
 with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=theme,elem_id = "main-component") as demo:
-
+    chat_completed_state = gr.State(0)
+    current_graphs = gr.State([])
+    saved_graphs = gr.State({})
+
     with gr.Tab("ClimateQ&A"):
 
         with gr.Row(elem_id="chatbot-row"):
@@ -496,7 +499,7 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
             textbox=gr.Textbox(placeholder="Ask me anything here!",show_label=False,scale=7,lines = 1,interactive = True,elem_id="input-textbox")
 
 
-        with gr.Column(scale=1, variant="panel",elem_id = "right-panel"):
+        with gr.Column(scale=1.5, variant="panel",elem_id = "right-panel"):
 
 
             with gr.Tabs() as tabs:
@@ -563,26 +566,26 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
                     output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False)
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                with gr.Tab("Recommended content", elem_id="tab-recommended_content", id=3) as recommended_content_tab:
+
+                    @gr.render(inputs=[current_graphs])
+                    def display_default_recommended(current_graphs):
+                        if len(current_graphs)==0:
+                            placeholder_message = gr.HTML("<h2>There are no graphs to be displayed at the moment. Try asking another question.</h2>")
+
+                    @gr.render(inputs=[current_graphs],triggers=[chat_completed_state.change])
+                    def render_graphs(current_graph_list):
+                        global saved_graphs
+                        with gr.Column():
+                            print(f"\ncurrent_graph_list:\n{current_graph_list}")
+                            for (embedding, category) in current_graph_list:
+                                graphs_placeholder = gr.HTML(embedding, elem_id="graphs-placeholder")
+                                save_btn = gr.Button("Save Graph")
+                                save_btn.click(
+                                    save_graph,
+                                    [saved_graphs, gr.State(embedding), gr.State(category)],
+                                    [saved_graphs, save_btn]
+                                )
 
     #---------------------------------------------------------------------------------------
     # OTHER TABS
@@ -679,11 +682,12 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
         # history = history + [(query,None)]
         # history = [tuple(x) for x in history]
         history = history + [ChatMessage(role="user", content=query)]
-        return (gr.update(interactive = False),gr.update(selected=
+        return (gr.update(interactive = False),gr.update(selected=3),history)
 
     def finish_chat():
         return (gr.update(interactive = True,value = ""),gr.update(selected=3))
 
+
     def change_completion_status(current_state):
         current_state = 1 - current_state
         return current_state
@@ -697,10 +701,13 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
     )
 
     (examples_hidden
+        # .change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_examples")
+        # .then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports,current_graphs], [chatbot,sources_textbox,output_query,output_language,gallery_component, current_graphs],concurrency_limit = 8,api_name = "chat_examples")
+        # .then(finish_chat, None, [textbox,tabs],api_name = "finish_chat_examples")
+        # .then(change_completion_status, [chat_completed_state], [chat_completed_state])
         .change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_examples")
-        .then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports,current_graphs], [chatbot,sources_textbox,output_query,output_language,gallery_component, current_graphs],concurrency_limit = 8,api_name = "chat_examples")
-        .then(finish_chat, None, [textbox,tabs],api_name = "finish_chat_examples")
-        .then(change_completion_status, [chat_completed_state], [chat_completed_state])
+        .then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery_component],concurrency_limit = 8,api_name = "chat_examples")
+        .then(finish_chat, None, [textbox],api_name = "finish_chat_examples")
     )
 
 
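The app.py changes wire up the new "Recommended content" tab: three pieces of session state (chat_completed_state, current_graphs, saved_graphs) are created at the top of the Blocks, the right panel is widened from scale=1 to 1.5, and two @gr.render functions rebuild the tab's contents, a placeholder when current_graphs is empty, then one HTML embed plus a "Save Graph" button per graph once chat_completed_state flips. The examples event chain is re-registered without the current_graphs input/output (the old chain is kept commented out above it). A minimal standalone sketch of the @gr.render pattern, assuming Gradio 5; the demo below is hypothetical and not the app's code:

# Minimal sketch of the @gr.render pattern (assumes gradio>=5; hypothetical demo).
import gradio as gr

with gr.Blocks() as demo:
    graphs = gr.State([])  # list of HTML snippets, like current_graphs in app.py

    @gr.render(inputs=[graphs])
    def show_graphs(graph_list):
        # Re-executed every time the `graphs` state changes.
        if len(graph_list) == 0:
            gr.HTML("<h2>No graphs yet. Ask a question.</h2>")
        for html in graph_list:
            gr.HTML(html)

    add = gr.Button("Add a graph")
    # Updating the state re-triggers show_graphs and rebuilds the components.
    add.click(lambda g: g + ["<p>graph embed</p>"], [graphs], [graphs])

if __name__ == "__main__":
    demo.launch()

Because the component tree inside the decorated function is rebuilt on each state change, app.py can attach a fresh save_btn.click handler to every rendered graph.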
climateqa/engine/chains/answer_chitchat.py
CHANGED
@@ -45,6 +45,8 @@ def make_chitchat_node(llm):
     chitchat_chain = make_chitchat_chain(llm)
 
     async def answer_chitchat(state,config):
+        print("---- Answer chitchat ----")
+
         answer = await chitchat_chain.ainvoke({"question":state["user_input"]},config)
         return {"answer":answer}
 
climateqa/engine/chains/answer_rag.py
CHANGED
@@ -60,6 +60,8 @@ def make_rag_node(llm,with_docs = True):
     rag_chain = make_rag_chain_without_docs(llm)
 
     async def answer_rag(state,config):
+        print("---- Answer RAG ----")
+
         answer = await rag_chain.ainvoke(state,config)
         print(f"\n\nAnswer:\n{answer}")
         return {"answer":answer}
climateqa/engine/chains/intent_categorization.py
CHANGED
@@ -84,6 +84,8 @@ def make_intent_categorization_node(llm):
     categorization_chain = make_intent_categorization_chain(llm)
 
     def categorize_message(state):
+        print("---- Categorize_message ----")
+
         output = categorization_chain.invoke({"input": state["user_input"]})
         print(f"\n\nOutput intent categorization: {output}\n")
         if "language" not in output: output["language"] = "English"
climateqa/engine/chains/query_transformation.py
CHANGED
@@ -138,6 +138,8 @@ def make_query_transform_node(llm,k_final=15):
     rewriter_chain = make_query_rewriter_chain(llm)
 
     def transform_query(state):
+        print("---- Transform query ----")
+
 
         if "sources_auto" not in state or state["sources_auto"] is None or state["sources_auto"] is False:
             auto_mode = False
climateqa/engine/chains/retrieve_documents.py
CHANGED
@@ -68,6 +68,8 @@ def make_retriever_node(vectorstore,reranker,llm,rerank_by_question=True, k_fina
     # The chain callback is not necessary, but it propagates the langchain callbacks to the astream_events logger to display intermediate results
     @chain
     async def retrieve_documents(state,config):
+        print("---- Retrieve documents ----")
+
 
         keywords_extraction = make_keywords_extraction_chain(llm)
 
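As the comment in this hunk notes, wrapping retrieve_documents with LangChain's @chain decorator is optional but turns the function into a Runnable, so its start/end callbacks propagate to the astream_events logger and intermediate results can be shown while the graph runs. A toy sketch of the decorator, assuming langchain_core is installed; the function below is illustrative, not the app's node:

# Toy example of langchain's @chain decorator (assumes langchain_core installed).
from langchain_core.runnables import chain

@chain
def add_one(x: int) -> int:
    # Decorated functions become Runnables: they are called with
    # .invoke()/.ainvoke() and emit callback events that
    # astream_events() can surface as intermediate steps.
    return x + 1

print(add_one.invoke(1))  # -> 2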
climateqa/engine/chains/translation.py
CHANGED
@@ -30,6 +30,8 @@ def make_translation_chain(llm):
 
 
 def make_translation_node(llm):
+    print("---- Translate query ----")
+
 
     translation_chain = make_translation_chain(llm)
 
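The five chain modules above all receive the same two-line change: a print("---- <node name> ----") marker at the start of the node so the console shows which graph node is running. Note that in translation.py the print sits in the factory make_translation_node rather than in the node function, so it fires once at graph construction instead of per query. If these markers stay, a decorator would cut the repetition; a hypothetical sketch (not in the repo), assuming the nodes are plain sync or async callables:

# Hypothetical trace_node decorator: a refactoring sketch, not code from the repo.
import asyncio
import functools

def trace_node(name):
    def wrap(fn):
        if asyncio.iscoroutinefunction(fn):
            @functools.wraps(fn)
            async def traced(*args, **kwargs):
                print(f"---- {name} ----")
                return await fn(*args, **kwargs)
            return traced
        @functools.wraps(fn)
        def traced(*args, **kwargs):
            print(f"---- {name} ----")
            return fn(*args, **kwargs)
        return traced
    return wrap

# Usage, e.g. in intent_categorization.py:
#
# @trace_node("Categorize_message")
# def categorize_message(state):
#     ...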
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-gradio==
+gradio==5.0.2
 azure-storage-file-share==12.11.1
 azure-storage-blob
 python-dotenv==1.0.0
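The only dependency change pins gradio to 5.0.2, matching the Gradio 5 APIs the app.py diff leans on (css_paths on gr.Blocks, gr.ChatMessage, @gr.render). A quick post-install sanity check, assuming those names ship with this release:

# Post-install sanity check (assumes gradio==5.0.2 from requirements.txt).
import gradio as gr

print(gr.__version__)  # expect 5.0.2
msg = gr.ChatMessage(role="user", content="hello")  # message type used in app.py
print(msg.role, msg.content)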
sandbox/20240310 - CQA - Semantic Routing 1.ipynb
CHANGED
The diff for this file is too large to render.