from langchain_core.messages.human import HumanMessage
from langchain_core.messages.ai import AIMessage
from lab_7_structured_retriever import structured_retriever
from langchain_core.output_parsers import StrOutputParser
from lab_2_conf_manage_neo4j import conf_neo4j
from lab_6_unstructured_retriever import get_vector_index
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts import ChatPromptTemplate
from typing import Tuple, List, Optional
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatTongyi
from langchain_core.runnables import (
    RunnableBranch,
    RunnableLambda,
    RunnableParallel,
    RunnablePassthrough,
) 


def retriever(question: str):
    """Hybrid retriever: merge graph-structured facts and vector-search passages.

    Runs the structured (Neo4j graph) retriever and the unstructured (vector
    index similarity search) retriever for the same question, then folds both
    results into one context string for the downstream prompt.
    """
    print(f"Search query: {question}")
    graph_context = structured_retriever(question)
    doc_texts = [doc.page_content for doc in get_vector_index().similarity_search(question)]
    # NOTE: the literal layout of this f-string (including indentation) is part
    # of the context handed to the LLM — keep it unchanged.
    merged_context = f"""Structured data: {graph_context}
                     Unstructured data: {"#Document ".join(doc_texts)}
                  """
    return merged_context


if __name__ == '__main__':
    import os

    # Prompt template is in Chinese ("answer the question based only on the
    # following context") — kept verbatim since it is runtime prompt text.
    template = """仅根据以下上下文回答问题：
                  {context}
                  问题：{question} 
               """
    prompt = ChatPromptTemplate.from_template(template)

    # SECURITY FIX: the API key was hard-coded in source (a leaked secret).
    # Read it from the environment instead; fail fast with a clear message
    # when it is missing rather than sending an invalid key to the service.
    api_key = os.environ.get("DASHSCOPE_API_KEY")
    if not api_key:
        raise RuntimeError("Set the DASHSCOPE_API_KEY environment variable.")
    llm = ChatTongyi(api_key=api_key)

    # Pull the question text out of the chain's input dict for the retriever.
    _search_query = RunnableLambda(lambda x: x["question"])

    # Pipeline: fan out into {context, question} -> prompt -> LLM -> plain text.
    chain = (
        RunnableParallel(
            {
                "context": _search_query | retriever,
                "question": RunnablePassthrough(),
            }
        )
        | prompt
        | llm
        | StrOutputParser()
    )

    result = chain.invoke({"question": "Which house did Zhangyuanyuan I belong to?"})
    print(result)
 
