# -*- coding: utf-8 -*-
# @Author: Tim Liu
# @Date: 2024-05-26
# @Last Modified by: Tim Liu
# @Last Modified time: 2024-05-26
from typing import List, Iterator

# @Description: api endpoints for knowledge base crud
import json
from fastapi import APIRouter, Depends, Security
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain_core.prompts import PromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain_milvus import Zilliz
from langchain_openai import AzureChatOpenAI
from utils.response import SuccessResponse
from crewplus.apps.apikey.models.account_api_key import AccountApiKey
from core.exception import CustomException
from crewplus.services.graph_engines.neo4j_graph_service import Neo4jGraphService
from ...helper.langsmith_util import use_langsmith
from crewplus.services.conversation.mongodb_conversation_service import MongoDBConversationService

from ...services.vdb_service import VDBService
from core.database import db_getter
from sqlalchemy.ext.asyncio import AsyncSession
from sse_starlette.sse import EventSourceResponse

from utils import status
from crewplus.helper.verify import get_api_key

from crewplus.apps.rag.schemas.question_request import QuestionRequest
from crewplus.services.chat_openai_service import ChatOpenAIService


from langchain_core.output_parsers import StrOutputParser
from langchain_core.documents import Document
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from config.settings import *

import logging

from crewplus.tools.ToolManager import ToolManager

app = APIRouter()

###########################################################
#    Knowledge Base Endpoints
###########################################################
@app.get("/knowledgerag", summary="Get knowledge rag response")
async def get_knowledgebases(query:str, apikey: AccountApiKey = Security(get_api_key)):
    if apikey is None:
        raise CustomException("apiKey invalid", code=status.HTTP_ERROR)
        # URI examples: "neo4j://localhost", "neo4j+s://xxx.databases.neo4j.io"
    url = NEO4J_GRAPH_ENDPOINT
    user = NEO4J_USER
    password=NEO4J_PASSWORD
    llm = AzureChatOpenAI(
        azure_endpoint="https://crewplus-westus.openai.azure.com/",
        openai_api_version="2024-05-01-preview",
        deployment_name="cpai-gpt4o-westus",
        openai_api_key="b93bc4d2ef8e4298bd8390002922d084"
        )
    #    api_key="b981ffef4db744af8f01299a4291abb5",
    #    azure_endpoint="https://crewplus-sweden-central.openai.azure.com/")

    neo4j_graph_service = Neo4jGraphService(url=url, user=user, password=password,llm=llm)
    # cypher_content=neo4j_graph_service.construct_cypher(question=query)
    response_content=neo4j_graph_service.run(question=query)
    logging.info(response_content)
    
    return SuccessResponse(response_content)

def format_docs(docs: List[Document]):
    """Join the page contents of *docs* into one blank-line-separated string."""
    contents = (doc.page_content for doc in docs)
    return "\n\n".join(contents)

def parseDocs(docs, convid: str = None) -> Iterator[str]:
    """Stream RAG chain chunks, yielding source metadata and answer text.

    Chunks carrying a non-None 'context' key are turned into a sources
    payload (document metadata only, page_content dropped); every other
    chunk is forwarded as answer text and accumulated so the complete AI
    reply can be persisted to the conversation history at the end.
    """
    conversation_service = MongoDBConversationService()
    accumulated_answer = ''

    for chunk in docs:
        has_context = 'context' in chunk and chunk['context'] is not None
        if has_context:
            # keep only the metadata of each retrieved document
            source_metadata = [doc.metadata for doc in chunk['context']]
            yield str({'sources': source_metadata})
        else:
            accumulated_answer = conversation_service.add_message_chunk(accumulated_answer, str(chunk))
            yield str(chunk)

    # persist the full AI answer once streaming has finished
    if accumulated_answer:
        conversation_service.add_message(convid, accumulated_answer, 'ai')


def format_prompt_history(instructions: str = None):
    """Return a prompt template string with question/history/context slots.

    When *instructions* is None or empty, a default QA-assistant instruction
    header is used; otherwise *instructions* becomes the header verbatim.
    """
    if instructions:
        prehead = instructions
    else:
        prehead = (
            "You are an assistant for question-answering tasks. "
            "Use the following pieces of retrieved context to answer the question. "
            "You also need to include history messages from user. "
            "If you don't know the answer, just say that you don't know. "
        )

    # slot section, byte-identical to the historical line-continuation string
    slots = (
        "            Question: {question}  "
        "            History: {history} "
        "            Context: {context}   "
        "            Answer: "
    )

    return prehead + slots

# def format_prompt(instructions: str = None):
#     if not instructions or instructions == "":
#         prehead = "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. "
#     else:
#         prehead = instructions

#     prompt = prehead + "\
#             Question: {question}  \
#             Context: {context}   \
#             Answer: "

#     return prompt

# get history messages lambda function
def get_history(session_id: str = None):
    """Return a conversation's historical messages as a single string.

    Args:
        session_id: conversation/session identifier; when falsy, no lookup
            is performed.

    Returns:
        str: stringified message history, or '' when there is no session
        or no stored messages.
    """
    if not session_id:
        return ''

    conv_srv = MongoDBConversationService()
    chat_history = conv_srv.get_history_messages(session_id)

    if not chat_history:
        return ''

    string_history = str(chat_history)
    logging.info("history: %s", string_history)
    return string_history

def string_to_json(text):
    """Parse *text*, optionally wrapped in a markdown ```json fence, as JSON.

    Args:
        text: raw LLM output expected to contain a JSON document.

    Returns:
        The parsed JSON value.

    Raises:
        CustomException: when the cleaned text is not valid JSON.
    """
    # Remove a surrounding markdown code fence, if present. The previous
    # implementation used text.strip('```json'), which treats the argument
    # as a CHARACTER SET and could eat leading/trailing 'j','s','o','n'
    # characters belonging to the payload itself (e.g. a bare `null`).
    pure_json_string = text.strip()
    if pure_json_string.startswith("```"):
        pure_json_string = pure_json_string.removeprefix("```json").removeprefix("```")
        pure_json_string = pure_json_string.removesuffix("```")
    logging.info("pure_json_string: %s", pure_json_string)
    try:
        logging.info("json loads start")
        json_data = json.loads(pure_json_string)
        logging.info("json_data: %s", json_data)
        logging.info("----------------------------------------------------")
        return json_data
    except json.JSONDecodeError as e:
        logging.info("json loads failed: %s", e)
        raise CustomException('parse string to json failed', code=status.HTTP_ERROR) from e

def build_ragchain(prompt, llm, with_context: bool = True, tools: list = None):
    """Compose a runnable RAG chain from prompt/llm, optionally with context and tools.

    Args:
        prompt: prompt template fed to the llm.
        llm: the chat model.
        with_context: when True, retrieved documents under the 'context' key
            are formatted via format_docs and injected into the prompt.
        tools: optional list of tool names; when non-empty, the llm is bound
            to them and the chain routes through the ToolManager.

    Returns:
        A runnable chain (string output, or the tool router's result).
    """
    # NOTE: the previous signature used a mutable default (tools=[]), which is
    # shared across calls; None is the safe sentinel and preserves behavior.
    if tools is None:
        tools = []

    # default: no context, no tools
    ragchain = prompt | llm | StrOutputParser()

    if len(tools) > 0:
        # Initialize ToolManager with the tools and bind them to the llm
        tool_manager = ToolManager()
        tool_manager.use_tools(tools)
        llm_w_tools = tool_manager.bind_to_llm(llm=llm)

        if with_context:
            # context + tools
            ragchain = (
                    RunnablePassthrough.assign(context=(lambda x: format_docs(x["context"])))
                    | prompt | llm_w_tools | OpenAIFunctionsAgentOutputParser()
                    | tool_manager.route
            )
        else:
            # no context, has tools
            ragchain = ( prompt | llm_w_tools | OpenAIFunctionsAgentOutputParser() | tool_manager.route )
    else:
        if with_context:
            # has context, no tools
            ragchain = (
                    RunnablePassthrough.assign(context=(lambda x: format_docs(x["context"])))
                    | prompt | llm | StrOutputParser()
            )

    return ragchain

def get_sources_from_graph(elements):
    """Collect source document urls and a knowledge-base name from graph results.

    Args:
        elements: rows from a Neo4j query result; each row may be a list of
            node-like objects exposing .get() (e.g. neo4j.graph.Node).

    Returns:
        tuple: (list of 'source_url' values found, last non-None 'collection'
        value, or '' when none was seen).
    """
    sources = []
    kbase_name = ''

    for element in elements:
        logging.info("get_sources_from_graph---element: %s", element)
        if isinstance(element, list) and len(element) > 0:
            for doc in element:
                logging.info("get_sources_from_graph----doc: %s", doc)
                try:
                    # plain strings carry no metadata; only mapping-like nodes do
                    if not isinstance(doc, str):
                        # shall be of type <class 'neo4j.graph.Node'>
                        path = doc.get('source_url')
                        sources.append(path)

                        collection = doc.get('collection')
                        if collection is not None:
                            kbase_name = collection
                except Exception:
                    # best-effort: items without .get() are simply skipped
                    continue

    return sources, kbase_name

def get_graph_stream(convid: str, message: QuestionRequest, llm: AzureChatOpenAI):
    """ Get graph query result, or rag result with sources.
    First, construct_cypher query from user query, then run the query(cypher ql) against neo4j graph database;

    If graph query result does not contain source documents, then return the result directly;
    If graph query result contains source documents, then run rag query against the documents in knowledge base;

    sample query with source:
    {
        'cypher': "MATCH (wc:WorkCenter {wc_name: 'filling station'})-[:has_document]->(doc:CP_Document) RETURN doc",
        'question': 'What countermeasures can be taken to reduce the mechanical failure based on the document details?'
    }

    Args:
        convid (str): conversation id used to fetch chat history for the prompt
        message (QuestionRequest): user query
        llm (AzureChatOpenAI): the llm to use

    Returns:
        tuple: (rag_chain, False) when a document-backed RAG chain was built,
        or (str(result), True) when the graph answered the question directly
    """
    gdbsrv = Neo4jGraphService(
        url=NEO4J_GRAPH_ENDPOINT,
        user=NEO4J_USER,
        password=NEO4J_PASSWORD,
        llm=llm
    )

    cql = gdbsrv.construct_cypher(message.question)
    logging.info("cql: %s", cql)
    jsoncql = None
    cypher = ''

    try:
        strcql = str(cql)

        logging.info("json cql: %s", strcql)

        jsoncql = string_to_json(strcql)
        cypher = jsoncql['cypher']
        logging.info("jsoncql-cypher: %s", cypher)
    except CustomException as e:
        # llm output was not valid json: treat the raw output as the cypher itself
        cypher = cql
    logging.info("json result jsoncql: %s", jsoncql)
    # a 'question' key means the cypher selects source documents: run RAG over them
    if jsoncql and "question" in jsoncql:
        # empty list acts as a no-op retriever when no cypher/sources are available
        filter_retriever = []
        if cypher != '':
            logging.info("there is not a right cypher,try to dgbsrv query start")
            # get document list
            elements = gdbsrv.query_database(neo4j_query=cypher, question=message.question)
            logging.info("elements: %s", elements)
            logging.info("dgbsrv query end")
            logging.info("get source from graph by elements start")
            # get list of doc paths (and kbase name) from graph query result
            sources, kbase_name = get_sources_from_graph(elements)
            logging.info("get source from graph by elements end")
            if len(sources) > 0:
                if kbase_name is None or kbase_name == '' :
                    # fallback to use kbase_name from message
                    kbase_name = message.kbase_name

                vdbsrv = VDBService()
                milvus_store: Zilliz = vdbsrv.get_vector_store(kbase_name)

                logging.info("sources_url: %s, kbase: %s", str(sources), kbase_name)

                # Milvus filter expression restricting retrieval to the graph's documents
                formatted_string = '["' + str('", "').join(sources) + '"]'
                search_kwargs={"expr": f"source_url in {formatted_string}"}

                logging.info("search_kwargs: %s", search_kwargs)

                filter_retriever = milvus_store.as_retriever(search_kwargs=search_kwargs)

        prompt = PromptTemplate.from_template( format_prompt_history(message.instructions) )

        rag_chain_from_docs = build_ragchain(prompt=prompt, llm=llm, with_context=True, tools=message.tools)

        # parallel branch supplies context, the raw question, and the chat history
        rag_chain_with_source = RunnableParallel(
            {"context": filter_retriever, "question" : RunnablePassthrough(), "history": lambda x : get_history(convid) }
        ).assign(answer=rag_chain_from_docs)

        return rag_chain_with_source, False

    else:
        logging.info("there is cypher,then run cypher on database")
        # direct graph query result
        result = gdbsrv.query_database(neo4j_query=cypher, question=message.question)
        logging.info("run cypher on database end,get result: %s", result)
        return str(result), True

def graphrag_generator(convid: str, message: QuestionRequest, gres: str, llm: AzureChatOpenAI) -> Iterator[str]:
    """ string generator for graph result
    Build following query string
    "query": "Please answer 'How to deal with root cause inadequate lubrication routines?' with below data, do not speak content out of the data. Data: [['RootCause',  'DocumentFilename',  'DocumentPath',  'DocumentCategory',  'CounterMeasure'], ['Inadequate lubrication routines.',  'a3 report_English.pdf',  'https://crewplus-useast-demo.s3.amazonaws.com/data/file/2024/08/08/64303_s8qy_3423.pdf',  'file',  'Train operators on proper lubrication techniques.']]"

    Args:
        convid (str): conversation id used for history persistence
        message (QuestionRequest): user question (and optional tools)
        gres (str): data array as graph result
        llm (AzureChatOpenAI): the llm to answer with

    Yields:
        Iterator[str]: llm sse stream
    """

    # emit the raw graph result first so the client can render the data/sources
    yield f"{{'result': {gres}\n\n}}"

    human_template = "{query}"
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    none_prompt = ChatPromptTemplate.from_messages([human_message_prompt])

    # no retrieval context here: the graph result is inlined into the query text
    chain = build_ragchain(prompt=none_prompt, llm=llm, with_context=False, tools=message.tools)

    question = message.question
    history = get_history(convid)

    query = f"Please answer '{question}' with below data and history messages, do not speak content not within the data. Data: {gres}, History: {history} ."

    # add user message to history
    convSrv = MongoDBConversationService()
    convSrv.add_message(convid, question)

    ai_message = ''

    for chunk in chain.stream(query):
        ai_message = ai_message + str(chunk)

        yield f"{{'answer': '{str(chunk)}'}}"

    # add ai message to history
    convSrv.add_message(convid, ai_message, "ai")

@app.post("/rag_with_source", #tags=["rag"],
          summary="Q&A against knowledge base, streaming response with sse ",
          description="streaming Q&A with sse, respones with source document")
async def rag_with_source(message: QuestionRequest, apikey: AccountApiKey = Security(get_api_key), db: AsyncSession = Depends(db_getter)):
    if apikey is None:
        raise CustomException("invalid api key", code=status.HTTP_ERROR)

    use_langsmith()

    try:
        # add tool support

        callback_list = []
        llm = ChatOpenAIService(callbacks=callback_list).get_azure_llm_deployment(deployment_id="GPT4o", temperature=0.0)

        convSrv = MongoDBConversationService()

        conversation_id = message.conversation_id

        if not conversation_id:
            converse = convSrv.new_conversation()
            conversation_id = converse.session_id
        logging.info("message.engine: %s", message.engine)
        if message.engine == 'graph':
            logging.info("get_graph_stream start")
            # Graph+Vector RAG
            ret, is_direct = get_graph_stream(conversation_id, message, llm)
            logging.info("is_direct: %s", is_direct)
            logging.info("get_graph_stream end")
            if is_direct:
                # Direct graph query result
                chatbot = graphrag_generator(conversation_id, message, ret, llm)
            else:
                # add user message to history
                convSrv.add_message(conversation_id, message.question)

                chatbot = parseDocs(ret.stream(message.question), conversation_id)
        else:
            # Vector-only RAG
            logging.info("vector-only RAG start")
            # get vector store instance
            vdbsrv = VDBService()
            # update by gdf 20241204
            milvus_store = await vdbsrv.get_vector_store_by_kbase(message.kbase_id, message.kbase_name, db)

            # Retrieve and generate response
            retriever = milvus_store.as_retriever()

            prompt = PromptTemplate.from_template(format_prompt_history(message.instructions) )
            # prompt = hub.pull("rlm/rag-prompt")

            # rag_chain_from_docs = (
            #     RunnablePassthrough.assign(context=(lambda x: format_docs(x["context"])))
            #     | prompt
            #     | llm
            #     | StrOutputParser()
            # )

            rag_chain_from_docs = build_ragchain(prompt=prompt, llm=llm, with_context=True, tools=message.tools)

            rag_chain_with_source = RunnableParallel(
                {"context": retriever, "question" : RunnablePassthrough(), "history": lambda x : get_history(conversation_id) }
            ).assign(answer=rag_chain_from_docs)
            # rag_chain_with_source = RunnableParallel(
            #     {"context": retriever, "question": RunnablePassthrough()}
            # ).assign(answer=rag_chain_from_docs)

            # This is where we configure the session id
            config = {"configurable": {"conversation_id": conversation_id}}

            # add user message to history
            convSrv.add_message(conversation_id, message.question)

            chatbot= parseDocs(rag_chain_with_source.stream(message.question, config=config), conversation_id)

    except Exception as e:
        raise CustomException(str(e), code=status.HTTP_ERROR)

    return EventSourceResponse(chatbot, media_type="text/event-stream", headers={"Cache-Control": "no-cache", "Connection": "keep-alive"})

