from question.kb_service.milvus_service import MilvusService
from question.llm_service.chatglmService import ChatGLMService
from question.llm_service.qwenService import QwenService
from question.kb_service.faiss_service import KGService
from langchain.chains import RetrievalQA
from langchain.prompts.prompt import PromptTemplate
from question.chat_chain.config import CFG


class DocumentQA(object):
    """Knowledge-base question answering.

    Wires a local LLM backend (ChatGLM or Qwen) to a vector store
    (FAISS or Milvus) and answers queries either grounded on retrieved
    documents (`get_knowledge_based_answer`) or with the bare LLM
    (`get_llm_answer`).
    """

    def __init__(self):
        """Load the configured LLM and vector store from `CFG`.

        Raises:
            ValueError: if `llm_model_name` or `knowledge_base_type` in the
                config matches no supported backend (previously this fell
                through silently and crashed later with AttributeError).
        """
        self.config = CFG()
        # Backend selection is a substring match on the configured model name.
        if "chatglm" in self.config.llm_model_name:
            self.llm_service = ChatGLMService()
            # NOTE(review): `isqiamtize4` looks like a typo for "is_quantize4",
            # but it is the callee's keyword name — renaming here would break
            # the call. Confirm against the service implementations.
            self.llm_service.load_model(model_name_or_path=self.config.llm_model_name,
                                        isqiamtize4=False, device=self.config.device)
        elif "Qwen" in self.config.llm_model_name:
            self.llm_service = QwenService()
            self.llm_service.load_model(model_name_or_path=self.config.llm_model_name,
                                        isqiamtize4=False, device=self.config.device)
        else:
            raise ValueError(
                f"Unsupported llm_model_name: {self.config.llm_model_name!r} "
                "(expected a name containing 'chatglm' or 'Qwen')")

        if self.config.knowledge_base_type == "faiss":
            self.source_service = KGService(knowledge_base_name=self.config.knowledge_base_name)
        elif self.config.knowledge_base_type == "Milvus":
            self.source_service = MilvusService(knowledge_base_name=self.config.knowledge_base_name)
        else:
            raise ValueError(
                f"Unsupported knowledge_base_type: {self.config.knowledge_base_type!r} "
                "(expected 'faiss' or 'Milvus')")
        self.source_service.load_vector_store()
        print("QA初始化完成!")

    def get_knowledge_based_answer(self, query,
                                   history_len=0,
                                   temperature=0.7,
                                   top_p=0.9,
                                   top_k=3,
                                   score_threshold=0.5,
                                   web_content='',
                                   chat_history=None):
        """Answer `query` grounded on the top-k retrieved documents.

        Args:
            query: the user question.
            history_len: number of trailing turns of `chat_history` to pass
                to the LLM; 0 disables history.
            temperature / top_p: sampling parameters forwarded to the LLM.
            top_k: number of documents to retrieve.
            score_threshold: currently UNUSED — Milvus does not support it
                through this retriever path, so it is accepted but ignored.
            web_content: optional web-search text prepended to the prompt.
            chat_history: prior conversation turns (default: no history).
                Was a mutable default `[]` before — fixed to `None` to avoid
                sharing state across calls.

        Returns:
            The LLM's answer string (`result['result']`).
        """
        if chat_history is None:
            chat_history = []
        if web_content:
            prompt_template = f"""首先,如果已知内容与问题无关,请忽略已知内容.基于以下已知内容，简洁和专业的来回答用户的问题。
                                如果无法从中得到答案，请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"，不允许在答案中添加编造成分，答案请使用中文。
                                已知网络检索内容：{web_content}""" + """
                                问题:
                                {question}
                                已知内容:
                                {context}
                                """
        else:
            prompt_template = """首先,如果已知内容与问题无关,请忽略已知内容.基于以下已知内容，简洁和专业的来回答用户的问题。
                                如果无法从中得到答案，请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"，不允许在答案中添加编造成分，答案请使用中文。
                                问题:
                                {question}
                                已知内容:
                                {context}
                                """
        prompt = PromptTemplate(template=prompt_template,
                                input_variables=["context", "question"])
        # Only the last `history_len` turns are forwarded; 0 means stateless.
        self.llm_service.history = chat_history[-history_len:] if history_len > 0 else []

        self.llm_service.temperature = temperature
        self.llm_service.top_p = top_p

        # score_threshold is not applied here: Milvus does not support it via
        # this retriever, so retrieval is purely top-k.
        knowledge_chain = RetrievalQA.from_llm(
            llm=self.llm_service,
            retriever=self.source_service.vector_store.as_retriever(
                search_kwargs={"k": top_k}),
            prompt=prompt)
        # Feed raw page content into the combine step (no metadata formatting).
        knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
            input_variables=["page_content"], template="{page_content}")

        knowledge_chain.return_source_documents = True

        result = knowledge_chain({"query": query})
        print(result)

        return result['result']

    def get_llm_answer(self, query='', web_content=''):
        """Answer `query` with the bare LLM, optionally prefixed with
        web-search content (no knowledge-base retrieval).

        Returns:
            The raw LLM completion string.
        """
        if web_content:
            prompt = f'基于网络检索内容：{web_content}，回答以下问题{query}'
        else:
            prompt = query
        result = self.llm_service._call(prompt)
        return result
