import sys
# Make the project root importable when this file is run directly.
# Raw string is required: in a plain literal '\w' is an invalid escape
# (SyntaxWarning on Python 3.12+, and easy to break when the path changes).
sys.path.append(r'D:\wordSpace\langchainQA')
# import os
# os.environ["OPENAI_API_BASE"] = "http://192.168.2.45:7892/v1"
# os.environ["OPENAI_API_KEY"] = "xxx"
import asyncio
from langchain.chains import RetrievalQA,QAWithSourcesChain
from langchain_community.llms.openai import OpenAI
from app.core.compressors import MyDocumentCompressor
from app.chain.de_chain import deLLMChain
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.callbacks import AsyncCallbackManagerForRetrieverRun,AsyncCallbackHandler

from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.retrievers import  EnsembleRetriever

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import FlashrankRerank

from app.config.milvus import MilvusSearchParam
from app.prompt.base import  BasePrompt
from app.llm.base import BaseLLM
from app.llm.chat import ChatLLM
from app.retriever.es import MyElasticsearch
from app.retriever.milvus import MilvusVectorStore
from app.retriever.my_multi_query import MyMultiQueryRetriever

from langchain_core.output_parsers import BaseOutputParser
# class testOutputParser(BaseOutputParser):
#     def parse(self,outputs:str)
#         return outputs
class ChatDocsChain:
    """Retrieval-augmented chat over a Milvus-backed document index.

    Wires a Milvus vector retriever into an ``EnsembleRetriever``, adds a
    rerank/compression stage (``MyDocumentCompressor``), and exposes
    streaming and non-streaming question-answering entry points built on
    LangChain's ``create_retrieval_chain``.
    """

    def __init__(self, index_name: str, llm: OpenAI = None, prompt: BasePrompt = None) -> None:
        """Build the retriever stack for ``index_name``.

        Args:
            index_name: name of the Milvus collection to search.
            llm: chat model; a fresh ``ChatLLM()`` is created when omitted.
            prompt: prompt factory; a fresh ``BasePrompt()`` when omitted.

        Defaults are constructed lazily here instead of in the signature —
        defaults in the signature are evaluated once at import time and the
        same instance would be shared by every ``ChatDocsChain``.
        """
        self.index_name = index_name
        self.llm = llm if llm is not None else ChatLLM()
        self.prompt = prompt if prompt is not None else BasePrompt()
        # Milvus vector store for this index.
        milvus_vector = MilvusVectorStore(index_name)
        # Ensemble retriever — currently a single vector retriever; extra
        # retrievers (e.g. Elasticsearch/BM25) can be appended for true
        # hybrid search.
        ensemble_retriever = EnsembleRetriever(
            retrievers=[milvus_vector.as_retriever(search_params=MilvusSearchParam().dict())]
        )
        # Rerank/compression stage applied on top of the retrieved docs.
        compressor = MyDocumentCompressor()
        self.compression_retriever = ContextualCompressionRetriever(
            base_retriever=ensemble_retriever, base_compressor=compressor
        )

    def _build_chain(self, ai_prompt, history):
        """Assemble a retrieval chain: compression retriever -> stuff-docs LLM chain.

        NOTE: ``histroy`` is the (misspelled) keyword expected by
        ``BasePrompt.init_prompt`` — it must stay misspelled here.
        """
        combine_docs_chain = create_stuff_documents_chain(
            llm=self.llm,
            prompt=self.prompt.init_prompt(ai_prompt=ai_prompt, histroy=history),
        )
        return create_retrieval_chain(self.compression_retriever, combine_docs_chain)

    async def chat(self, query, history, ai_prompt):
        """Stream answer tokens for ``query``.

        Yields every non-empty ``answer`` chunk; ``context`` chunks are
        printed for debugging only.

        Fix: ``ai_prompt`` is now forwarded to the prompt — previously the
        parameter was accepted but a hard-coded ``""`` was used instead.
        """
        retrieval_chain = self._build_chain(ai_prompt, history)
        async for item in retrieval_chain.astream({"input": query}):
            if 'context' in item:
                print(item)  # debug: show retrieved documents
            if 'answer' in item and item['answer'] != '':
                yield item['answer']

    async def chat_content(self, query, history, ai_prompt):
        """Stream a "knowledge: ..." preamble of retrieved docs, then answer tokens.

        The first yielded chunk enumerates each retrieved document and, when
        any were found, is terminated with ``"answer: "``; subsequent chunks
        are the model's answer tokens.
        """
        retrieval_chain = self._build_chain(ai_prompt, history)
        knowledge = "knowledge: "
        async for item in retrieval_chain.astream({"input": query}):
            if 'context' in item:
                if isinstance(item['context'], list):
                    for index, doc in enumerate(item['context']):
                        knowledge += f"[{index+1}]：{doc.page_content} \n"
                    # Append the answer marker once, after the last doc
                    # (equivalent to the old per-iteration last-index check).
                    if item['context']:
                        knowledge += "answer: "
                yield knowledge

            if 'answer' in item and item['answer'] != '':
                yield item['answer']

    async def nostream_chat(self, query):
        """Return the complete answer for ``query`` as one string.

        Uses an empty prompt and empty history; streams internally and
        concatenates the ``answer`` chunks.
        """
        retrieval_chain = self._build_chain('', '')
        ans = ''
        async for item in retrieval_chain.astream({"input": query}):
            if 'answer' in item and item['answer'] != '':
                ans += item['answer']
        return ans

if __name__ == '__main__':
    # Manual smoke test: stream a chat answer and print the assembled text.
    # (asyncio is already imported at module top; no need to re-import.)
    async def _demo():
        chain = ChatDocsChain("test")
        pieces = []
        # chat() requires history and ai_prompt — the previous call omitted
        # them and raised TypeError before ever reaching the model.
        async for token in chain.chat(query="详细介绍一下楼盘\n", history=[], ai_prompt=""):
            pieces.append(token)
        # join() instead of repeated += for the accumulated tokens.
        print(''.join(pieces))

    asyncio.run(_demo())




