import sys
from typing import List
sys.path.append("D:\wordSpace\langchainQA")

from app.config.es import EsConfig
from langchain_elasticsearch import ElasticsearchStore,ElasticsearchRetriever
from langchain_elasticsearch import ElasticsearchRetriever
from langchain_core.embeddings import Embeddings
from app.embedding.com_mbeddings import ComEmbeddings
import asyncio
class MyElasticsearch(ElasticsearchStore):
    """Elasticsearch vector store with an attached BM25 keyword retriever.

    Extends ``ElasticsearchStore`` with a BM25 ``ElasticsearchRetriever``
    over the same index (exposed as ``self.retriever``), plus helpers for
    deleting documents and collecting matching document ids.
    """

    def __init__(self, index_name: str, embedding: Embeddings = None, config: EsConfig = None):
        """Connect both the vector store and the BM25 retriever to *index_name*.

        Args:
            index_name: Elasticsearch index to read from / write to.
            embedding: Embedding model; defaults to a fresh ``ComEmbeddings()``.
            config: Connection settings; defaults to a fresh ``EsConfig()``.
        """
        # BUG FIX: the original signature used `embedding=ComEmbeddings()` and
        # `config=EsConfig()` — default values are evaluated once at import
        # time, so every instance silently shared the same objects (and the
        # side effects of constructing them ran at module import). Create the
        # defaults per-call instead.
        if embedding is None:
            embedding = ComEmbeddings()
        if config is None:
            config = EsConfig()
        # Keyword (BM25) retriever over the same index; `body_func` builds the
        # match query sent to ES for each search.
        self.retriever = ElasticsearchRetriever.from_es_params(
            index_name=index_name,
            url=config.es_url,
            content_field=config.content_field,
            username=config.es_user,
            password=config.es_password,
            body_func=self.bm25_query,
        )
        super().__init__(
            es_url=config.es_url,
            index_name=index_name,
            es_user=config.es_user,
            es_password=config.es_password,
            embedding=embedding,
        )

    def bm25_query(self, search_query: str) -> dict:
        """Build the BM25 ``match`` query body against the ``text`` field."""
        print("==============bm25_query============")
        return {
            "query": {
                "match": {
                    "text": search_query,
                },
            },
        }

    async def adelete(self, ids: List[str]):
        """Delete the documents with the given ids from this index.

        NOTE(review): this uses the synchronous ES client inside a coroutine,
        so each delete blocks the event loop — consider the async client.
        """
        for doc_id in ids:  # renamed from `id`, which shadowed the builtin
            self.retriever.es_client.delete(index=self.index_name, id=doc_id)

    async def get_ids(self, search_body: dict) -> List[str]:
        """Return the ``_id`` of every document matched by *search_body*."""
        result = self.retriever.es_client.search(index=self.index_name, body=search_body)
        return [hit['_id'] for hit in result['hits']['hits']]
     

if __name__ == '__main__':

    from langchain_community.document_loaders import PyPDFium2Loader
    from langchain.text_splitter import CharacterTextSplitter

    # Load the PDF (OCR-ing embedded images) and split it into small chunks.
    documents = PyPDFium2Loader(
        file_path=r'D:\wordSpace\langchainQA\docs\demo\华昭府销售说辞初稿-未审定0929.pdf',
        extract_images=True,
    ).load()
    text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0, is_separator_regex=True)
    documents = text_splitter.split_documents(documents)
    # Tag each chunk with its position so it can be addressed later.
    for k, _doc in enumerate(documents):
        documents[k].metadata['id'] = k

    async def test():
        """Index the chunks, then run a sample BM25 retrieval."""
        # BUG FIX: `index_name` is a required positional parameter of
        # MyElasticsearch; the original call passed it (and the embedding)
        # to EsConfig instead, raising a TypeError before reaching ES.
        es = MyElasticsearch(index_name='test_qq', embedding=ComEmbeddings(), config=EsConfig())
        await es.aadd_documents(documents)
        res = await es.retriever.ainvoke('你好')
        for item in res:
            print(item.page_content)

    asyncio.run(test())