from elasticsearch import Elasticsearch, helpers
from concurrent.futures import ThreadPoolExecutor
import asyncio
from langchain.docstore.document import Document

import sys
sys.path.append('/workspace/qanything_local')
from qanything_kernel.utils.custom_log import debug_logger


class ElasticsearchClient:
    """Thin wrapper around an Elasticsearch cluster for BM25 keyword retrieval.

    Documents are indexed with a ``text`` field, a ``keywords`` field and the
    full LangChain ``metadata`` dict; searches return LangChain ``Document``
    objects ranked by BM25 score.
    """

    def __init__(self, host='172.31.24.110', port=9600, kb_id=None):
        """Connect to the cluster at ``http://host:port``.

        NOTE(review): ``kb_id`` is stored but never read by any method in this
        class — presumably used by callers; confirm before removing.
        """
        self.host = host
        self.port = port
        self.kb_id = kb_id
        # Pool used to run blocking bulk imports off the asyncio event loop.
        self.executor = ThreadPoolExecutor(max_workers=10)
        self.es = Elasticsearch([{'host': self.host, 'port': self.port, "scheme": "http"}], request_timeout=30)

    def create_index(self, index_name):
        """Create *index_name* with BM25-scored ``text``/``keywords`` fields.

        No-op (just a log line) when the index already exists.
        """
        if self.es.indices.exists(index=index_name):
            debug_logger.info(f"Index '{index_name}' already exists.")
            return
        mapping = {
            "properties": {
                "text": {
                    "type": "text",
                    "similarity": "BM25"  # score content with BM25
                },
                "keywords": {
                    "type": "text",
                    "similarity": "BM25"  # score keywords with BM25 as well
                }
            }
        }
        self.es.indices.create(index=index_name, body={"mappings": mapping})
        debug_logger.info(f"Index '{index_name}' created successfully.")

    async def import_documents(self, file_id, index_name, documents):
        """Bulk-import LangChain *documents* into *index_name*.

        Each document gets a ``chunk_id`` of ``"{file_id}_{position}"`` stamped
        into its metadata (mutates the caller's documents). Documents are
        imported in chunks of 20 on the thread pool so the event loop is not
        blocked by the synchronous ``helpers.bulk`` call.

        Returns True only if every chunk imported successfully.
        """
        def import_helper(chunk):
            # Runs in a worker thread: build the bulk actions and index them.
            try:
                actions = [
                    {
                        "_index": index_name,
                        "_source": {
                            "text": doc.page_content,
                            # Missing keywords are indexed as an empty string so
                            # the field always exists for multi_match queries.
                            "keywords": doc.metadata.get('keywords', ''),
                            "metadata": doc.metadata,
                        },
                    }
                    for doc in chunk
                ]
                success, _ = helpers.bulk(self.es, actions)
                debug_logger.info(f"Successfully imported {success} chunk into index '{index_name}'.")
                return True
            except Exception as e:
                # Fix: failures were logged at info level; use error so they
                # are visible, but keep the best-effort False return.
                debug_logger.error(f"An error occurred while importing documents: {str(e)}")
                return False

        try:
            # Stamp each document with its position within the source file.
            for i, doc in enumerate(documents):
                doc.metadata['chunk_id'] = f"{file_id}_{i}"
            chunk_size = 20  # documents handled per worker-thread task
            # Fix: asyncio.get_event_loop() is deprecated inside coroutines
            # (Python 3.10+); use the loop that is actually running us.
            loop = asyncio.get_running_loop()
            all_success = True
            for i in range(0, len(documents), chunk_size):
                result = await loop.run_in_executor(
                    self.executor, import_helper, documents[i:i + chunk_size])
                if not result:
                    all_success = False
            return all_success
        except Exception as e:
            # Fix: was a bare print(); route through the shared logger.
            debug_logger.error(f"An error occurred while importing documents: {str(e)}")
            return False

    def search_documents(self, index_name, query, topk=5):
        """BM25-search *index_name* for *query*; return up to *topk* Documents.

        Matches against both the ``text`` and ``keywords`` fields
        (``most_fields`` sums the per-field scores) and returns results sorted
        by descending score, with score and source metadata copied into each
        Document's metadata.
        """
        search_body = {
            "size": topk,
            "query": {
                "multi_match": {
                    "query": query,
                    # Equal weight; raise e.g. "keywords^2" to boost keywords.
                    "fields": ["text", "keywords"],
                    "type": "most_fields",  # sum scores across matched fields
                    "analyzer": "standard",
                }
            },
            "explain": True,  # include per-document score explanations
            "sort": [{"_score": {"order": "desc"}}],
        }

        hits = self.es.search(index=index_name, body=search_body)['hits']['hits']

        search_results = []
        for hit in hits:
            source_meta = hit['_source']['metadata']
            doc = Document(
                page_content=hit['_source']['text'],
                metadata={
                    "score": hit['_score'],
                    "file_id": source_meta['file_id'],
                    "file_name": source_meta['file_name'],
                    "chunk_id": source_meta['chunk_id'],
                    "zsk_file_id": source_meta.get('zsk_file_id', ''),
                    "title": source_meta.get('title', ''),
                    "keywords": source_meta.get('keywords', ''),
                },
            )
            search_results.append(doc)
        return search_results

    def delete_index(self, index_name):
        """Delete *index_name* if it exists; otherwise just log."""
        if self.es.indices.exists(index=index_name):
            self.es.indices.delete(index=index_name)
            debug_logger.info(f"Index '{index_name}' deleted successfully.")
        else:
            debug_logger.info(f"Index '{index_name}' does not exist.")



if __name__ == "__main__":

    async def main():
        # Smoke-test driver: build a client, (optionally) import sample
        # documents, then run a BM25 search and print the hits.
        es_handler = ElasticsearchClient()
        index_name = "documents_index"

        # Fix: removed redundant local re-import of Document — it is already
        # imported at module level.

        file_id = 'kb_123456'
        documents = []

        d1 = Document(page_content="本文介绍了提高Python编程效率的几种方法，包括使用内置函数、编写简洁的代码和使用第三方库。",
                      metadata={
                        "keywords": "",
                        "file_id": "kb123456",
                        "file_name": "kb"})
        documents.append(d1)

        d2 = Document(page_content="本文是Elasticsearch的入门指南，介绍了Elasticsearch的基本概念、安装方法和常用操作。",
                      metadata={
                        "keywords": "",
                        "file_id": "kb123456",
                        "file_name": "kb"})
        documents.append(d2)

        d3 = Document(page_content="本文介绍了几种常用的机器学习算法，包括线性回归、决策树和神经网络，并分析了它们的优缺点。",
                      metadata={
                        "keywords": "机器学习 算法 线性回归 决策树 神经网络",
                        "file_id": "kb123456",
                        "file_name": "kb"})
        documents.append(d3)

        # Uncomment to (re)build the index from the sample documents:
        # es_handler.delete_index(index_name)
        # es_handler.create_index(index_name)
        # ret = await es_handler.import_documents(file_id, index_name, documents)

        # Run a query against the index.
        query = "机器学习中的方法有哪些？"
        search_results = es_handler.search_documents(index_name, query)
        print("Query Results:")
        for hit in search_results:
            print(hit.page_content)

        # Uncomment to clean up afterwards:
        # es_handler.delete_index(index_name)

    # Run the async entry point.
    asyncio.run(main())
