import copy
import json
import os
import re
import time
from typing import Dict, Iterator, List, Optional, Tuple, Union

import elasticsearch
from elastic_transport import ConnectionTimeout
from elasticsearch import Elasticsearch
from elasticsearch_dsl import UpdateByQuery, Search, Index, Q
from langchain_core.documents import Document
from loguru import logger


def singleton(cls, *args, **kw):
    """Per-process singleton decorator.

    Caches one instance per (class, pid) pair, so forked worker processes
    each build their own instance instead of sharing one created in the
    parent (important for network clients such as Elasticsearch).

    Args:
        cls: Class to wrap.
        *args, **kw: Construction arguments fixed at decoration time.

    Returns:
        A factory returning the cached instance, creating it on first call.
    """
    instances = {}

    def _singleton(*call_args, **call_kw):
        # Key includes the pid so each forked process gets its own instance.
        key = str(cls) + str(os.getpid())
        if key not in instances:
            # Forward call-time arguments as well, so classes whose
            # __init__ takes required parameters can also be decorated.
            # (Previously any call-time argument raised TypeError.)
            # Later calls return the cached instance and ignore arguments,
            # as is conventional for singletons.
            instances[key] = cls(*args, *call_args, **kw, **call_kw)
        return instances[key]

    return _singleton


@singleton
class ESConnection:
    """Per-process singleton managing an Elasticsearch connection.

    Holds the raw client in ``es`` and the cached ``info()`` payload in
    ``info``; the index/document helper methods operate through them.
    """

    def __init__(self):
        """Start with no client attached; conn() supplies one later."""
        # Underlying client; remains None until conn() is called.
        self.es: Optional[Elasticsearch] = None
        # Cluster info() payload cached by conn(); read by version().
        self.info: Dict = {}
        logger.info("ESConnection initialized")

    def conn(self, client: Elasticsearch) -> bool:
        """Attach an Elasticsearch client and verify it with a ping.

        Args:
            client: Elasticsearch client instance to adopt.

        Returns:
            bool: True when the cluster answered the ping, False otherwise.
        """
        self.es = client
        try:
            if not (self.es and self.es.ping()):
                logger.error("Failed to ping ES cluster")
                return False
            # Cache cluster metadata so version() can work without a round trip.
            self.info = self.es.info()
            logger.info(f"Successfully connected to ES cluster: {self.info.get('cluster_name', 'Unknown')}")
            return True
        except Exception as e:
            logger.error(f"Failed to connect to ES: {str(e)}")
            # Brief pause so a caller retrying in a loop does not hammer ES.
            time.sleep(1)
            return False

    def version(self) -> bool:
        """Report whether the connected cluster is ES major version 7+.

        Note: despite the name, this returns a flag rather than the version
        number — callers such as update() use it in a boolean context to
        choose between the pre-7 and 7.x+ API paths.

        Returns:
            bool: True when the cluster major version is >= 7.
        """
        try:
            v = self.info.get("version", {"number": "5.6"})
            v = v["number"].split(".")[0]
            return int(v) >= 7
        except Exception as e:
            logger.error(f"Error getting ES version: {e}")
            # On error assume a pre-7 cluster, matching the "5.6" default
            # above. (Previously this returned 5, which is truthy and
            # therefore wrongly signalled a 7.x+ cluster to boolean callers.)
            return False

    def health(self) -> Dict:
        """Fetch cluster health via the cluster API.

        Returns:
            Dict: Cluster health payload, or {} when unavailable.
        """
        if not self.es:
            logger.error("ES client not initialized")
            return {}
        try:
            return dict(self.es.cluster.health())
        except Exception as e:
            logger.error(f"Error getting cluster health: {e}")
            return {}

    def search(self, q: Union[Dict, str], idxnm: str, src: bool = False, timeout: str = "2s") -> Dict:
        """Run a search against one index, retrying transient timeouts.

        Args:
            q: Raw request body dict, or a query object/string accepted by
               elasticsearch_dsl's Search.query().
            idxnm: Target index name (required).
            src: Whether to include document _source in the hits.
            timeout: Kept for API compatibility; currently not forwarded
                to the request.

        Returns:
            Dict: Raw search response.

        Raises:
            ValueError: When idxnm is empty.
            Exception: When the search fails or keeps timing out.
        """
        if not idxnm:
            raise ValueError("Index name is required for search operation")

        body = q if isinstance(q, dict) else Search().query(q).to_dict()

        logger.debug(f"Searching in index '{idxnm}' with query: {json.dumps(body, ensure_ascii=False)}")

        for attempt in range(3):
            try:
                res = self.es.search(
                    index=idxnm,
                    body=body,
                    _source=src
                )
                # A timed-out response is raised from inside the try so the
                # except below treats it like any other timeout and retries.
                if str(res.get("timed_out", "")).lower() == "true":
                    raise Exception("ES search timeout")
                logger.debug(
                    f"Search successful in index '{idxnm}', found {len(res.get('hits', {}).get('hits', []))} documents")
                return res
            except Exception as e:
                logger.warning(f"ES search attempt {attempt + 1} failed in index '{idxnm}': {str(e)}")
                retryable = "timeout" in str(e).lower()
                if not (retryable and attempt < 2):
                    raise e
                time.sleep(1)

        logger.error(f"ES search failed after 3 attempts in index '{idxnm}'")
        raise Exception("ES search timeout after 3 attempts")

    def get(self, doc_id: str, idxnm: str, **kwargs) -> Dict:
        """Fetch a single document by id, retrying transient timeouts.

        Args:
            doc_id: Document id.
            idxnm: Target index name (required).
            **kwargs: Extra keyword arguments forwarded to es.get().

        Returns:
            Dict: Raw get response.

        Raises:
            ValueError: When idxnm is empty.
            Exception: When the get fails or keeps timing out.
        """
        if not idxnm:
            raise ValueError("Index name is required for get operation")

        logger.debug(f"Getting document '{doc_id}' from index '{idxnm}'")

        attempt = 0
        while attempt < 3:
            try:
                res = self.es.get(index=idxnm, id=doc_id, **kwargs)
            except Exception as e:
                logger.warning(
                    f"ES get attempt {attempt + 1} failed for document '{doc_id}' in index '{idxnm}': {str(e)}")
                # Only timeouts are worth retrying; anything else propagates.
                if "timeout" not in str(e).lower() or attempt >= 2:
                    raise e
                attempt += 1
                time.sleep(1)
                continue
            logger.debug(f"Successfully retrieved document '{doc_id}' from index '{idxnm}'")
            return res

        logger.error(f"ES get failed after 3 attempts for document '{doc_id}' in index '{idxnm}'")
        raise Exception("ES get timeout after 3 attempts")

    def updateByQuery(self, q: Union[Dict, str], d: Dict, idxnm: str) -> bool:
        """Apply a partial update to every document matching a query.

        Args:
            q: Query accepted by UpdateByQuery.query().
            d: Field -> new value mapping; compiled into a painless script.
            idxnm: Target index name (required).

        Returns:
            bool: True when the update executed, False otherwise.
        """
        if not idxnm:
            logger.error("Index name is required for updateByQuery operation")
            return False

        logger.info(f"Updating documents in index '{idxnm}' with query: {json.dumps(q, ensure_ascii=False)}")

        try:
            # Each field becomes one "ctx._source.<k> = params.<k>;" statement.
            scripts = "".join(f"ctx._source.{k} = params.{k};" for k in d)

            ubq = (
                UpdateByQuery(index=idxnm)
                .using(self.es)
                .query(q)
                .script(source=scripts, params=d)
                .params(refresh=False, slices=5, conflicts="proceed")
            )

            for attempt in range(3):
                try:
                    ubq.execute()
                    logger.info(f"Successfully updated documents in index '{idxnm}'")
                    return True
                except Exception as e:
                    logger.warning(f"UpdateByQuery attempt {attempt + 1} failed in index '{idxnm}': {str(e)}")
                    msg = str(e).lower()
                    if attempt < 2 and ("timeout" in msg or "conflict" in msg):
                        time.sleep(1)
                        continue
                    raise e

        except Exception as e:
            logger.error(f"UpdateByQuery failed in index '{idxnm}': {str(e)}")

        return False

    def updateScriptByQuery(self, q: Union[Dict, str], scripts: str, idxnm: str) -> bool:
        """Run a caller-supplied painless script over every matching document.

        Args:
            q: Query accepted by UpdateByQuery.query().
            scripts: Painless script source executed per document.
            idxnm: Target index name (required).

        Returns:
            bool: True when the update executed, False otherwise.
        """
        if not idxnm:
            logger.error("Index name is required for updateScriptByQuery operation")
            return False

        logger.info(f"Updating documents with script in index '{idxnm}'")

        try:
            ubq = (
                UpdateByQuery(index=idxnm)
                .using(self.es)
                .query(q)
                .script(source=scripts)
                .params(refresh=True, slices=5, conflicts="proceed")
            )

            for attempt in range(3):
                try:
                    ubq.execute()
                    logger.info(f"Successfully updated documents with script in index '{idxnm}'")
                    return True
                except Exception as e:
                    logger.warning(f"UpdateScriptByQuery attempt {attempt + 1} failed in index '{idxnm}': {str(e)}")
                    msg = str(e).lower()
                    if attempt < 2 and ("timeout" in msg or "conflict" in msg):
                        time.sleep(1)
                        continue
                    raise e

        except Exception as e:
            logger.error(f"UpdateScriptByQuery failed in index '{idxnm}': {str(e)}")

        return False

    def delete_doc_by_query(self, query: Dict, idxnm: str) -> bool:
        """Delete every document matching a query.

        Args:
            query: Delete-by-query request body.
            idxnm: Target index name (required).

        Returns:
            bool: True on success (including "nothing matched"), False otherwise.
        """
        if not idxnm:
            logger.error("Index name is required for delete_doc_by_query operation")
            return False

        logger.info(f"Deleting documents in index '{idxnm}' with query: {json.dumps(query, ensure_ascii=False)}")

        for attempt in range(3):
            try:
                self.es.delete_by_query(
                    index=idxnm,
                    refresh=True,
                    body=query
                )
                logger.info(f"Successfully deleted documents in index '{idxnm}'")
                return True

            except Exception as e:
                logger.warning(f"DeleteByQuery attempt {attempt + 1} failed in index '{idxnm}': {str(e)}")
                msg = str(e).lower()
                # A missing index/document means there is nothing to delete.
                if "notfounderror" in msg:
                    logger.info(f"No documents found to delete in index '{idxnm}'")
                    return True
                # Only timeouts and version conflicts can succeed on retry.
                # (Previously any other error was silently retried twice
                # more even though the retries could not help.)
                if "timeout" not in msg and "conflict" not in msg:
                    break
                if attempt < 2:
                    time.sleep(1)

        logger.error(f"DeleteByQuery failed after 3 attempts in index '{idxnm}'")
        return False

    def update(self, doc_id: str, body: Dict, idxnm: str) -> Union[bool, str]:
        """Update a single document by id.

        Args:
            doc_id: Document id.
            body: Update request body.
            idxnm: Target index name (required).

        Returns:
            Union[bool, str]: True on success, otherwise an error message.
        """
        if not idxnm:
            logger.error("Index name is required for update operation")
            return "Index name is required"

        logger.debug(f"Updating document '{doc_id}' in index '{idxnm}'")

        # version() is a pure read of cached cluster info, so the API-path
        # decision is loop-invariant; pre-7 clusters require doc_type.
        legacy = not self.version()

        for attempt in range(3):
            try:
                if legacy:
                    self.es.update(
                        index=idxnm,
                        id=doc_id,
                        body=body,
                        doc_type="doc"
                    )
                else:
                    self.es.update(
                        index=idxnm,
                        id=doc_id,
                        body=body
                    )
                logger.debug(f"Successfully updated document '{doc_id}' in index '{idxnm}'")
                return True

            except Exception as e:
                logger.warning(
                    f"Update attempt {attempt + 1} failed for document '{doc_id}' in index '{idxnm}': {str(e)}")
                if "timeout" in str(e).lower() and attempt < 2:
                    time.sleep(1)
                    continue
                return str(e)

        logger.error(f"Update failed after 3 attempts for document '{doc_id}' in index '{idxnm}'")
        return "Update timeout after 3 attempts"

    def update_by_query(self, index: str, update_query: Dict) -> Optional[Dict]:
        """Run a raw update-by-query request (single shot, no retries).

        Args:
            index: Index name (required).
            update_query: Update-by-query request body.

        Returns:
            Optional[Dict]: Raw response on success, None on failure.
        """
        if not index:
            logger.error("Index name is required for update_by_query operation")
            return None

        logger.info(f"Updating documents by query in index '{index}'")

        try:
            response = self.es.update_by_query(
                index=index,
                body=update_query,
                refresh=True
            )
        except Exception as e:
            logger.error(f"UpdateByQuery failed in index '{index}': {str(e)}")
            return None

        logger.info(f"Successfully updated documents by query in index '{index}'")
        return response

    def delete(self, doc_id: str, idxnm: str) -> Union[bool, str]:
        """Delete a single document by id.

        Args:
            doc_id: Document id.
            idxnm: Target index name (required).

        Returns:
            Union[bool, str]: True on success, otherwise an error message.
        """
        if not idxnm:
            logger.error("Index name is required for delete operation")
            return "Index name is required"

        logger.debug(f"Deleting document '{doc_id}' from index '{idxnm}'")

        attempt = 0
        while attempt < 3:
            try:
                self.es.delete(index=idxnm, id=doc_id)
            except Exception as e:
                logger.warning(
                    f"Delete attempt {attempt + 1} failed for document '{doc_id}' in index '{idxnm}': {str(e)}")
                # Only timeouts are retried; other errors are reported back.
                if "timeout" in str(e).lower() and attempt < 2:
                    attempt += 1
                    time.sleep(1)
                    continue
                return str(e)
            logger.debug(f"Successfully deleted document '{doc_id}' from index '{idxnm}'")
            return True

        logger.error(f"Delete failed after 3 attempts for document '{doc_id}' in index '{idxnm}'")
        return "Delete timeout after 3 attempts"

    def indexExist(self, idxnm: str) -> bool:
        """Check whether an index exists.

        Args:
            idxnm: Index name (required).

        Returns:
            bool: True when the index exists; False on missing name or error.
        """
        if not idxnm:
            logger.error("Index name is required for indexExist operation")
            return False

        logger.debug(f"Checking if index '{idxnm}' exists")

        try:
            idx = Index(idxnm, self.es)

            for attempt in range(3):
                try:
                    found = idx.exists()
                    logger.debug(f"Index '{idxnm}' exists: {found}")
                    return found
                except Exception as e:
                    logger.warning(f"IndexExist check attempt {attempt + 1} failed for '{idxnm}': {str(e)}")
                    msg = str(e).lower()
                    # Non-retryable errors end the loop; result defaults to False.
                    if not (("timeout" in msg or "conflict" in msg) and attempt < 2):
                        break
                    time.sleep(1)

        except Exception as e:
            logger.error(f"Error checking index existence for '{idxnm}': {str(e)}")

        return False

    def docExist(self, doc_id: str, idxnm: str) -> bool:
        """Check whether a document exists in an index.

        Args:
            doc_id: Document id.
            idxnm: Index name (required).

        Returns:
            bool: True when the document exists; False on error.
        """
        if not idxnm:
            logger.error("Index name is required for docExist operation")
            return False

        logger.debug(f"Checking if document '{doc_id}' exists in index '{idxnm}'")

        for attempt in range(3):
            try:
                found = self.es.exists(index=idxnm, id=doc_id)
            except Exception as e:
                logger.warning(
                    f"DocExist check attempt {attempt + 1} failed for document '{doc_id}' in index '{idxnm}': {str(e)}")
                msg = str(e).lower()
                if ("timeout" in msg or "conflict" in msg) and attempt < 2:
                    time.sleep(1)
                    continue
                # Non-retryable error: give up and report failure below.
                break
            logger.debug(f"Document '{doc_id}' exists in index '{idxnm}': {found}")
            return found

        logger.error(f"DocExist check failed for document '{doc_id}' in index '{idxnm}'")
        return False

    def createIdx(self, idxnm: str, mapping: Dict) -> Optional[Dict]:
        """Create an index with the given settings/mappings.

        Args:
            idxnm: Index name (required).
            mapping: Full index body (may contain "settings" / "mappings").

        Returns:
            Optional[Dict]: Creation response, or None on failure.
        """
        if not idxnm:
            logger.error("Index name is required for createIdx operation")
            return None

        logger.info(f"Creating index '{idxnm}' with mapping")

        try:
            # The installed client library's major version decides which
            # create() call shape to use.
            if elasticsearch.__version__[0] < 8:
                # elasticsearch-py < 8: whole body in one argument.
                result = self.es.indices.create(idxnm, body=mapping)
            else:
                # elasticsearch-py 8+: settings and mappings are separate.
                from elasticsearch.client import IndicesClient
                result = IndicesClient(self.es).create(
                    index=idxnm,
                    settings=mapping.get("settings", {}),
                    mappings=mapping.get("mappings", {})
                )
        except Exception as e:
            logger.error(f"Failed to create index '{idxnm}': {str(e)}")
            return None

        logger.info(f"Successfully created index '{idxnm}'")
        return result

    def deleteIdx(self, idxnm: str) -> Optional[Dict]:
        """Delete an index.

        Args:
            idxnm: Index name (required).

        Returns:
            Optional[Dict]: Deletion response, or None on failure.
        """
        if not idxnm:
            logger.error("Index name is required for deleteIdx operation")
            return None

        logger.info(f"Deleting index '{idxnm}'")

        try:
            # allow_no_indices keeps a no-match delete from raising.
            result = self.es.indices.delete(idxnm, allow_no_indices=True)
        except Exception as e:
            logger.error(f"Failed to delete index '{idxnm}': {str(e)}")
            return None

        logger.info(f"Successfully deleted index '{idxnm}'")
        return result

    def getTotal(self, res: Dict) -> int:
        """Extract the total hit count from a search response.

        Handles both the ES7+ object form ({"value": n, ...}) and the older
        plain-integer form of hits.total.

        Args:
            res: Raw search response.

        Returns:
            int: Total matching documents, 0 on malformed input.
        """
        try:
            total = res.get("hits", {}).get("total", 0)
            if isinstance(total, dict):
                return total["value"]
            return total
        except Exception as e:
            logger.error(f"Error getting total from search result: {e}")
            return 0

    def getDocIds(self, res: Dict) -> List[str]:
        """Extract the document ids from a search response.

        Args:
            res: Raw search response.

        Returns:
            List[str]: Ids of all hits, [] on malformed input.
        """
        try:
            ids = []
            for hit in res.get("hits", {}).get("hits", []):
                ids.append(hit["_id"])
            return ids
        except Exception as e:
            logger.error(f"Error getting document IDs from search result: {e}")
            return []

    def getSource(self, res: Dict) -> List[Dict]:
        """Extract _source payloads from a search response.

        Each returned dict is the hit's _source annotated (in place) with
        the hit's "id" and "_score".

        Args:
            res: Raw search response.

        Returns:
            List[Dict]: Annotated source dicts, [] on malformed input.
        """
        try:
            def _flatten(hit: Dict) -> Dict:
                # Mutates the _source dict in place, as the original did.
                entry = hit.get("_source", {})
                entry["id"] = hit["_id"]
                entry["_score"] = hit.get("_score", 0)
                return entry

            return [_flatten(hit) for hit in res.get("hits", {}).get("hits", [])]
        except Exception as e:
            logger.error(f"Error getting source from search result: {e}")
            return []

    def scrollIter(self, idxnm: str, pagesize: int = 100, scroll_time: str = '2m',
                   q: Dict = None) -> Iterator[List[Dict]]:
        """Iterate over all documents of an index via the scroll API.

        Args:
            idxnm: Index name (required).
            pagesize: Hits per scroll page.
            scroll_time: Scroll context keep-alive per round trip.
            q: Search body; defaults to match_all sorted by updated_at desc.

        Yields:
            List[Dict]: One page of raw hit dicts at a time.
        """
        if not idxnm:
            logger.error("Index name is required for scrollIter operation")
            return

        if q is None:
            q = {"query": {"match_all": {}}, "sort": [{"updated_at": {"order": "desc"}}]}

        logger.info(f"Starting scroll iteration in index '{idxnm}' with page size {pagesize}")

        # Open the scroll context, retrying transient failures.
        page = None
        for attempt in range(3):
            try:
                page = self.es.search(
                    index=idxnm,
                    scroll=scroll_time,
                    size=pagesize,
                    body=q,
                    _source=None
                )
                break
            except Exception as e:
                logger.warning(f"Scroll initialization attempt {attempt + 1} failed in index '{idxnm}': {str(e)}")
                if attempt < 2:
                    time.sleep(3)
                    continue
                logger.error(f"Failed to initialize scroll in index '{idxnm}' after 3 attempts")
                return

        if not page:
            logger.error(f"Failed to initialize scroll in index '{idxnm}'")
            return

        sid = page['_scroll_id']
        total = page['hits']['total']
        # ES7+ reports hits.total as {"value": n, ...}; older clusters
        # report a plain int — the previous code crashed on the latter.
        scroll_size = total["value"] if isinstance(total, dict) else total
        logger.info(f"[TOTAL] {scroll_size} documents found in index '{idxnm}'")

        try:
            while scroll_size > 0:
                yield page["hits"]["hits"]

                # Fetch the next page, retrying transient failures.
                for attempt in range(3):
                    try:
                        page = self.es.scroll(scroll_id=sid, scroll=scroll_time)
                        break
                    except Exception as e:
                        logger.warning(f"Scroll attempt {attempt + 1} failed in index '{idxnm}': {str(e)}")
                        if attempt < 2:
                            time.sleep(3)
                            continue
                        logger.error(f"Scroll failed in index '{idxnm}' after 3 attempts")
                        return

                sid = page['_scroll_id']
                scroll_size = len(page['hits']['hits'])
        finally:
            # Free the server-side scroll context (previously leaked until
            # its keep-alive expired). Best-effort only.
            try:
                self.es.clear_scroll(scroll_id=sid)
            except Exception:
                pass

    def search_documents(self, query_body: Dict, index_name: str) -> List[Document]:
        """Search and wrap each hit as a langchain Document.

        Args:
            query_body: Raw query body passed through to search().
            index_name: Index name (required).

        Returns:
            List[Document]: page_content from "_source.text" and metadata
            from "_source.metadata"; [] when nothing matched or on error.
        """
        if not index_name:
            logger.error("Index name is required for search_documents operation")
            return []

        try:
            search_result = self.search(q=query_body, idxnm=index_name)
            hits = search_result.get('hits', {}).get('hits')
            if not hits:
                logger.debug(f"No documents found in index '{index_name}'")
                return []

            documents = [
                Document(
                    page_content=hit['_source'].get("text", ""),
                    metadata=hit['_source'].get("metadata", {})
                )
                for hit in hits
            ]

            logger.debug(f"Found {len(documents)} documents in index '{index_name}'")
            return documents

        except Exception as e:
            logger.error(f"Error searching documents in index '{index_name}': {e}")
            return []

    def count_documents(self, query_body: Dict, index_name: str) -> int:
        """Count documents matching a query.

        Args:
            query_body: Count request body.
            index_name: Index name (required).

        Returns:
            int: Matching document count, 0 on error.
        """
        if not index_name:
            logger.error("Index name is required for count_documents operation")
            return 0

        try:
            count = self.es.count(body=query_body, index=index_name).get('count', 0)
        except Exception as e:
            logger.error(f"Error counting documents in index '{index_name}': {e}")
            return 0

        logger.debug(f"Found {count} documents in index '{index_name}'")
        return count

    def list_knowledge_documents(self, index: str, query: Dict, page: int,
                                 page_size: int = 10, search_type: str = "kb") -> Dict:
        """List knowledge-base documents with pagination.

        Args:
            index: Index name (required).
            query: Filter fields; recognised keys are "content",
                "standard_number", "standard_clause", "standard_name",
                "standard_tags", "file_name" and "kb_ids". None values and
                unknown keys are ignored.
            page: 1-based page number.
            page_size: Hits per page.
            search_type: "kb" returns standard/table-aware rows; any other
                value returns a slimmer file-oriented row shape.

        Returns:
            Dict: {"total": <match count>, "data": [<row dicts>]}; an empty
            result on error or when the index name is missing.
        """
        if not index:
            logger.error("Index name is required for list_knowledge_documents operation")
            return {"total": 0, "data": []}

        logger.info(f"Listing knowledge documents in index '{index}', page {page}, size {page_size}")

        try:
            s = Search(using=self.es, index=index)
            start_offset = (page - 1) * page_size

            # Assemble the bool/must clauses from the recognised filters.
            must_list = []
            for k, v in query.items():
                if v is not None:
                    if k == "content":
                        # Full-text match against the chunk text field.
                        must_list.append(Q("match", text=query.get("content")))
                    elif k == "standard_number":
                        # Exact (term) match — standard numbers are identifiers.
                        must_list.append(Q("term", metadata__standard_number=query.get("standard_number")))
                    elif k == "standard_clause":
                        must_list.append(Q("match", metadata__standard_clause=query.get("standard_clause")))
                    elif k == "standard_name":
                        must_list.append(Q("match", metadata__standard_name=query.get("standard_name")))
                    elif k == "standard_tags":
                        must_list.append(Q("match", metadata__standard_tags=query.get("standard_tags")))
                    elif k == "file_name":
                        # Prefix search on the stored file name.
                        must_list.append(Q("wildcard", metadata__file_name=query.get("file_name") + "*"))

            # Restrict results to the requested knowledge bases, when given.
            if query.get("kb_ids"):
                must_list.append(Q("terms", metadata__kb_id=query.get("kb_ids")))

            s.query = Q('bool', must=must_list)
            s = s.extra(from_=start_offset, size=page_size)

            response = s.execute()
            total = response.hits.total.value

            # Shape each hit into a flat row dict according to search_type.
            hits = []
            for hit in response.hits:
                metadata = hit.metadata.to_dict()

                if search_type == "kb":
                    if metadata.get("table_id"):
                        # Table-derived chunk: expose its table linkage.
                        hits.append({
                            "id": hit.meta.id,
                            "file_id": metadata.get("file_id"),
                            "kb_id": metadata.get("kb_id"),
                            "table_id": metadata.get("table_id"),
                            "content": hit.text,
                            "image": metadata.get("image"),
                            "update_time": metadata.get("update_time"),
                        })
                    else:
                        # Standard-document chunk: expose the standard fields.
                        hits.append({
                            "id": hit.meta.id,
                            "standard_name": metadata.get("standard_name"),
                            "standard_number": metadata.get("standard_number"),
                            "standard_clause": metadata.get("standard_clause"),
                            "standard_tags": metadata.get("standard_tags"),
                            "image": metadata.get("image"),
                            "kb_id": metadata.get("kb_id"),
                            "file_id": metadata.get("file_id"),
                            "file_name": metadata.get("file_name"),
                            "update_time": metadata.get("update_time"),
                            "content": hit.text,
                        })
                else:
                    # Non-"kb" searches only need file-level fields.
                    hits.append({
                        "id": hit.meta.id,
                        "file_id": metadata.get("file_id"),
                        "file_name": metadata.get("file_name"),
                        "update_time": metadata.get("update_time"),
                        "content": hit.text,
                    })

            logger.info(f"Successfully listed {len(hits)} documents from index '{index}'")
            return {
                "total": total,
                "data": hits
            }

        except Exception as e:
            logger.error(f"Error listing knowledge documents in index '{index}': {e}")
            return {"total": 0, "data": []}

