import asyncio
import json
import os
import threading
import time
import traceback
import warnings
from typing import Any, Dict, List, Optional

import pandas as pd
from pymilvus import (
    AsyncMilvusClient,
    Collection,
    CollectionSchema,
    DataType,
    FieldSchema,
    connections,
    utility,
)
from pymilvus.exceptions import MilvusException

from embedding_model import get_text_embedding
from utils.base_utils import (
    get_config,
    get_datetime_earlier,
    get_datetime_now,
    hash_string,
    logger,
    timer,
)

# Silence noisy warnings emitted by dependencies (e.g. pymilvus / pandas).
warnings.filterwarnings("ignore")

# Maximum VARCHAR length for the large text fields (key_fields / answer).
MAX_LENGTH = 40960
# Scalar fields returned by searches/queries — every schema field except
# the "embeddings" vector.
OUTPUT_FIELDS = [
    "pk",
    "extra_key",
    "agent_id",
    "query",
    "key_fields",
    "answer",
    "update_time",
]


# ------------------------------ 公共工具方法 (同步) ------------------------------
def _get_common_schema(embedding_dim: int, collection_name: str) -> CollectionSchema:
    """Build the Milvus collection schema shared by the sync and async caches.

    Args:
        embedding_dim: Dimension of the "embeddings" FLOAT_VECTOR field.
        collection_name: Used as the schema description.

    Returns:
        A CollectionSchema with the scalar fields followed by the vector field.
    """
    # (name, max_length, is_primary) for every VARCHAR field, in schema order.
    varchar_specs = [
        ("pk", 100, True),
        ("extra_key", 100, False),
        ("agent_id", 100, False),
        ("query", 256, False),
        ("key_fields", MAX_LENGTH, False),
        ("answer", MAX_LENGTH, False),
        ("update_time", 100, False),
    ]
    fields = [
        FieldSchema(name=name, dtype=DataType.VARCHAR, max_length=length, is_primary=primary)
        for name, length, primary in varchar_specs
    ]
    fields.append(
        FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=embedding_dim)
    )
    return CollectionSchema(fields, collection_name)


def _generate_entities(
    extra_key: str,
    agent_id: str,
    query: str,
    key_fields: str,
    answer: str,
) -> Optional[List[List[Any]]]:
    """Build column-oriented entity data for a Milvus insert/upsert (sync API).

    Args:
        extra_key: Caller-supplied namespace key; combined with `query` to
            form the primary key.
        agent_id: Identifier of the agent that produced the answer.
        query: User query text; embedded via `get_text_embedding`.
        key_fields: Auxiliary key-field text stored alongside the answer.
        answer: Cached answer text.

    Returns:
        Single-element columns in schema field order, or None when the
        embedding service returns nothing.  (Annotation fixed to Optional:
        the previous `List[List[Any]]` hid the None early-return.)
    """
    embedding = get_text_embedding(query)
    if embedding is None:
        logger.error(f"Could't get embedding for query:{query}")
        return None
    # Primary key: hash of query + extra_key, so the same pair upserts in place.
    pk = hash_string(query + extra_key)
    # Timestamp consumed by the expiry filter in _generate_search_expr.
    update_time = get_datetime_now()

    # Column order must match the field order in _get_common_schema.
    entities = [
        [pk],
        [extra_key],
        [agent_id],
        [query],
        [key_fields],
        [answer],
        [update_time],
        [embedding],
    ]
    return entities


def _generate_row_entities(
    extra_key: str,
    agent_id: str,
    query: str,
    key_fields: str,
    answer: str,
) -> Optional[List[Dict[str, Any]]]:
    """Build row-oriented entity data for AsyncMilvusClient insert/upsert.

    Returns a single-row list in the shape `[{field: value, ...}]`, or None
    when no embedding could be produced for `query`.
    """
    vector = get_text_embedding(query)
    if vector is None:
        logger.error(f"Could't get embedding for query:{query}")
        return None
    # Primary key is the hash of query + extra_key; update_time feeds the
    # expiry filter used at search time.
    return [
        {
            "pk": hash_string(query + extra_key),
            "extra_key": extra_key,
            "agent_id": agent_id,
            "query": query,
            "key_fields": key_fields,
            "answer": answer,
            "update_time": get_datetime_now(),
            "embeddings": vector,
        }
    ]


def _generate_search_expr(extra_key: Optional[str], expire: int) -> str:
    """Build the Milvus boolean filter expression for cache searches.

    Always filters out entries older than `expire`; additionally pins
    `extra_key` when one is supplied.
    """
    cutoff = get_datetime_earlier(expire)
    time_clause = f'update_time >= "{cutoff}"'
    if not extra_key:
        return time_clause
    return f'extra_key == "{extra_key}" && {time_clause}'


# ------------------------------ 同步Milvus缓存类 ------------------------------
class MilvusCache:
    """Synchronous Milvus-backed semantic cache for query/answer pairs.

    Connecting to Milvus and loading (creating if necessary) the cache
    collection both happen at construction time.  `get` runs a vector
    similarity search with an expiry filter, `add` upserts in a background
    thread, `delete` removes by primary key, and the export methods dump
    the collection contents.
    """

    def __init__(self) -> None:
        # All connection/collection settings come from the "milvus" config section.
        self.config = get_config("milvus")
        self.milvus_collection_name = self.config["collections"]["query_cache"]
        self.embedding_dim = self.config["params"]["embedding_dim"]
        self.metric_type = self.config["params"]["metric_type"]
        self.nprobe = self.config["params"]["nprobe"]
        self.search_params = {
            "metric_type": self.metric_type,
            "params": {"nprobe": self.nprobe},
        }
        self.index_type = self.config["params"]["index_type"]
        self.nlist = self.config["params"]["nlist"]
        self.search_limit = self.config["params"]["search_limit"]
        # Entries whose update_time is older than `expire` are excluded from searches.
        self.expire = self.config["params"]["query_cache"]["expire"]
        self.export_batch_size = self.config["params"]["export_batch_size"]
        # Stays None if connect/load fails; every method checks before use.
        self.milvus_cache_collection: Optional[Collection] = None

        self._connect_milvus()
        self._load_collection()

    @timer
    def _connect_milvus(self) -> None:
        """Open the default Milvus connection; failures are logged, not raised."""
        try:
            connections.connect(
                alias="default",
                host=self.config["host"],
                port=self.config["port"],
                user=self.config["user"],
                password=self.config["password"],
                keep_alive=True,
            )
        except MilvusException as e:
            logger.error(
                f"Milvus connect error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
        except Exception as e:
            logger.error(
                f"Unexpected Milvus connect error: {str(e)}, Traceback: {traceback.format_exc()}"
            )

    @timer
    def _load_collection(self) -> None:
        """Create the collection (with seed row and index) if absent, then load it.

        On failure `self.milvus_cache_collection` may remain None; callers
        guard against that before every operation.
        """
        try:
            # Shared schema used by both the sync and async cache classes.
            schema = _get_common_schema(self.embedding_dim, self.milvus_collection_name)

            if not utility.has_collection(self.milvus_collection_name):
                # Create a brand-new collection.
                self.milvus_cache_collection = Collection(
                    self.milvus_collection_name, schema, consistency_level="Strong"
                )
                # Seed a single greeting row so the collection is never empty.
                init_entities = _generate_entities(
                    extra_key="",
                    agent_id="",
                    query="您好呀",
                    key_fields="关键词",
                    answer="您好，请问有什么可以帮到您",
                )
                if init_entities:
                    self.milvus_cache_collection.upsert(init_entities)
                    self.milvus_cache_collection.flush()
                # Build the vector index on the embeddings field.
                index_params = {
                    "index_type": self.index_type,
                    "metric_type": self.metric_type,
                    "params": {"nlist": self.nlist},
                }
                self.milvus_cache_collection.create_index(
                    field_name="embeddings", index_params=index_params
                )
            else:
                # Attach to the existing collection.
                self.milvus_cache_collection = Collection(
                    self.milvus_collection_name, schema, consistency_level="Strong"
                )
            # Load the collection into memory so searches can run.
            self.milvus_cache_collection.load()

        except MilvusException as e:
            logger.error(
                f"Milvus load collection:{self.milvus_collection_name} error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
        except Exception as e:
            logger.error(
                f"Unexpected error in _load_collection: {str(e)}, Traceback: {traceback.format_exc()}"
            )

    @timer
    def _search(
        self,
        collection: Collection,
        search_embedding: List[float],
        output_fields: List[str] = OUTPUT_FIELDS,
        search_limit: Optional[int] = None,
        milvus_expr: Optional[str] = None,
    ):
        """Run a vector similarity search on `collection`.

        Returns the raw pymilvus search result, or None on any error.
        `search_limit` falls back to the configured default when not given.
        """
        try:
            search_limit = search_limit or self.search_limit
            result = collection.search(
                data=[search_embedding],
                anns_field="embeddings",
                param=self.search_params,
                limit=search_limit,
                output_fields=output_fields,
                expr=milvus_expr,
            )
            return result
        except Exception as e:
            logger.error(
                f"Milvus search error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return None

    @timer
    def get(
        self,
        query: str,
        vector_to_search: Optional[List[float]] = None,
        extra_key: Optional[str] = None,
        top_n: Optional[int] = None,
        threshold_score: Optional[float] = None,
    ) -> Optional[str]:
        """Return a cached answer for `query`, or None on miss/error.

        The embedding is computed when `vector_to_search` is not supplied.
        Hits are scanned in returned order and the first whose score exceeds
        `threshold_score` wins.  Setting the DISABLE_CACHE environment
        variable short-circuits lookups entirely.

        NOTE(review): `hit.distance` is compared with `> threshold_score`,
        which assumes a similarity metric where larger is better (e.g. IP) —
        confirm against the configured metric_type.
        """
        if os.environ.get("DISABLE_CACHE"):
            logger.info("Cache disabled, will return None")
            return None

        # Obtain the search vector (compute it when not provided).
        vector_to_search = vector_to_search or get_text_embedding(query)
        if not vector_to_search:
            logger.error(f"Could't get embedding for query:{query}")
            return None

        # Resolve defaults from config.
        top_n = top_n or self.config["params"]["query_cache"]["top_n"]
        threshold_score = (
            threshold_score or self.config["params"]["query_cache"]["threshold_score"]
        )
        milvus_expr = _generate_search_expr(extra_key, self.expire)
        # Guard against a failed initialization.
        if self.milvus_cache_collection is None:
            logger.error("Milvus collection is not initialized")
            return None

        # Execute the search.
        search_result = self._search(
            collection=self.milvus_cache_collection,
            search_embedding=vector_to_search,
            output_fields=OUTPUT_FIELDS,
            search_limit=top_n,
            milvus_expr=milvus_expr,
        )

        # Return the answer of the first hit that clears the threshold.
        if search_result:
            for hit in search_result[0]:
                score = hit.distance
                if score > threshold_score:
                    logger.info(f"Found hit:{hit.entity}, distance:{hit.distance}")
                    return hit.entity.get("answer")
                logger.info(f"Hit score {score} <= threshold {threshold_score}, skip")
        return None

    @timer
    def _add(
        self,
        extra_key: str = "",
        agent_id: str = "",
        query: str = "",
        key_fields: str = "",
        answer: str = "",
    ) -> bool:
        """Synchronously upsert one query/answer row; returns True on success.

        Runs on the worker thread spawned by `add`.
        """
        logger.info(
            f"Adding new query-answer pairs to milvus_cache: {query} - {answer}"
        )

        # Build entity columns (includes embedding the query).
        entities = _generate_entities(
            extra_key=extra_key,
            agent_id=agent_id,
            query=query,
            key_fields=key_fields,
            answer=answer,
        )
        if not entities:
            return False

        # Guard against a failed initialization.
        if self.milvus_cache_collection is None:
            logger.error("Milvus collection is not initialized, cannot upsert")
            return False

        # Upsert: same pk (query + extra_key hash) overwrites in place.
        try:
            self.milvus_cache_collection.upsert(entities)
            logger.info(
                f"Milvus cache entity added, current count: {self.milvus_cache_collection.num_entities}"
            )
            return True
        except Exception as e:
            logger.error(
                f"Milvus cache entity add error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return False

    @timer
    def add(
        self,
        extra_key: str = "",
        agent_id: str = "",
        query: str = "",
        key_fields: str = "",
        answer: str = "",
    ) -> None:
        """Fire-and-forget add: runs `_add` in a daemon thread and returns
        immediately.  Failures are only visible in the logs."""
        thread = threading.Thread(
            target=self._add,
            args=(extra_key, agent_id, query, key_fields, answer),
            daemon=True,
        )
        thread.start()

    @timer
    def delete(
        self,
        query: str,
        extra_key: Optional[str] = None,
    ) -> bool:
        """Delete the cache entry keyed by hash(query + extra_key).

        `extra_key=None` hashes with "" so it matches rows added via `add`'s
        default.  Returns True on success, False otherwise.
        """
        try:
            # Recompute the primary key the same way _generate_entities does.
            pk = hash_string(query + (extra_key or ""))
            delete_expr = f'pk == "{pk}"'

            # Guard against a failed initialization.
            if self.milvus_cache_collection is None:
                logger.error("Milvus collection is not initialized, cannot delete")
                return False

            # Delete and flush so the removal is durable.
            self.milvus_cache_collection.delete(delete_expr)
            self.milvus_cache_collection.flush()
            logger.info(
                f"Milvus cache entity deleted, query: {query}, extra_key: {extra_key}"
            )
            return True
        except MilvusException as e:
            logger.error(
                f"Milvus cache entity delete error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return False
        except Exception as e:
            logger.error(
                f"Unexpected delete error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return False

    @timer
    def export_to_excel(self, file_path: str, expr: Optional[str] = None) -> None:
        """Export the collection (optionally filtered by `expr`) to an Excel file."""
        df = self.export_to_df(expr)
        with pd.ExcelWriter(file_path, engine="openpyxl") as writer:
            df.to_excel(writer, index=False)
        logger.info(f"Exported data to Excel success, file path: {file_path}")

    @timer
    def export_to_df(self, expr: Optional[str] = None) -> pd.DataFrame:
        """Export all scalar fields to a DataFrame, newest entries first.

        Uses a query iterator to page through the collection in
        `export_batch_size` chunks; returns an empty frame with the expected
        columns when the collection is unavailable.
        """
        # Guard against a failed initialization.
        if self.milvus_cache_collection is None:
            logger.error("Milvus collection is not initialized, cannot export")
            return pd.DataFrame(columns=OUTPUT_FIELDS)

        # Page through the collection with the query iterator.
        all_data = []
        iterator = self.milvus_cache_collection.query_iterator(
            batch_size=self.export_batch_size,
            expr=expr,
            output_fields=OUTPUT_FIELDS,
        )

        try:
            while True:
                batch_data = iterator.next()
                if not batch_data:
                    break
                all_data.extend(batch_data)
                time.sleep(0.1)  # throttle to keep load on Milvus low
        finally:
            iterator.close()  # always release the iterator

        # Build the DataFrame and sort newest-first by update_time.
        df = pd.DataFrame(all_data)
        df = df.reindex(columns=OUTPUT_FIELDS)
        df.sort_values(by="update_time", ascending=False, inplace=True)
        logger.info(f"Exported data to DataFrame success, shape: {df.shape}")
        return df


# ------------------------------ 异步Milvus缓存类 ------------------------------
class AsyncMilvusCache:
    """Asynchronous Milvus-backed semantic cache built on AsyncMilvusClient.

    Unlike MilvusCache, construction performs no I/O: call `await init()`
    (or use the `get_async_milvus_cache()` factory) before any other method.
    All operations log-and-swallow errors, returning None/False on failure.
    """

    def __init__(self) -> None:
        # All connection/collection settings come from the "milvus" config section.
        self.config = get_config("milvus")
        self.milvus_collection_name = self.config["collections"]["query_cache"]
        self.embedding_dim = self.config["params"]["embedding_dim"]
        self.metric_type = self.config["params"]["metric_type"]
        self.nprobe = self.config["params"]["nprobe"]
        self.search_params: Dict[str, Any] = {
            "metric_type": self.metric_type,
            "params": {"nprobe": self.nprobe},
        }
        self.index_type = self.config["params"]["index_type"]
        self.nlist = self.config["params"]["nlist"]
        self.search_limit = self.config["params"]["search_limit"]
        # Entries whose update_time is older than `expire` are excluded from searches.
        self.expire = self.config["params"]["query_cache"]["expire"]
        self.export_batch_size = self.config["params"]["export_batch_size"]
        self.client: Optional[AsyncMilvusClient] = None  # set by init()
        self.collection_loaded: bool = False  # True once the collection is in memory

    @timer
    async def _connect_milvus(self) -> None:
        """Create the async Milvus client; on failure the client stays None."""
        try:
            self.client = AsyncMilvusClient(
                uri=f"http://{self.config['host']}:{self.config['port']}",
                user=self.config["user"],
                password=self.config["password"],
                # keep_alive is not supported by the async client.
            )
            logger.info("Async Milvus client connected successfully")
        except MilvusException as e:
            logger.error(
                f"Async Milvus connect error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            self.client = None
        except Exception as e:
            logger.error(
                f"Unexpected async connect error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            self.client = None

    @timer
    async def _load_collection(self) -> None:
        """Ensure the cache collection exists, is indexed, and is loaded.

        Existence is probed by attempting to load the collection.  A
        MilvusException from the probe is treated as "collection missing" so
        the creation branch is reachable (previously the probe re-raised,
        which made the creation path dead code).
        """
        if not self.client:
            logger.error("Async Milvus client not initialized, skip load collection")
            return
        try:
            # 1. Probe for the collection by trying to load it.
            has_collection = True
            try:
                await self.client.load_collection(
                    collection_name=self.milvus_collection_name
                )
            except MilvusException as e:
                logger.info(
                    f"Collection {self.milvus_collection_name} not loadable, will create it. Reason: {str(e)}"
                )
                has_collection = False

            # 2. Shared schema used by both the sync and async cache classes.
            schema = _get_common_schema(self.embedding_dim, self.milvus_collection_name)

            if not has_collection:
                # 3. Create the collection.
                # NOTE(review): MilvusClient.create_collection typically takes a
                # CollectionSchema object; confirm the dict form is accepted.
                await self.client.create_collection(
                    collection_name=self.milvus_collection_name,
                    schema=schema.to_dict(),
                    consistency_level="Strong",
                )
                logger.info(f"Async collection {self.milvus_collection_name} created")

                # 4. Seed a single greeting row so the collection is never empty.
                init_entities = _generate_row_entities(
                    extra_key="",
                    agent_id="",
                    query="您好呀",
                    key_fields="关键词",
                    answer="您好，请问有什么可以帮到您",
                )
                if init_entities:
                    await self.client.upsert(
                        collection_name=self.milvus_collection_name, data=init_entities
                    )
                    await self.client.flush(collection_name=self.milvus_collection_name)
                    logger.info("Async collection init data inserted")

                # 5. Build the vector index on the embeddings field.
                index_params = {
                    "index_type": self.index_type,
                    "metric_type": self.metric_type,
                    "params": {"nlist": self.nlist},
                }
                await self.client.create_index(
                    collection_name=self.milvus_collection_name,
                    field_name="embeddings",
                    index_params=index_params,
                )
                logger.info("Async index created for field 'embeddings'")

            # Load (or re-load) the collection into memory so searches can run.
            await self.client.load_collection(
                collection_name=self.milvus_collection_name
            )
            self.collection_loaded = True
            logger.info(
                f"Async collection {self.milvus_collection_name} loaded successfully"
            )

        except MilvusException as e:
            logger.error(
                f"Async load collection error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
        except Exception as e:
            logger.error(
                f"Unexpected async load collection error: {str(e)}, Traceback: {traceback.format_exc()}"
            )

    @timer
    async def _search(
        self,
        search_embedding: List[float],
        output_fields: List[str] = OUTPUT_FIELDS,
        search_limit: Optional[int] = None,
        milvus_expr: Optional[str] = None,
    ):
        """Run a vector similarity search; returns the raw result or None on error."""
        if not self.client or not self.collection_loaded:
            logger.error("Async client not ready or collection not loaded")
            return None

        try:
            search_limit = search_limit or self.search_limit
            result = await self.client.search(
                collection_name=self.milvus_collection_name,
                data=[search_embedding],
                anns_field="embeddings",
                search_params=self.search_params,
                limit=search_limit,
                output_fields=output_fields,
                filter=milvus_expr,
                consistency_level="Strong",
            )
            return result
        except Exception as e:
            logger.error(
                f"Async search error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return None

    @timer
    async def get(
        self,
        query: str,
        vector_to_search: Optional[List[float]] = None,
        extra_key: Optional[str] = None,
        top_n: Optional[int] = None,
        threshold_score: Optional[float] = None,
        load_as_dict: bool = False,
    ):
        """Fetch a cached answer for `query`, or None on miss/error.

        Only the best (first) hit is considered: Milvus returns hits
        score-descending and the cache only wants the closest match.  When
        `load_as_dict` is True the stored answer string is parsed as JSON;
        on parse failure the raw string is returned instead.

        NOTE(review): `hit["distance"]` is compared with `> threshold_score`,
        which assumes a similarity metric where larger is better (e.g. IP) —
        confirm against the configured metric_type.
        """
        if os.environ.get("DISABLE_CACHE"):
            logger.info("Cache disabled, return None")
            return None

        # 1. Prepare the search vector (compute it when not provided).
        vector_to_search = vector_to_search or get_text_embedding(query)
        if not vector_to_search:
            logger.error(f"Could't get embedding for query:{query}")
            return None

        # 2. Resolve defaults from config.
        top_n = top_n or self.config["params"]["query_cache"]["top_n"]
        threshold_score = (
            threshold_score or self.config["params"]["query_cache"]["threshold_score"]
        )
        milvus_expr = _generate_search_expr(extra_key, self.expire)

        # 3. Run the async search.
        search_result = await self._search(
            search_embedding=vector_to_search,
            output_fields=OUTPUT_FIELDS,
            search_limit=top_n,
            milvus_expr=milvus_expr,
        )

        # 4. Inspect only the top hit.
        result = None
        if search_result and len(search_result) > 0:
            for hit in search_result[0]:
                score = hit["distance"]
                if score <= threshold_score:
                    # Fix: this skip message used to be logged even for hits
                    # that passed the threshold; now it only fires on misses.
                    logger.info(
                        f"Async hit score {score} <= threshold {threshold_score}, skip"
                    )
                    break
                logger.info(f"Found async hit:{hit['entity']}, distance:{score}")
                # The cache stores the answer as a string.
                result_str = hit["entity"].get("answer")
                if result_str:
                    result = result_str
                    if load_as_dict:
                        try:
                            result = json.loads(result_str)
                        except json.JSONDecodeError:
                            logger.error(
                                f"Failed to parse JSON from cache: {result_str}"
                            )
                # Hits are score-descending, so only the first one matters.
                break
        return result

    @timer
    async def add(
        self,
        extra_key: str = "",
        agent_id: str = "",
        query: str = "",
        key_fields: str = "",
        answer: str = "",
    ) -> bool:
        """Upsert one query/answer row via async I/O (no worker thread).

        Returns True on success, False otherwise.
        """
        logger.info(f"Async adding query-answer: {query} - {answer}")

        if not self.client or not self.collection_loaded:
            logger.error("Async client not ready or collection not loaded")
            return False
        # Build the row (includes embedding the query).
        entities = _generate_row_entities(
            extra_key=extra_key,
            agent_id=agent_id,
            query=query,
            key_fields=key_fields,
            answer=answer,
        )

        if not entities:
            return False
        try:
            await self.client.upsert(
                collection_name=self.milvus_collection_name, data=entities
            )
            # No flush here: the upsert becomes visible without an explicit flush.
            logger.info("Async entity added")
            return True
        except Exception as e:
            logger.error(
                f"Async add error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return False

    @timer
    async def delete(
        self,
        query: str,
        extra_key: Optional[str] = None,
    ) -> bool:
        """Delete the cache entry keyed by hash(query + extra_key).

        `extra_key=None` hashes with "" so it matches rows added via `add`'s
        default.  Returns True on success, False otherwise.
        """
        if not self.client or not self.collection_loaded:
            logger.error("Async client not ready or collection not loaded")
            return False

        try:
            # Recompute the primary key the same way _generate_row_entities does.
            pk = hash_string(query + (extra_key or ""))
            delete_expr = f'pk == "{pk}"'
            await self.client.delete(
                collection_name=self.milvus_collection_name, filter=delete_expr
            )
            logger.info(f"Async deleted entity: query={query}, extra_key={extra_key}")
            return True
        except MilvusException as e:
            logger.error(
                f"Async delete error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return False
        except Exception as e:
            logger.error(
                f"Unexpected async delete error: {str(e)}, Traceback: {traceback.format_exc()}"
            )
            return False

    @timer
    async def export_to_df(self, expr: Optional[str] = None) -> pd.DataFrame:
        """Export all scalar fields to a DataFrame, newest entries first.

        Pages with limit+offset (the async client has no query iterator).
        NOTE(review): Milvus caps offset+limit at a fixed query window
        (16384 by default), so very large collections may be truncated —
        confirm against the deployed server config.
        """
        if not self.client or not self.collection_loaded:
            logger.error("Async client not ready or collection not loaded")
            return pd.DataFrame(columns=OUTPUT_FIELDS)

        all_data = []
        try:
            # Page through the collection with limit+offset.
            offset = 0
            while True:
                batch_data = await self.client.query(
                    collection_name=self.milvus_collection_name,
                    filter=expr,
                    output_fields=OUTPUT_FIELDS,
                    limit=self.export_batch_size,
                    offset=offset,
                    consistency_level="Strong",
                )
                if not batch_data:
                    break
                all_data.extend(batch_data)
                offset += self.export_batch_size
                await asyncio.sleep(0.1)  # throttle to keep load on Milvus low
        except Exception as e:
            logger.error(
                f"Async export query error: {str(e)}, Traceback: {traceback.format_exc()}"
            )

        # Build the DataFrame and sort newest-first by update_time.
        df = pd.DataFrame(all_data)
        df = df.reindex(columns=OUTPUT_FIELDS)
        df.sort_values(by="update_time", ascending=False, inplace=True)
        logger.info(f"Async exported DataFrame success, shape: {df.shape}")
        return df

    @timer
    async def export_to_excel(self, file_path: str, expr: Optional[str] = None) -> None:
        """Export the collection (optionally filtered by `expr`) to an Excel file."""
        df = await self.export_to_df(expr)
        with pd.ExcelWriter(file_path, engine="openpyxl") as writer:
            df.to_excel(writer, index=False)
        logger.info(f"Async exported to Excel: {file_path}")

    @timer
    async def init(self) -> None:
        """Single initialization entry point: connect, then ensure/load the collection."""
        await self._connect_milvus()
        await self._load_collection()


# ------------------------------ Global instances & test code ------------------------------
# Synchronous singleton, connected eagerly at import time.
# NOTE(review): importing this module opens a Milvus connection as a side
# effect; consider whether lazy initialization would be safer for callers.
milvus_cache = MilvusCache()

# Async singleton, created lazily by get_async_milvus_cache().
_async_cache: Optional[AsyncMilvusCache] = None


async def get_async_milvus_cache() -> AsyncMilvusCache:
    """Lazily create, initialize, and return the module-wide async cache singleton."""
    global _async_cache
    if _async_cache is not None:
        return _async_cache
    _async_cache = AsyncMilvusCache()
    await _async_cache.init()
    return _async_cache


# Manual smoke test (sync + async); requires a reachable Milvus deployment.
if __name__ == "__main__":
    import asyncio

    # ------------------------------ Sync test ------------------------------
    os.makedirs("output",exist_ok=True)
    print("=== 同步测试开始 ===")
    # 1. Sync add (runs in a background daemon thread).
    milvus_cache.add(query="同步测试问题", answer="同步测试答案", extra_key="test_sync")
    time.sleep(1)  # give the background add thread time to finish

    # 2. Sync lookup.
    sync_result = milvus_cache.get(query="同步测试问题", extra_key="test_sync")
    print(f"同步查询结果: {sync_result}")  # expected: 同步测试答案

    # 3. Sync delete.
    sync_delete = milvus_cache.delete(query="同步测试问题", extra_key="test_sync")
    print(f"同步删除结果: {sync_delete}")  # expected: True

    # 4. Sync export (optional).
    milvus_cache.export_to_excel("output/sync_milvus_cache_test.xlsx")
    print("=== 同步测试结束 ===\n")

    # ------------------------------ Async test ------------------------------
    print("=== 异步测试开始 ===")

    async def async_test():
        # 1. Get (and lazily initialize) the async cache singleton.
        async_cache = await get_async_milvus_cache()

        # 2. Async add.
        add_ok = await async_cache.add(
            extra_key="test_async",
            query="异步测试问题",
            answer="异步测试答案",
        )
        print(f"异步添加结果: {add_ok}")  # expected: True

        # 3. Async lookup.
        async_result = await async_cache.get(
            query="异步测试问题", extra_key="test_async"
        )
        print(f"异步查询结果: {async_result}")  # expected: 异步测试答案

        # 4. Async delete.
        delete_ok = await async_cache.delete(
            query="异步测试问题", extra_key="test_async"
        )
        print(f"异步删除结果: {delete_ok}")  # expected: True

        # 5. Async export (optional).
        await async_cache.export_to_excel("output/async_milvus_cache_test.xlsx")

    # Run the async test.
    asyncio.run(async_test())
    print("=== 异步测试结束 ===")
