import asyncio
import json
import os
import traceback
from typing import Dict, List

from llm_finetune.qwen_lora import LoraType, QwenLoRA
from milvus_cache import get_async_milvus_cache
from redis_client import async_redis_pipe
from utils.base_utils import get_config, get_extra_key, logger, timer
from embedding_model import get_embed_model
# Eagerly load the embedding model at import time so the first query does not
# pay the model-load cost (presumably shared by the Milvus semantic cache —
# TODO confirm).
get_embed_model()

REDIS_EXPIRE_SECONDS = get_config("redis.expire_seconds")  # TTL for cached answers in Redis
MAX_RETRIES = 3  # NOTE(review): defined but unused anywhere in this file
NO_ANSWER = "不好意思我无法回答这个问题。"  # fallback reply ("Sorry, I can't answer this question.")


class AsyncRecAgent:
    """Interactive Q&A agent backed by a two-tier answer cache.

    Lookup order for each query:
      1. Redis (exact-key cache, TTL-bounded),
      2. Milvus (semantic cache over the query),
      3. the Qwen LoRA classifier model.
    Model answers are written back into both cache tiers.
    """

    def __init__(self, agent_id: str):
        self.agent_id = agent_id
        # Alternating user/assistant turns; trimmed in run().
        self.history: List[Dict] = []
        self.MAX_HISTORY_TURNS = 5  # keep at most the last 5 dialogue rounds
        self.model = QwenLoRA(LoraType.Classify)

    @timer
    async def _get_cache(self, query: str, cache_flag: bool = True):
        """Return a cached answer for *query*, or None on a miss.

        Checks Redis first; on a Redis miss falls back to the Milvus
        semantic cache. Any hit is (re)written to Redis, which refreshes
        its TTL / promotes the Milvus hit into Redis.

        Args:
            query: the user question used to build the cache key.
            cache_flag: when False, skip the lookup entirely.
        """
        if os.environ.get("DISABLE_CACHE"):
            logger.info("Disable cache, not get cache")
            return None
        if not cache_flag:
            logger.info("Not cache flag, not get cache")
            return None
        extra_key = get_extra_key()
        redis_key = extra_key + f"_query: {query}"
        result_from_cache = await async_redis_pipe.get(key=redis_key, load_as_dict=True)
        if result_from_cache:
            logger.info(
                f"Redis cache query:{query}, type:{type(result_from_cache)}, result:{result_from_cache}"
            )
        else:
            logger.warning(f"Not found redis cache for query:{query}")
            milvus_cache_instance = await get_async_milvus_cache()
            result_from_cache = await milvus_cache_instance.get(
                query=query, extra_key=extra_key, top_n=10, load_as_dict=True
            )
            if result_from_cache:
                logger.info(
                    f"Milvus cache query:{query}, type:{type(result_from_cache)}, result:{result_from_cache}"
                )

        if result_from_cache:
            # Write back on every hit: refreshes the Redis TTL, and promotes
            # a Milvus-only hit into Redis for the next lookup.
            # NOTE(review): _save_cache stores a JSON *string* while this path
            # stores the loaded object as-is — confirm async_redis_pipe.set
            # accepts both representations.
            await async_redis_pipe.set(
                key=redis_key, value=result_from_cache, expire=REDIS_EXPIRE_SECONDS
            )
            return result_from_cache
        logger.warning(f"Not found milvus cache for query:{query}")
        return None

    @timer
    async def _save_cache(self, query: str, answers: List[Dict], save_cache=True):
        """Persist *answers* for *query* in both Redis and Milvus.

        Returns True on success; False when caching is disabled/skipped or
        when a backend raised (the error is logged, never propagated).
        """
        if os.environ.get("DISABLE_CACHE"):
            logger.info("Disable cache, not save cache")
            return False
        if not save_cache:
            logger.info("Not save cache")
            return False
        try:
            extra_key = get_extra_key()
            redis_key = extra_key + f"_query: {query}"
            # The Redis entry is deliberately TTL-bounded: it should not live too long.
            answer_str = json.dumps(answers)
            await async_redis_pipe.set(
                key=redis_key, value=answer_str, expire=REDIS_EXPIRE_SECONDS
            )
            milvus_cache_instance = await get_async_milvus_cache()
            await milvus_cache_instance.add(
                extra_key=extra_key,
                query=query,
                answer=answer_str,
            )
            return True
        except Exception as e:
            logger.error(
                f"Save cache error, query:{query}, answers:{answers}, error:{e}, Traceback: {traceback.format_exc()}"
            )
            return False

    @timer
    async def execute_query(self, query: str):
        """Answer *query*: cache lookup first, model fallback second.

        Always returns a non-None answer; NO_ANSWER is returned when the
        model produced nothing or raised. (Bug fix: the original fell
        through and returned None in both of those cases.)
        """
        try:
            cached = await self._get_cache(query)
            if cached:
                return cached
        except Exception as e:
            # A cache failure must not break the query path — fall through
            # to the model.
            logger.warning(
                f"Failed get cache error: {str(e)}, traceback: {traceback.format_exc()}"
            )
        # Reaching here means the cache produced no answer.
        try:
            # NOTE(review): classify() is a synchronous call and blocks the
            # event loop for its duration; consider asyncio.to_thread if
            # other tasks must stay responsive.
            results = self.model.classify(query)
            if results:
                # Populate both cache tiers for subsequent queries.
                await self._save_cache(query, results)
                return results
            print(NO_ANSWER)
            return NO_ANSWER
        except Exception as e:
            logger.error(f"查询执行失败: {e}", exc_info=True)
            print(NO_ANSWER)
            return NO_ANSWER

    async def run(self):
        """Start the interactive dialogue loop (simulation/demo only)."""
        try:
            while True:
                try:
                    query = input("\n💬 请输入您想要的咨询的问题> ").strip()
                    if query.lower() in ["quit", "exit", "退出"]:
                        print("👋 再见！")
                        break
                    if not query:
                        print("⚠️  请输入有效问题。")
                        continue
                    results = await self.execute_query(query)
                    print("*" * 10, results, "*" * 10)
                    # Update the dialogue history.
                    self.history.append({"role": "user", "content": query})
                    self.history.append({"role": "assistant", "content": results})
                    # Keep only the most recent N rounds (2 entries per round).
                    self.history = self.history[-2 * self.MAX_HISTORY_TURNS :]
                except KeyboardInterrupt:
                    print("\n")
                    logger.info("用户通过 Ctrl+C 中断输入")
                    choice = input("是否退出？[y/N] ").strip().lower()
                    if choice in ["y", "yes", "是", "退出"]:
                        print("👋 再见！")
                        break
                    # Any other answer: resume prompting.

        except Exception as e:
            logger.critical(f"程序发生未预期错误: {e}", exc_info=True)
            print("\n🚨 系统异常，已退出。")
        finally:
            logger.info("对话系统已关闭, 下次再见噢~")


async def main():
    """Entry point for the interactive simulation: build one agent and run its REPL."""
    rec_agent = AsyncRecAgent(agent_id="agent_id")
    await rec_agent.run()


if __name__ == "__main__":
    # Run the interactive demo loop when executed as a script.
    asyncio.run(main())
