import json
import time
import numpy as np
from redis import Redis
from redis.exceptions import RedisError
from loguru import logger
from typing import Dict, Optional, List, Tuple, Any

# Import your own Ollama embedding function
from faiss_rag.embed import get_embedding


class RedisContextManager:
    """Redis-backed session context store.

    ``decode_responses`` is deliberately left off (default ``False``) so the
    client returns raw ``bytes``; text payloads are encoded/decoded manually.
    This lets binary payloads (e.g. float32 vectors stored by subclasses)
    round-trip through Redis without being corrupted by UTF-8 decoding.
    """

    def __init__(
            self,
            host: str = "localhost",
            port: int = 6379,
            db: int = 0,
            prefix: str = "agent_context:"
    ):
        """Connect to Redis and verify the connection with a PING.

        Raises:
            RedisError: if the server cannot be reached.
        """
        # Key fix 1: no decode_responses=True (default False -> bytes values),
        # so binary fields written by subclasses are returned untouched.
        self.redis = Redis(
            host=host,
            port=port,
            db=db
        )
        self.prefix = prefix
        try:
            self.redis.ping()
            logger.info("Redis 连接成功")
        except RedisError as e:
            logger.error(f"Redis 连接失败：{str(e)}")
            raise

    def _get_key(self, session_id: str) -> bytes:
        """Build the namespaced Redis key for a session, as bytes.

        Fix: return annotation corrected from ``-> str`` — the value is
        explicitly UTF-8 encoded before being returned.
        """
        return f"{self.prefix}{session_id}".encode('utf-8')

    def save_context(self, session_id: str, context: Dict[str, Any], expire_seconds: int = 3600) -> bool:
        """Serialize *context* to JSON and store it with a TTL.

        Returns:
            True on success, False on any Redis error (logged, not raised).
        """
        try:
            key = self._get_key(session_id)
            # Text payload is encoded manually because decode_responses is off.
            self.redis.set(key, json.dumps(context).encode('utf-8'), ex=expire_seconds)
            logger.debug(f"已保存上下文（session_id={session_id}，过期时间={expire_seconds}s）")
            return True
        except RedisError as e:
            logger.error(f"保存上下文失败：{str(e)}")
            return False

    def get_context(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Fetch and deserialize a session's context.

        Returns:
            The context dict, or None when the key is absent or Redis errors.
        """
        try:
            key = self._get_key(session_id)
            data = self.redis.get(key)
            if data:
                # Manual UTF-8 decode of the raw bytes payload.
                context_str = data.decode('utf-8')
                logger.debug(f"已获取上下文（session_id={session_id}）")
                return json.loads(context_str)
            logger.debug(f"未找到上下文（session_id={session_id}）")
            return None
        except RedisError as e:
            logger.error(f"获取上下文失败：{str(e)}")
            return None

    def update_context(self, session_id: str, key: str, value: Any) -> bool:
        """Set a single field in an existing context and re-save it.

        NOTE(review): re-saving resets the TTL to save_context's default
        (3600 s) rather than preserving the key's remaining expiry —
        confirm this is intended.
        """
        try:
            context = self.get_context(session_id)
            if not context:
                logger.warning(f"上下文不存在，无法更新（session_id={session_id}）")
                return False
            context[key] = value
            return self.save_context(session_id, context)
        except Exception as e:
            logger.error(f"更新上下文失败：{str(e)}")
            return False

    def delete_context(self, session_id: str) -> bool:
        """Delete a session's context.

        Returns:
            True if a key was actually removed, False otherwise.
        """
        try:
            key = self._get_key(session_id)
            result = self.redis.delete(key)
            if result:
                logger.debug(f"已删除上下文（session_id={session_id}）")
                return True
            logger.debug(f"上下文不存在，无需删除")
            return False
        except RedisError as e:
            logger.error(f"删除上下文失败：{str(e)}")
            return False


class RedisVectorContextManager(RedisContextManager):
    """Context manager that additionally stores a float32 embedding per query.

    Vectors are kept as raw ``np.float32`` byte strings inside Redis hashes
    (never UTF-8 decoded), which relies on the base class keeping
    ``decode_responses`` off.
    """

    def __init__(
            self,
            host: str = "localhost",
            port: int = 6379,
            db: int = 0,
            prefix: str = "agent_context:",
            embedding_dim: int = 384
    ):
        """Connect to Redis and probe the Ollama embedding service once.

        Raises:
            ValueError: if the embedding dimension does not match
                ``embedding_dim``.
            Exception: anything raised by the embedding service itself.
        """
        super().__init__(host, port, db, prefix)
        self.embedding_dim = embedding_dim
        # Probe the embedding backend so a misconfigured model fails fast
        # at construction time instead of at first query.
        try:
            test_vec = get_embedding("测试嵌入可用性")
            # Fix: explicit raise instead of `assert` — assertions are
            # silently stripped under `python -O`, which would skip this
            # validation entirely.
            if len(test_vec) != self.embedding_dim:
                raise ValueError(f"嵌入维度不匹配（实际{len(test_vec)}，预期{self.embedding_dim}）")
            logger.info(f"Ollama嵌入验证成功（维度：{self.embedding_dim}）")
        except Exception as e:
            logger.error(f"Ollama嵌入服务异常：{str(e)}")
            raise

    def _get_vector_key(self, session_id: str, query_id: int) -> bytes:
        """Build the per-query vector hash key, as bytes."""
        key_str = f"{self.prefix}{session_id}:query_{query_id}"
        return key_str.encode('utf-8')

    def save_query_with_vector(
            self,
            session_id: str,
            query_text: str,
            expire_seconds: int = 3600
    ) -> bool:
        """Embed *query_text*, store text + vector in a Redis hash, and
        append the query to the session's history.

        Returns:
            True on success, False on any failure (logged, not raised).
        """
        try:
            context = self.get_context(session_id) or {"history": []}
            # query_id is the next history index; assumes history is
            # append-only for the session.
            query_id = len(context["history"])
            vector_key = self._get_vector_key(session_id, query_id)

            # 1. Generate the Ollama embedding vector.
            embedding_vec = get_embedding(query_text)
            vector = np.array(embedding_vec, dtype=np.float32)
            vector_bytes = vector.tobytes()  # raw bytes — NOT UTF-8 encoded

            # 2. Store in Redis: text fields are manually encoded, the
            #    vector is stored as raw bytes.
            self.redis.hset(
                vector_key,
                mapping={
                    b"text": query_text.encode('utf-8'),
                    b"vector": vector_bytes,
                    b"timestamp": str(time.time()).encode('utf-8'),
                    b"query_id": str(query_id).encode('utf-8'),
                    b"session_id": session_id.encode('utf-8')
                }
            )
            self.redis.expire(vector_key, expire_seconds)

            # 3. Append to the session history.
            context["history"].append({
                "input": query_text,
                "timestamp": time.time(),
                "query_id": query_id
            })
            self.save_context(session_id, context, expire_seconds)
            logger.debug(f"已保存Ollama向量query（key={vector_key.decode('utf-8')}）")
            return True
        except Exception as e:
            logger.error(f"保存向量query失败：{str(e)}")
            return False

    def _calc_cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Cosine similarity rounded to 4 decimals; 0.0 if either norm is 0."""
        dot_product = np.dot(vec1, vec2)
        norm1 = np.linalg.norm(vec1)
        norm2 = np.linalg.norm(vec2)
        return round(dot_product / (norm1 * norm2) if (norm1 and norm2) else 0.0, 4)

    def semantic_search(
            self,
            session_id: str,
            query_text: str,
            topk: int = 3,
            time_weights: Tuple[float, float, float, float] = (1.0, 0.8, 0.5, 0.2)
    ) -> List[Dict[str, Any]]:
        """Rank historical queries by time-weighted cosine similarity.

        Args:
            time_weights: multipliers for the 0-1 / 1-5 / 5-10 / 10+ minute
                age buckets, newest first.

        Returns:
            Up to *topk* candidate dicts sorted by ``weighted_score`` desc;
            an empty list on any failure.
        """
        try:
            # 1. Embed the incoming query.
            query_embedding = get_embedding(query_text)
            query_vector = np.array(query_embedding, dtype=np.float32)
            current_time = time.time()
            all_candidates = []

            # 2. Walk every historical query of the session.
            context = self.get_context(session_id) or {"history": []}
            for record in context["history"]:
                query_id = record["query_id"]
                vector_key = self._get_vector_key(session_id, query_id)
                query_data = self.redis.hgetall(vector_key)  # dict of bytes

                if not query_data:
                    # Vector hash expired or was deleted — skip.
                    continue

                # Key fix 2: parse the vector as raw float32 bytes — never
                # UTF-8 decode it.
                stored_vector_bytes = query_data.get(b"vector")
                if not stored_vector_bytes:
                    continue
                stored_vector = np.frombuffer(stored_vector_bytes, dtype=np.float32)
                # Guard against corrupted / wrong-model vectors.
                if len(stored_vector) != self.embedding_dim:
                    logger.warning(f"向量维度异常（key={vector_key.decode('utf-8')}）")
                    continue

                # 3. Decode the text fields manually.
                stored_text = query_data[b"text"].decode('utf-8')
                stored_timestamp = float(query_data[b"timestamp"].decode('utf-8'))

                # 4. Similarity plus recency-weighted score.
                similarity = self._calc_cosine_similarity(query_vector, stored_vector)
                time_diff_min = (current_time - stored_timestamp) / 60

                if time_diff_min <= 1:
                    weight, time_range = time_weights[0], 0
                elif 1 < time_diff_min <= 5:
                    weight, time_range = time_weights[1], 1
                elif 5 < time_diff_min <= 10:
                    weight, time_range = time_weights[2], 2
                else:
                    weight, time_range = time_weights[3], 3

                all_candidates.append({
                    "text": stored_text,
                    "similarity": similarity,
                    "weighted_score": round(similarity * weight, 4),
                    "timestamp": stored_timestamp,
                    "time_range": time_range
                })

            # 5. Sort by weighted score and keep topk.
            all_candidates.sort(key=lambda x: x["weighted_score"], reverse=True)
            return all_candidates[:topk]

        except Exception as e:
            logger.error(f"语义搜索失败：{str(e)}")
            return []

    def generate_prompt(self, session_id: str, query_text: str, topk: int = 3) -> str:
        """Build an LLM prompt from the query plus its topk related history."""
        topk_results = self.semantic_search(session_id, query_text, topk)
        time_range_names = ["0-1分钟", "1-5分钟", "5-10分钟", "10分钟以上"]

        prompt = f"用户当前查询：{query_text}\n"
        prompt += f"相关历史查询（top{topk}，加权分数越高越相关）：\n"
        for i, res in enumerate(topk_results, 1):
            prompt += f"{i}. 内容：{res['text']} | 时间区间：{time_range_names[res['time_range']]} | 相似度：{res['similarity']} | 加权分数：{res['weighted_score']}\n"
        prompt += "结合历史查询的语义关联和时间优先级，生成准确、连贯的回答，优先参考加权分数高的信息。"
        return prompt


# -------------------------- Runnable example --------------------------
if __name__ == "__main__":
    vector_manager = RedisVectorContextManager(
        prefix="ollama_rag_context:",
        embedding_dim=384
    )
    session_id = "user_rag_compat_001"

    # [Cleanup 1] Read the history FIRST so the per-query vector keys can be
    # located, delete those vector hashes, and only then drop the context.
    # (Bug fix: the previous order deleted the context before reading it, so
    # get_context returned None and the vector hashes were never cleaned up.)
    context = vector_manager.get_context(session_id) or {"history": []}
    for record in context["history"]:
        vec_key = vector_manager._get_vector_key(session_id, record["query_id"])
        vector_manager.redis.delete(vec_key)
    vector_manager.delete_context(session_id)
    logger.info("已清理历史数据，确保测试环境干净")

    # [Setup] Each query is unique and given a distinct simulated age so that
    # every time bucket is exercised exactly once.
    test_queries = [
        ("周杰伦2025演唱会门票购买", 11),  # 10+ minutes ago
        ("周杰伦新专辑发布时间", 3),  # 1-5 minutes ago
        ("周杰伦杭州巡演行程", 0.5),  # 0-1 minute ago
        ("林俊杰演唱会信息", 6)  # 5-10 minutes ago (semantically less related)
    ]
    for text, delay_min in test_queries:
        mock_ts = time.time() - (delay_min * 60)
        vector_manager.save_query_with_vector(session_id, text)
        # Backdate the stored timestamp to simulate the query's age.
        ctx = vector_manager.get_context(session_id)
        last_id = len(ctx["history"]) - 1  # id of the query just stored
        vector_key = vector_manager._get_vector_key(session_id, last_id)
        vector_manager.redis.hset(vector_key, b"timestamp", str(mock_ts).encode('utf-8'))
        logger.info(f"已存储：{text}（模拟{delay_min}分钟前）")

    # Semantic search + prompt generation.
    current_query = "周杰伦近期演唱会相关"
    logger.info(f"\n当前查询：{current_query}")

    # [Post-process] Deduplicate results by text, keeping the first
    # (highest-weighted) occurrence.
    top3 = vector_manager.semantic_search(session_id, current_query, topk=3)
    unique_results = []
    seen_texts = set()
    for res in top3:
        if res["text"] not in seen_texts:
            seen_texts.add(res["text"])
            unique_results.append(res)
    unique_results = unique_results[:3]

    logger.info("\nTop3召回结果（去重后）：")
    for res in unique_results:
        print(
            f"文本：{res['text']} | 加权分数：{res['weighted_score']} | 时间区间：{['0-1', '1-5', '5-10', '10+'][res['time_range']]}分钟")

    prompt = vector_manager.generate_prompt(session_id, current_query)
    logger.info("\n生成Prompt：")
    print("=" * 100)
    print(prompt)
    print("=" * 100)