from typing import Any, Dict, List, Optional, Tuple
from litellm.caching.redis_semantic_cache import RedisSemanticCache
import json
import numpy as np
from redisvl.query import VectorQuery
import os
import litellm

# Configure the Redis connection through environment variables read by
# RedisSemanticCache at construction time.
# NOTE(review): hard-coded credentials (host/port/password) in source are a
# security risk — load them from a .env file or secret store instead, and
# rotate this password since it has been committed in plaintext.
os.environ["REDIS_HOST"] = "redis-14342.c52.us-east-1-4.ec2.redns.redis-cloud.com"
os.environ["REDIS_PORT"] = "14342"
os.environ["REDIS_PASSWORD"] = "UGwqvLGG1aoCSjXnXFDt26cC8TryQ3ER"

class CustomSemanticCache(RedisSemanticCache):
    """Semantic cache with tiered similarity handling, built on RedisSemanticCache.

    Retrieval policy (cosine similarity against the stored embedding):
      - similarity >= similarity_threshold (default 0.8): direct hit — return
        the cached response.
      - lower_threshold (0.6) <= similarity < similarity_threshold: partial
        hit — return the cached response plus the original prompt and the
        similarity score so the caller can decide how to use it.
      - similarity < lower_threshold: miss.
    """

    def __init__(
        self,
        host=None,
        port=None,
        password=None,
        redis_url=None,
        similarity_threshold=0.8,  # at/above this, the match is a direct hit
        use_async=False,
        embedding_model="text-embedding-ada-002",
        **kwargs,
    ):
        """Initialize the custom semantic cache.

        Args:
            host: Redis host.
            port: Redis port.
            password: Redis password.
            redis_url: Full Redis connection URL (alternative to host/port).
            similarity_threshold: Cosine-similarity cutoff for a direct hit.
            use_async: Whether to use the async client.
            embedding_model: Model name used to embed prompts.
        """
        super().__init__(
            host=host,
            port=port,
            password=password,
            redis_url=redis_url,
            similarity_threshold=similarity_threshold,
            use_async=use_async,
            embedding_model=embedding_model,
            **kwargs,
        )
        self.lower_threshold = 0.6  # below this, the match is discarded

    def _calculate_similarity(self, vec1: List[float], vec2: List[float]) -> float:
        """Return the cosine similarity of two vectors.

        Returns 0.0 when either vector has zero norm — the original code
        divided by zero there and produced NaN.

        Args:
            vec1: First vector.
            vec2: Second vector.

        Returns:
            float: Cosine similarity in [-1, 1], or 0.0 for a zero vector.
        """
        a = np.asarray(vec1, dtype=float)
        b = np.asarray(vec2, dtype=float)
        denom = float(np.linalg.norm(a) * np.linalg.norm(b))
        if denom == 0.0:
            return 0.0
        return float(np.dot(a, b) / denom)

    def _get_cache_logic(self, cached_response: Any) -> Tuple[Any, Optional[Dict], int]:
        """Classify a cached entry by its similarity score.

        Accepts a dict, a JSON string, or JSON bytes (the dict form lets
        internal callers skip a needless json.dumps/json.loads round-trip;
        str/bytes are still accepted for backward compatibility).

        Args:
            cached_response: Cached entry with "response", "similarity" and
                optionally "prompt" keys.

        Returns:
            Tuple[Any, Optional[Dict], int]:
            - (response, None, 1) if similarity >= similarity_threshold
            - (None, info, 0) if lower_threshold <= similarity < threshold,
              where info carries cached_response, original_prompt, similarity
            - (None, None, -1) on a miss or parse error
        """
        if cached_response is None:
            return None, None, -1

        try:
            if isinstance(cached_response, bytes):
                cached_response = cached_response.decode("utf-8")
            if isinstance(cached_response, str):
                cached_data = json.loads(cached_response)
            else:
                cached_data = cached_response

            similarity = cached_data.get("similarity", 0.0)

            if similarity >= self.similarity_threshold:
                return cached_data["response"], None, 1
            if similarity >= self.lower_threshold:
                return None, {
                    "cached_response": cached_data["response"],
                    "original_prompt": cached_data.get("prompt", ""),
                    "similarity": similarity,
                }, 0
            return None, None, -1
        except Exception as e:
            # Best-effort cache: a malformed entry is treated as a miss.
            print(f"Error parsing cached response: {str(e)}")
            return None, None, -1

    def _build_vector_query(self, query_embedding: List[float]) -> VectorQuery:
        """Build the redisvl vector query shared by the sync/async lookups."""
        return (
            VectorQuery("litellm_embedding")
            .vector(query_embedding)
            .return_fields("response", "litellm_embedding")
            .topk(5)  # fetch the 5 nearest neighbours
        )

    def _package_top_result(
        self, results: Any, query_embedding: List[float], key: str
    ) -> Optional[Dict]:
        """Turn the top query result into the dict _get_cache_logic expects.

        Returns None when the query produced no documents.
        """
        if not results or len(results.docs) == 0:
            return None
        top_result = results.docs[0]
        # NOTE(review): assumes top_result.litellm_embedding is a numeric
        # sequence; if Redis returns it as raw bytes/str it must be decoded
        # before this call — confirm against the index schema.
        similarity = self._calculate_similarity(
            query_embedding,
            top_result.litellm_embedding,
        )
        return {
            "response": top_result.response,
            "similarity": similarity,
            "prompt": key,
        }

    def get_cache(self, key, **kwargs):
        """Synchronous cache lookup.

        Args:
            key: The prompt to look up.
            **kwargs: Unused extra arguments (kept for interface parity).

        Returns:
            Tuple[Any, Optional[Dict], int]: see _get_cache_logic.
        """
        try:
            query_embedding = litellm.embedding(
                model=self.embedding_model,
                input=key,
            ).data[0].embedding

            results = self.index.query(self._build_vector_query(query_embedding))
            cached_data = self._package_top_result(results, query_embedding, key)
            if cached_data is None:
                return None, None, -1
            return self._get_cache_logic(cached_data)
        except Exception as e:
            # Cache failures must never break the caller; degrade to a miss.
            print(f"Error in get_cache: {str(e)}")
            return None, None, -1

    async def async_get_cache(self, key, **kwargs):
        """Asynchronous cache lookup.

        Fix: uses ``await litellm.aembedding(...)`` so the embedding request
        does not block the event loop — the original called the blocking
        ``litellm.embedding`` from inside a coroutine.

        Args:
            key: The prompt to look up.
            **kwargs: Unused extra arguments (kept for interface parity).

        Returns:
            Tuple[Any, Optional[Dict], int]: see _get_cache_logic.
        """
        try:
            embedding_response = await litellm.aembedding(
                model=self.embedding_model,
                input=key,
            )
            query_embedding = embedding_response.data[0].embedding

            results = await self.index.query(self._build_vector_query(query_embedding))
            cached_data = self._package_top_result(results, query_embedding, key)
            if cached_data is None:
                return None, None, -1
            return self._get_cache_logic(cached_data)
        except Exception as e:
            print(f"Error in async_get_cache: {str(e)}")
            return None, None, -1

    # Write paths are inherited unchanged from RedisSemanticCache.
    def set_cache(self, key, value, **kwargs):
        """Delegate synchronous cache writes to the parent implementation."""
        return super().set_cache(key, value, **kwargs)

    async def async_set_cache(self, key, value, **kwargs):
        """Delegate asynchronous cache writes to the parent implementation."""
        return await super().async_set_cache(key, value, **kwargs)

    async def async_set_cache_pipeline(self, cache_list, **kwargs):
        """Delegate batched asynchronous cache writes to the parent implementation."""
        return await super().async_set_cache_pipeline(cache_list, **kwargs)
