import os
import redis
import json
from langchain_core.vectorstores import VectorStoreRetriever
from werkzeug.datastructures import FileStorage
from io import BytesIO
from pypdf import PdfReader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_redis import RedisConfig, RedisVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
import loguru
import re
from typing import Optional, Tuple, List, Dict
from datetime import datetime
import hashlib

logger = loguru.logger


def init_redis_Object(redis_url: str):
    """Create and return a Redis client from a connection URL."""
    client = redis.from_url(redis_url)
    logger.debug(f"r的类型是{type(client)}")
    return client


def format_retriever(docs: List[Document]) -> str:
    """Render retrieved documents as one human-readable string.

    Each document is numbered and prefixed with its source and relevance
    score (taken from metadata, defaulting to 未知来源 / 0); documents
    are separated by blank lines. Returns a fixed notice when empty.
    """
    if not docs:
        return "未找到相关知识库内容"

    rendered = []
    for idx, doc in enumerate(docs, start=1):
        origin = doc.metadata.get("source", "未知来源")
        relevance = doc.metadata.get("score", 0)
        rendered.append(
            f"【文档{idx}】来源: {origin} (相关性: {relevance:.2f})\n{doc.page_content}"
        )

    return "\n\n".join(rendered)


def get_index_name(user_id: int, user_input_name: str) -> str:
    """Build a unique index name: '{user_id}_{name}::{first 8 md5 hex chars}'."""
    digest = hashlib.md5(f"{user_id}_{user_input_name}".encode()).hexdigest()
    return f"{user_id}_{user_input_name}::{digest[:8]}"


def get_user_input_names_by_user_id(redis_url: str, user_id: int) -> List[str]:
    """Return the sorted, deduplicated knowledge-base names for a user.

    Keys follow the pattern '{user_id}_{kb_name}::{hash}'; the kb_name is
    the part between the first '_' and the '::' suffix.

    Args:
        redis_url: Redis connection URL.
        user_id: Owner of the knowledge bases.

    Returns:
        Sorted list of unique kb_name strings (empty if none found).
    """
    r = redis.from_url(redis_url)
    pattern = f"{user_id}_*::*"  # match only keys carrying the hash suffix
    kb_names = set()

    cursor = 0
    while True:
        cursor, keys = r.scan(cursor=cursor, match=pattern, count=100)
        for key in keys:
            # Decode outside the try block so the except handler can always
            # reference key_str. BUGFIX: previously key_str was unbound in
            # the handler when decode() itself failed, raising NameError
            # and masking the original error.
            key_str = key.decode("utf-8") if isinstance(key, bytes) else str(key)
            try:
                # strip the '::hash' suffix, then drop the leading 'user_id_'
                kb_name = key_str.split("::")[0].split("_", 1)[1]
                kb_names.add(kb_name)
            except Exception as e:
                logger.error(f"解析键名失败 {key_str}: {str(e)}")
        if cursor == 0:
            break

    return sorted(kb_names)


def delete_kb_by_user_id(redis_url: str, user_id: int, kb_name: str) -> bool:
    """Delete every Redis key belonging to one user's knowledge base.

    Matches '{user_id}_{kb_name}::*' with SCAN and deletes each key.

    Args:
        redis_url: Redis connection URL.
        user_id: Owner of the knowledge base.
        kb_name: User-facing knowledge-base name.

    Returns:
        True when at least one key was deleted and no deletion failed.
    """
    r = redis.from_url(redis_url)
    pattern = f"{user_id}_{kb_name}::*"

    deleted_count = 0
    failed_count = 0
    cursor = 0
    while True:
        cursor, keys = r.scan(cursor=cursor, match=pattern, count=100)
        for key in keys:
            try:
                # DEL returns the number of keys actually removed (0 or 1)
                deleted_count += r.delete(key)
            except Exception as e:
                # BUGFIX: previously returned False on the first failure,
                # aborting the scan and leaving the remaining keys behind;
                # now record the failure and keep going.
                failed_count += 1
                logger.error(f"删除失败 {key}: {str(e)}")
        if cursor == 0:
            break

    logger.info(f"成功删除 {deleted_count} 个知识库索引")
    return deleted_count > 0 and failed_count == 0


class RAGBuild:
    """Redis-backed RAG helper.

    Builds vector indexes from uploaded files (PDF/TXT), creates
    retrievers over them, and maintains a semantic question/answer cache
    keyed per user. All Redis access goes through one shared pool.
    """

    def __init__(self, redis_url: str, embedding_model_name: str, embedding_model_api_key: str,
                 chunk_size: int = 1000, chunk_overlap: int = 200):
        """Store configuration and set up the embedding model, splitter and pool.

        Args:
            redis_url: Redis connection URL.
            embedding_model_name: DashScope embedding model identifier.
            embedding_model_api_key: API key, exported to DASHSCOPE_API_KEY.
            chunk_size: Maximum characters per text chunk.
            chunk_overlap: Character overlap between consecutive chunks.
        """
        self.redis_url = redis_url
        self.embedding_model_name = embedding_model_name
        self.embedding_model_api_key = embedding_model_api_key

        # DashScopeEmbeddings reads its key from this environment variable
        os.environ["DASHSCOPE_API_KEY"] = self.embedding_model_api_key

        self.embedding_model = DashScopeEmbeddings(model=self.embedding_model_name)

        # Splitter handles both Chinese punctuation and generic separators
        self.text_splitter = RecursiveCharacterTextSplitter(
            separators=["\n\n", "\n", "。", "！", "？", "；", " ", ""],
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            length_function=len,
            is_separator_regex=False,
            keep_separator=True
        )

        # Shared pool so every helper reuses connections instead of dialing anew
        self.redis_pool = redis.ConnectionPool.from_url(redis_url)

    def get_redis_connection(self):
        """Return a Redis client bound to the shared connection pool."""
        return redis.Redis(connection_pool=self.redis_pool)

    @staticmethod
    def _cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
        """Cosine similarity of two vectors; 0.0 if either has zero norm."""
        dot_product = sum(a * b for a, b in zip(vec1, vec2))
        norm_a = sum(a * a for a in vec1) ** 0.5
        norm_b = sum(b * b for b in vec2) ** 0.5
        return dot_product / (norm_a * norm_b) if norm_a and norm_b else 0.0

    def get_similar_cached_answer(self, user_id: int, question: str,
                                  similarity_threshold: float = 0.75) -> Tuple[Optional[str], float]:
        """Look up a semantically similar cached answer for *question*.

        Embeds the question, scans the user's qa_cache keys, and compares
        against each stored embedding.

        Args:
            user_id: Cache namespace owner.
            question: The new question to match.
            similarity_threshold: Minimum cosine similarity to accept.

        Returns:
            (answer, similarity) of the best match above the threshold,
            or (None, 0.0) when nothing qualifies or on error.
        """
        r = self.get_redis_connection()
        question_embedding = self.embedding_model.embed_query(question)
        pattern = f"qa_cache:{user_id}:*"

        best_match = (None, 0.0)

        try:
            # Collect all matching keys via SCAN. BUGFIX: the old loop
            # initialized cursor to the *string* '0' while comparing
            # against the *int* 0 — it happened to work with redis-py but
            # was fragile; use the same integer do/while pattern as the
            # module-level helpers.
            keys = []
            cursor = 0
            while True:
                cursor, partial_keys = r.scan(cursor=cursor, match=pattern, count=100)
                keys.extend(partial_keys)
                if cursor == 0:
                    break

            if not keys:
                logger.debug(f"用户 {user_id} 无缓存记录")
                return best_match

            # Fetch every cached entry in a single round trip.
            # NOTE(review): hasattr(r, 'json') is True on any modern
            # redis-py client even when the server lacks the RedisJSON
            # module — presumably the deployment always has it; verify.
            pipe = r.pipeline()
            for key in keys:
                if hasattr(r, 'json'):
                    pipe.json().get(key)
                else:
                    pipe.get(key)
            cached_data_list = pipe.execute()

            # Score each entry; keep the best one above the threshold
            for key, cached_data in zip(keys, cached_data_list):
                if not cached_data:
                    continue

                try:
                    if not hasattr(r, 'json'):
                        cached_data = json.loads(cached_data)

                    # Validate shape before touching fields
                    if not isinstance(cached_data, dict):
                        logger.warning(f"缓存数据格式错误 {key}: 不是字典类型")
                        continue

                    if "embedding" not in cached_data:
                        logger.warning(f"缓存数据缺失embedding字段 {key}")
                        continue

                    similarity = self._cosine_similarity(
                        question_embedding,
                        cached_data["embedding"]
                    )

                    if similarity > best_match[1] and similarity >= similarity_threshold:
                        best_match = (cached_data.get("answer"), similarity)
                        logger.debug(f"发现更高相似度匹配: {similarity:.2f}")

                except Exception as e:
                    logger.warning(f"解析缓存数据失败 {key}: {str(e)}")
                    continue

        except Exception as e:
            logger.error(f"缓存查询失败: {str(e)}")

        logger.info(f"缓存查询完成，最佳相似度: {best_match[1]:.2f}")
        return best_match

    def set_similar_cached_answer(self, user_id: int, question: str,
                                  answer: str, expire_time: int = 3600):
        """Cache *answer* for *question* under the user's namespace.

        Stores the question embedding alongside the answer so later
        lookups can match semantically similar questions. Rejects empty
        inputs and payloads over 10MB; entries expire after *expire_time*
        seconds. Errors are logged, never raised.
        """
        if not question or not answer:
            logger.warning("无效的缓存数据: 问题或答案为空")
            return

        try:
            r = self.get_redis_connection()
            question_embedding = self.embedding_model.embed_query(question)
            question_hash = hashlib.md5(question.encode()).hexdigest()
            cache_key = f"qa_cache:{user_id}:{question_hash}"

            qa_data = {
                "question": question,
                "answer": answer,
                "embedding": question_embedding,
                "timestamp": datetime.now().isoformat(),
                "metadata": {
                    "model": "RAG",
                    "version": "1.0"
                }
            }

            # Size check on the serialized payload before touching Redis
            data_size = len(json.dumps(qa_data))
            if data_size > 10 * 1024 * 1024:  # 10MB limit
                logger.warning(f"缓存数据过大({data_size / 1024:.2f}KB)，将被拒绝")
                return

            if hasattr(r, 'json'):
                r.json().set(cache_key, '.', qa_data)
                r.expire(cache_key, expire_time)
            else:
                # BUGFIX: set the value and TTL atomically with SET ex=;
                # the old SET-then-EXPIRE pair could leave a key without
                # a TTL if the process died between the two commands.
                r.set(cache_key, json.dumps(qa_data), ex=expire_time)

            logger.info(f"缓存存储成功: {cache_key} (过期时间: {expire_time}s)")

        except Exception as e:
            logger.error(f"缓存存储失败: {str(e)}")

    def get_embedding_retriever(
            self,
            redis_vector_store: RedisVectorStore,
            similarity_threshold: float = 0.5,
            k: int = 2,
            search_type: str = "similarity",
            filter_conditions: Optional[Dict] = None
    ) -> Optional[VectorStoreRetriever]:
        """Create a retriever over an existing Redis vector store.

        Args:
            redis_vector_store: Redis vector store instance.
            similarity_threshold: Score cutoff (0-1). NOTE(review): this
                only takes effect when search_type is
                "similarity_score_threshold" — confirm callers pass it.
            k: Number of most relevant documents to return.
            search_type: "similarity" | "mmr" | "similarity_score_threshold".
            filter_conditions: Optional metadata filter.

        Returns:
            A VectorStoreRetriever, or None on invalid input / failure.
        """
        if not isinstance(redis_vector_store, RedisVectorStore):
            logger.error("必须提供有效的RedisVectorStore实例")
            return None

        search_kwargs = {
            "k": k,
            "score_threshold": similarity_threshold
        }

        if filter_conditions:
            search_kwargs["filter"] = filter_conditions

        try:
            retriever = redis_vector_store.as_retriever(
                search_type=search_type,
                search_kwargs=search_kwargs
            )

            # Attach this RAGBuild instance to the vector store so
            # downstream code can reach cache/config helpers
            setattr(retriever.vectorstore, '_rag_build', self)

            logger.info(
                f"创建检索器成功: 类型={search_type}, 阈值={similarity_threshold}, k={k} "
                f"{'带过滤条件' if filter_conditions else ''}"
            )
            return retriever

        except Exception as e:
            logger.error(f"创建检索器失败: {str(e)}")
            return None

    def _process_file(self, file: FileStorage, file_name: str) -> List[Document]:
        """Convert one uploaded file into Document objects.

        Supports .pdf (text extracted per page) and .txt (UTF-8).
        Unsupported types and processing errors yield an empty list.
        """
        try:
            if file_name.lower().endswith(".pdf"):
                pdf_stream = BytesIO(file.read())
                reader = PdfReader(pdf_stream)
                # BUGFIX: extract_text() may return None for image-only
                # pages; coalesce to "" so the join cannot raise TypeError
                text = "\n".join([(page.extract_text() or "") for page in reader.pages])
                metadata = {
                    "source": file_name,
                    "type": "pdf",
                    "pages": len(reader.pages),
                    "processed_at": datetime.now().isoformat()
                }
                return [Document(page_content=text, metadata=metadata)]

            elif file_name.lower().endswith(".txt"):
                content = file.read().decode("utf-8")
                metadata = {
                    "source": file_name,
                    "type": "text",
                    "processed_at": datetime.now().isoformat()
                }
                return [Document(page_content=content, metadata=metadata)]

            else:
                logger.warning(f"不支持的文件类型: {file_name}")
                return []

        except Exception as e:
            logger.error(f"处理文件 {file_name} 失败: {str(e)}")
            return []

    def embedding_index(self, index_name: str, files: List[FileStorage],
                        files_name: List[str]) -> Optional[RedisVectorStore]:
        """Build (or extend) a vector index from uploaded files.

        Splits each processed file into chunks and adds them to the index.

        Args:
            index_name: Target Redis index name.
            files: Uploaded files; must pair up with files_name.
            files_name: Original file names, same order as files.

        Returns:
            The RedisVectorStore when at least one chunk was added,
            otherwise None.
        """
        if len(files) != len(files_name):
            logger.error("文件列表和文件名列表长度不匹配")
            return None

        try:
            redis_config = RedisConfig(
                redis_url=self.redis_url,
                index_name=index_name,
            )

            redis_vector_store = RedisVectorStore(
                self.embedding_model,
                redis_config
            )

            total_docs = 0
            for file, file_name in zip(files, files_name):
                documents = self._process_file(file, file_name)
                if not documents:
                    continue

                segments = self.text_splitter.split_documents(documents)
                if segments:
                    redis_vector_store.add_documents(segments)
                    total_docs += len(segments)
                    logger.info(f"已处理文件 {file_name}，生成 {len(segments)} 个文档片段")

            # BUGFIX: only claim success when something was actually
            # indexed; the old code logged success even for 0 chunks
            # while returning None.
            if total_docs > 0:
                logger.success(f"✅ 索引构建完成，共添加 {total_docs} 个文档片段到索引 {index_name}")
                return redis_vector_store
            logger.warning(f"索引 {index_name} 未添加任何文档片段")
            return None

        except Exception as e:
            logger.error(f"构建索引失败: {str(e)}")
            return None

    def get_redis_vector_store_by_index_name(self, index_name: str) -> Optional[RedisVectorStore]:
        """Return a RedisVectorStore bound to *index_name*, or None on failure."""
        try:
            redis_config = RedisConfig(
                redis_url=self.redis_url,
                index_name=index_name,
            )

            return RedisVectorStore(
                self.embedding_model,
                redis_config
            )
        except Exception as e:
            logger.error(f"获取向量存储失败: {str(e)}")
            return None

    def fast_build_faq(self, user_id: int, faq_name: str, files: List[FileStorage],
                       files_name: List[str]) -> Optional[RedisVectorStore]:
        """Build or incrementally update a user knowledge base in one call.

        Derives the unique index name from (user_id, faq_name) and feeds
        the uploaded files through embedding_index.

        Returns:
            The populated RedisVectorStore, or None when the build failed.
        """
        index_name = get_index_name(user_id, faq_name)
        logger.info(f"开始构建/更新索引：{index_name}")

        # An existing store means we are appending to a prior index
        existing_store = self.get_redis_vector_store_by_index_name(index_name)
        if existing_store:
            logger.info(f"发现已有索引 {index_name}，将进行增量更新")

        redis_vector_store = self.embedding_index(
            index_name=index_name,
            files=files,
            files_name=files_name
        )

        if not redis_vector_store:
            logger.error("向量数据库构建失败")
            return None

        return redis_vector_store

    def check_index_quality(self, index_name: str, sample_queries: Optional[List[str]] = None) -> Dict:
        """Probe an index with sample queries and return a quality report.

        The report contains FT.INFO statistics plus per-query result
        counts, average scores and distinct sources.

        Args:
            index_name: Index to inspect.
            sample_queries: Probe queries; defaults to three generic ones.
                (BUGFIX: annotation was `List[str] = None`.)

        Returns:
            Dict report; contains an "error" key on failure.
        """
        if not sample_queries:
            sample_queries = [
                "这个知识库的主要内容是什么？",
                "请列举几个关键概念",
                "有哪些重要的日期或数字？"
            ]

        vector_store = self.get_redis_vector_store_by_index_name(index_name)
        if not vector_store:
            return {"error": "索引不存在"}

        quality_report = {
            "index_name": index_name,
            "check_time": datetime.now().isoformat(),
            "sample_queries": {},
            "stats": {}
        }

        try:
            # FT.INFO returns a flat [field, value, field, value, ...] list
            r = self.get_redis_connection()
            index_info = r.execute_command("FT.INFO", index_name)
            if isinstance(index_info, list):
                for i in range(0, len(index_info), 2):
                    if i + 1 < len(index_info):
                        field = index_info[i]
                        # BUGFIX: decode bytes field names so the report
                        # stays JSON-serializable (values may still be
                        # bytes or nested lists — left as returned).
                        if isinstance(field, bytes):
                            field = field.decode("utf-8", errors="replace")
                        quality_report["stats"][field] = index_info[i + 1]

            # Run the probe queries through a default retriever
            retriever = self.get_embedding_retriever(vector_store)
            if not retriever:
                return quality_report

            for query in sample_queries:
                try:
                    # NOTE(review): get_relevant_documents is deprecated in
                    # newer langchain-core in favor of invoke(); kept for
                    # compatibility with the pinned version.
                    docs = retriever.get_relevant_documents(query)
                    quality_report["sample_queries"][query] = {
                        "num_results": len(docs),
                        "avg_score": sum(d.metadata.get("score", 0) for d in docs) / len(docs) if docs else 0,
                        "sources": list(set(d.metadata.get("source", "unknown") for d in docs))
                    }
                except Exception as e:
                    logger.warning(f"测试查询失败 {query}: {str(e)}")
                    quality_report["sample_queries"][query] = {"error": str(e)}

        except Exception as e:
            logger.error(f"检查索引质量失败: {str(e)}")
            quality_report["error"] = str(e)

        return quality_report