"""
向量数据库服务 - 基于 LangChain PGVector
提供向量存储、搜索和管理功能
"""

import streamlit as st
import pandas as pd
import json
from typing import List, Dict, Any, Optional, Tuple, Union
from dataclasses import dataclass
from enum import Enum

from langchain_postgres import PGVector
from langchain_core.documents import Document
from langchain_community.embeddings import DashScopeEmbeddings
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

from .logger_service import get_logger_service


class DistanceStrategy(Enum):
    """Distance metrics supported for vector similarity search.

    The string values are passed straight through to PGVector's
    ``distance_strategy`` argument.
    """

    COSINE = "cosine"                        # cosine distance
    EUCLIDEAN = "euclidean"                  # L2 distance
    MAX_INNER_PRODUCT = "max_inner_product"  # negative inner product


@dataclass
class VectorSearchResult:
    """A single hit returned by a vector search.

    ``score`` is only populated by the score-returning search variants
    (e.g. ``similarity_search_with_score``); plain searches leave it None.
    """
    content: str
    metadata: Dict[str, Any]
    score: Optional[float] = None
    
    
@dataclass
class CollectionInfo:
    """Summary information about a PGVector collection."""
    # Collection name (langchain_pg_collection.name).
    name: str
    # Collection UUID as a string; empty string when the collection was not found.
    uuid: str
    # Number of embedded documents stored in the collection.
    document_count: int
    # Collection-level metadata (cmetadata); may be None when unknown.
    metadata: Optional[Dict[str, Any]] = None


class VectorService:
    """Vector database service built on LangChain PGVector.

    Search/update operations go through the PGVector store; management
    queries (document listing, counts, metadata stats) talk directly to the
    ``langchain_pg_collection`` / ``langchain_pg_embedding`` tables via
    SQLAlchemy. Most methods swallow errors, log them, and return a neutral
    value (False / empty list / empty DataFrame) so UI callers degrade
    gracefully.
    """

    def __init__(self,
                 collection_name: str,
                 embeddings_model: str = "text-embedding-v3",
                 distance_strategy: DistanceStrategy = DistanceStrategy.COSINE):
        """Initialize the vector service.

        Args:
            collection_name: Name of the PGVector collection to operate on.
            embeddings_model: DashScope embedding model name.
            distance_strategy: Distance metric used for similarity search.

        Raises:
            Exception: Propagated when the embeddings model, database
                connection, or vector store fails to initialize.
        """
        self.collection_name = collection_name
        self.embeddings_model = embeddings_model
        self.distance_strategy = distance_strategy
        self.logger = get_logger_service()

        # Database configuration, read from Streamlit secrets.
        self.db_config = {
            'user': st.secrets["postgresql"]["username"],
            'password': st.secrets["postgresql"]["password"],
            'host': st.secrets["postgresql"]["host"],
            'port': st.secrets["postgresql"]["port"],
            'database': st.secrets["postgresql"]["database"]
        }

        # SQLAlchemy/psycopg connection string shared by the engine and PGVector.
        self.connection_string = (
            f"postgresql+psycopg://{self.db_config['user']}:"
            f"{self.db_config['password']}@{self.db_config['host']}:"
            f"{self.db_config['port']}/{self.db_config['database']}"
        )

        # Initialize components in dependency order (embeddings first,
        # since the vector store needs them).
        self._init_embeddings()
        self._init_database()
        self._init_vector_store()

    def _init_embeddings(self):
        """Create the DashScope embeddings client; re-raises on failure."""
        try:
            self.embeddings = DashScopeEmbeddings(
                model=self.embeddings_model,
                dashscope_api_key=st.secrets["dashscope"]["key"]
            )
            self.logger.log_system_event("embedding_init", f"嵌入模型初始化成功: {self.embeddings_model}")
        except Exception as e:
            self.logger.log_system_event("embedding_init_error", f"嵌入模型初始化失败: {e}", level="error")
            raise

    def _init_database(self):
        """Create the SQLAlchemy engine and a long-lived session; re-raises on failure.

        NOTE(review): ``self.session`` lives for the whole service lifetime and
        is never closed; callers relying on it should be aware it can go stale.
        """
        try:
            self.engine = create_engine(self.connection_string)
            self.Session = sessionmaker(bind=self.engine)
            self.session = self.Session()
            self.logger.log_system_event("database_init", "数据库连接初始化成功")
        except Exception as e:
            self.logger.log_system_event("database_init_error", f"数据库连接初始化失败: {e}", level="error")
            raise

    def _init_vector_store(self):
        """Create the PGVector store (JSONB metadata); re-raises on failure."""
        try:
            self.vector_store = PGVector(
                embeddings=self.embeddings,
                collection_name=self.collection_name,
                connection=self.connection_string,
                use_jsonb=True,
                distance_strategy=self.distance_strategy.value,
            )
            self.logger.log_system_event(
                "vector_store_init",
                f"向量存储初始化成功: {self.collection_name}, "
                f"策略: {self.distance_strategy.value}"
            )
        except Exception as e:
            self.logger.log_system_event("vector_store_init_error", f"向量存储初始化失败: {e}", level="error")
            raise

    def get_embedding_dimension(self) -> int:
        """Return the embedding vector dimension.

        Determined empirically by embedding a probe string; falls back to
        1536 when the embedding call fails.
        """
        try:
            test_vector = self.embeddings.embed_query("test")
            return len(test_vector)
        except Exception as e:
            self.logger.log_system_event("get_embedding_dimension_error", f"获取向量维度失败: {e}", level="error")
            return 1536  # fallback default dimension

    def create_collection(self, metadata: Optional[Dict[str, Any]] = None) -> bool:
        """Create (and verify) the collection.

        PGVector creates the collection lazily, so this inserts and then
        deletes a throwaway document to force creation and verify access.

        Args:
            metadata: Metadata attached to the probe document.

        Returns:
            bool: True on success, False on failure (error is logged).
        """
        try:
            test_doc = Document(
                page_content="test document for collection creation",
                metadata=metadata or {"_test": True}
            )
            self.vector_store.add_documents([test_doc], ids=["_test_doc"])

            # Remove the probe document again.
            self.vector_store.delete(ids=["_test_doc"])

            self.logger.log_system_event("collection_create", f"集合创建成功: {self.collection_name}")
            return True
        except Exception as e:
            self.logger.log_system_event("collection_create_error", f"集合创建失败: {e}", level="error")
            return False

    def add_documents(self,
                     documents: List[Document],
                     ids: Optional[List[str]] = None) -> bool:
        """Add documents to the vector store.

        Args:
            documents: Documents to embed and store.
            ids: Optional explicit document IDs (must match documents in length).

        Returns:
            bool: True on success, False on failure (error is logged).
        """
        try:
            if ids and len(ids) != len(documents):
                raise ValueError("文档数量与ID数量不匹配")

            self.vector_store.add_documents(documents, ids=ids)

            self.logger.log_system_event(
                "documents_add",
                f"文档添加成功: {len(documents)} 个文档到集合 {self.collection_name}"
            )
            return True
        except Exception as e:
            self.logger.log_system_event("documents_add_error", f"文档添加失败: {e}", level="error")
            return False

    def add_texts(self,
                  texts: List[str],
                  metadatas: Optional[List[Dict[str, Any]]] = None,
                  ids: Optional[List[str]] = None) -> bool:
        """Add raw texts to the vector store.

        Args:
            texts: Texts to embed and store.
            metadatas: Optional per-text metadata (must match texts in length).
            ids: Optional explicit document IDs (must match texts in length).

        Returns:
            bool: True on success, False on failure (error is logged).
        """
        try:
            if metadatas and len(metadatas) != len(texts):
                raise ValueError("文本数量与元数据数量不匹配")
            if ids and len(ids) != len(texts):
                raise ValueError("文本数量与ID数量不匹配")

            self.vector_store.add_texts(texts, metadatas=metadatas, ids=ids)

            self.logger.log_system_event(
                "texts_add",
                f"文本添加成功: {len(texts)} 个文本到集合 {self.collection_name}"
            )
            return True
        except Exception as e:
            self.logger.log_system_event("texts_add_error", f"文本添加失败: {e}", level="error")
            return False

    def similarity_search(self,
                         query: str,
                         k: int = 4,
                         filter: Optional[Dict[str, Any]] = None) -> List[VectorSearchResult]:
        """Similarity search by query text.

        Args:
            query: Query text.
            k: Number of results to return.
            filter: Optional metadata filter passed through to PGVector.

        Returns:
            List[VectorSearchResult]: Results without scores; empty on error.
        """
        try:
            results = self.vector_store.similarity_search(query, k=k, filter=filter)

            search_results = [
                VectorSearchResult(
                    content=doc.page_content,
                    metadata=doc.metadata
                ) for doc in results
            ]

            self.logger.log_system_event(
                "similarity_search",
                f"相似度搜索完成: 查询='{query}', 结果数={len(search_results)}"
            )
            return search_results

        except Exception as e:
            self.logger.log_system_event("similarity_search_error", f"相似度搜索失败: {e}", level="error")
            return []

    def similarity_search_with_score(self,
                                   query: str,
                                   k: int = 4,
                                   filter: Optional[Dict[str, Any]] = None) -> List[VectorSearchResult]:
        """Similarity search that also returns distance scores.

        Args:
            query: Query text.
            k: Number of results to return.
            filter: Optional metadata filter passed through to PGVector.

        Returns:
            List[VectorSearchResult]: Results with ``score`` set; empty on error.
        """
        try:
            results = self.vector_store.similarity_search_with_score(
                query, k=k, filter=filter
            )

            search_results = [
                VectorSearchResult(
                    content=doc.page_content,
                    metadata=doc.metadata,
                    score=score
                ) for doc, score in results
            ]

            self.logger.log_system_event(
                "similarity_search_with_score",
                f"带分数相似度搜索完成: 查询='{query}', 结果数={len(search_results)}"
            )
            return search_results

        except Exception as e:
            self.logger.log_system_event("similarity_search_with_score_error", f"带分数相似度搜索失败: {e}", level="error")
            return []

    def max_marginal_relevance_search(self,
                                    query: str,
                                    k: int = 4,
                                    fetch_k: int = 20,
                                    lambda_mult: float = 0.5) -> List[VectorSearchResult]:
        """Maximal marginal relevance (MMR) search.

        Args:
            query: Query text.
            k: Number of results to return.
            fetch_k: Number of candidates fetched before MMR re-ranking.
            lambda_mult: Diversity trade-off in [0, 1] (0 = max diversity).

        Returns:
            List[VectorSearchResult]: MMR-ranked results; empty on error.
        """
        try:
            results = self.vector_store.max_marginal_relevance_search(
                query=query,
                k=k,
                fetch_k=fetch_k,
                lambda_mult=lambda_mult
            )

            search_results = [
                VectorSearchResult(
                    content=doc.page_content,
                    metadata=doc.metadata
                ) for doc in results
            ]

            self.logger.log_system_event(
                "mmr_search",
                f"MMR搜索完成: 查询='{query}', 结果数={len(search_results)}"
            )
            return search_results

        except Exception as e:
            self.logger.log_system_event("mmr_search_error", f"MMR搜索失败: {e}", level="error")
            return []

    def similarity_search_by_vector(self,
                                  embedding: List[float],
                                  k: int = 4) -> List[VectorSearchResult]:
        """Similarity search using a precomputed embedding vector.

        Args:
            embedding: Query embedding vector.
            k: Number of results to return.

        Returns:
            List[VectorSearchResult]: Results without scores; empty on error.
        """
        try:
            results = self.vector_store.similarity_search_by_vector(embedding, k=k)

            search_results = [
                VectorSearchResult(
                    content=doc.page_content,
                    metadata=doc.metadata
                ) for doc in results
            ]

            self.logger.log_system_event("vector_search", f"向量搜索完成: 结果数={len(search_results)}")
            return search_results

        except Exception as e:
            self.logger.log_system_event("vector_search_error", f"向量搜索失败: {e}", level="error")
            return []

    def delete_documents(self, ids: List[str]) -> bool:
        """Delete documents by ID.

        Args:
            ids: IDs of the documents to delete.

        Returns:
            bool: True on success, False on failure (error is logged).
        """
        try:
            self.vector_store.delete(ids=ids)
            self.logger.log_system_event("documents_delete", f"文档删除成功: {len(ids)} 个文档")
            return True
        except Exception as e:
            self.logger.log_system_event("documents_delete_error", f"文档删除失败: {e}", level="error")
            return False

    def update_document(self,
                       doc_id: str,
                       content: str,
                       metadata: Optional[Dict[str, Any]] = None) -> bool:
        """Replace a document's content and metadata under the same ID.

        Implemented as delete-then-add; not atomic — a failure between the
        two steps can leave the document deleted.

        Args:
            doc_id: ID of the document to replace.
            content: New page content.
            metadata: New metadata ({} when omitted).

        Returns:
            bool: True on success, False on failure (error is logged).
        """
        try:
            # Remove the old version first.
            self.vector_store.delete(ids=[doc_id])

            # Re-insert under the same ID.
            new_doc = Document(page_content=content, metadata=metadata or {})
            self.vector_store.add_documents([new_doc], ids=[doc_id])

            self.logger.log_system_event("document_update", f"文档更新成功: ID={doc_id}")
            return True
        except Exception as e:
            self.logger.log_system_event("document_update_error", f"文档更新失败: {e}", level="error")
            return False

    def clear_collection(self) -> bool:
        """Delete every document in the collection (collection row remains).

        Returns:
            bool: True on success, False on failure (error is logged).
        """
        try:
            # Collect all document IDs and delete them in one call.
            docs_df = self.get_documents()
            if not docs_df.empty:
                all_ids = docs_df['id'].astype(str).tolist()
                self.vector_store.delete(ids=all_ids)

            self.logger.log_system_event("collection_clear", f"集合清空成功: {self.collection_name}")
            return True
        except Exception as e:
            self.logger.log_system_event("collection_clear_error", f"集合清空失败: {e}", level="error")
            return False

    def get_collection_info(self) -> CollectionInfo:
        """Fetch collection name/UUID/metadata and its document count.

        Returns:
            CollectionInfo: Populated info, or a zero-count placeholder when
                the collection does not exist or the query fails.
        """
        try:
            # Collection row from the LangChain bookkeeping table.
            collection_query = text("""
                SELECT name, uuid, cmetadata
                FROM langchain_pg_collection 
                WHERE name = :collection_name
            """)

            collection_result = self.session.execute(
                collection_query,
                {"collection_name": self.collection_name}
            ).fetchone()

            if not collection_result:
                return CollectionInfo(
                    name=self.collection_name,
                    uuid="",
                    document_count=0
                )

            # Count embeddings belonging to this collection.
            count_query = text("""
                SELECT COUNT(*) 
                FROM langchain_pg_embedding
                WHERE collection_id = :collection_uuid
            """)

            count_result = self.session.execute(
                count_query,
                {"collection_uuid": collection_result[1]}
            ).scalar()

            return CollectionInfo(
                name=collection_result[0],
                uuid=str(collection_result[1]),
                document_count=count_result or 0,
                metadata=collection_result[2] or {}
            )

        except Exception as e:
            # Roll back so the long-lived session does not stay stuck in an
            # aborted transaction, which would fail every subsequent query.
            try:
                self.session.rollback()
            except Exception:
                pass  # best-effort: session may already be unusable
            self.logger.log_system_event("get_collection_info_error", f"获取集合信息失败: {e}", level="error")
            return CollectionInfo(
                name=self.collection_name,
                uuid="",
                document_count=0
            )

    def get_documents(self, limit: Optional[int] = None, offset: int = 0) -> pd.DataFrame:
        """List documents in the collection as a DataFrame.

        Args:
            limit: Maximum number of rows to return (None/0 = no limit).
            offset: Number of rows to skip (0 = no offset).

        Returns:
            pd.DataFrame: Columns ``id`` (str), ``document``, ``cmetadata``;
                empty DataFrame on error.
        """
        try:
            query = """
                SELECT e.id, e.document, e.cmetadata
                FROM langchain_pg_embedding e
                JOIN langchain_pg_collection c ON e.collection_id = c.uuid
                WHERE c.name = :collection_name
                ORDER BY e.id ASC
            """
            params: Dict[str, Any] = {"collection_name": self.collection_name}

            # Bind LIMIT/OFFSET as parameters instead of interpolating them
            # into the SQL string (avoids injection and type errors).
            if limit:
                query += " LIMIT :limit"
                params["limit"] = int(limit)
            if offset:
                query += " OFFSET :offset"
                params["offset"] = int(offset)

            df = pd.read_sql(
                text(query),
                self.engine,
                params=params
            )

            # Normalize id dtype so downstream string handling is uniform.
            if not df.empty and 'id' in df.columns:
                df['id'] = df['id'].astype(str)

            return df

        except Exception as e:
            self.logger.log_system_event("get_documents_error", f"获取文档列表失败: {e}", level="error")
            return pd.DataFrame()

    def get_metadata_stats(self) -> pd.DataFrame:
        """Count how many documents carry each top-level metadata key.

        Returns:
            pd.DataFrame: Columns ``metadata_key`` and ``count``; empty on error.
        """
        try:
            # cmetadata is stored as JSONB (the store is created with
            # use_jsonb=True), so jsonb_object_keys must be used —
            # json_object_keys does not accept a jsonb argument.
            query = text("""
                SELECT 
                    jsonb_object_keys(cmetadata) as metadata_key,
                    COUNT(*) as count
                FROM langchain_pg_embedding e
                JOIN langchain_pg_collection c ON e.collection_id = c.uuid
                WHERE c.name = :collection_name
                GROUP BY jsonb_object_keys(cmetadata)
                ORDER BY count DESC
            """)

            df = pd.read_sql(
                query,
                self.engine,
                params={"collection_name": self.collection_name}
            )

            return df

        except Exception as e:
            self.logger.log_system_event("get_metadata_stats_error", f"获取元数据统计失败: {e}", level="error")
            return pd.DataFrame()

    def export_documents(self) -> str:
        """Export all documents as a JSON string.

        Returns:
            str: JSON array of {id, page_content, metadata}; "[]" when the
                collection is empty or on error.
        """
        try:
            df = self.get_documents()
            if df.empty:
                return "[]"

            # Convert rows to the export schema.
            export_data = []
            for _, row in df.iterrows():
                export_data.append({
                    "id": str(row['id']),
                    "page_content": row['document'],
                    "metadata": row['cmetadata'] or {}
                })

            return json.dumps(export_data, ensure_ascii=False, indent=2)

        except Exception as e:
            self.logger.log_system_event("export_documents_error", f"导出文档失败: {e}", level="error")
            return "[]"

    def get_service_status(self) -> Dict[str, Any]:
        """Return a status snapshot of the service and its collection.

        Returns:
            Dict[str, Any]: Configuration, collection stats, and a "status"
                key ("active", or "error" plus an "error" message).
        """
        try:
            collection_info = self.get_collection_info()
            return {
                "collection_name": self.collection_name,
                "embeddings_model": self.embeddings_model,
                "distance_strategy": self.distance_strategy.value,
                "embedding_dimension": self.get_embedding_dimension(),
                "document_count": collection_info.document_count,
                "collection_uuid": collection_info.uuid,
                # Password is deliberately omitted from the reported config.
                "database_config": {
                    "host": self.db_config["host"],
                    "port": self.db_config["port"],
                    "database": self.db_config["database"],
                    "user": self.db_config["user"]
                },
                "status": "active"
            }

        except Exception as e:
            self.logger.log_system_event("get_service_status_error", f"获取服务状态失败: {e}", level="error")
            return {
                "collection_name": self.collection_name,
                "status": "error",
                "error": str(e)
            }


# 全局服务实例缓存
@st.cache_resource
def get_vector_service(collection_name: str,
                      embeddings_model: str = "text-embedding-v3",
                      distance_strategy: str = "cosine") -> VectorService:
    """Return a cached VectorService for the given collection.

    Streamlit caches one instance per unique argument combination, so
    repeated calls with the same arguments reuse the same service.

    Args:
        collection_name: Name of the vector collection.
        embeddings_model: Embedding model identifier.
        distance_strategy: Distance metric name ("cosine", "euclidean",
            or "max_inner_product").

    Returns:
        VectorService: The (possibly cached) service instance.
    """
    return VectorService(
        collection_name=collection_name,
        embeddings_model=embeddings_model,
        distance_strategy=DistanceStrategy(distance_strategy),
    )


def create_vector_service(collection_name: str,
                         embeddings_model: str = "text-embedding-v3",
                         distance_strategy: str = "cosine") -> VectorService:
    """Build a brand-new VectorService instance (bypasses the cache).

    Args:
        collection_name: Name of the vector collection.
        embeddings_model: Embedding model identifier.
        distance_strategy: Distance metric name ("cosine", "euclidean",
            or "max_inner_product").

    Returns:
        VectorService: A freshly constructed service instance.
    """
    return VectorService(
        collection_name=collection_name,
        embeddings_model=embeddings_model,
        distance_strategy=DistanceStrategy(distance_strategy),
    )


# ======================== 全局集合管理功能 ========================

@st.cache_data(ttl=60)  # cache for 1 minute
def get_all_collections() -> List[Dict[str, Any]]:
    """Return detailed information about every vector collection.

    Returns:
        List[Dict[str, Any]]: One dict per collection with keys:
        - name: collection name
        - uuid: collection UUID (as a string)
        - metadata: collection metadata dict
        - doc_count: number of embedded documents
        - first_doc_id: smallest embedding row id (None when empty)
        - last_doc_id: largest embedding row id (None when empty)
        - is_empty: True when the collection holds no documents

        Empty list on error (the error is logged).
    """
    logger = get_logger_service()
    engine = None

    try:
        # Database configuration: prefer the [postgresql] secrets section,
        # fall back to [connections.sql], then to local-dev defaults.
        db_config = {
            'user': st.secrets.get("postgresql", {}).get("username") or st.secrets.get("connections", {}).get("sql", {}).get("username", "docker"),
            'password': st.secrets.get("postgresql", {}).get("password") or st.secrets.get("connections", {}).get("sql", {}).get("password", "docker"),
            'host': st.secrets.get("postgresql", {}).get("host") or st.secrets.get("connections", {}).get("sql", {}).get("host", "localhost"),
            'port': st.secrets.get("postgresql", {}).get("port") or st.secrets.get("connections", {}).get("sql", {}).get("port", 5432),
            'database': st.secrets.get("postgresql", {}).get("database") or st.secrets.get("connections", {}).get("sql", {}).get("database", "streamlit_generic")
        }

        connection_string = (
            f"postgresql+psycopg://{db_config['user']}:"
            f"{db_config['password']}@{db_config['host']}:"
            f"{db_config['port']}/{db_config['database']}"
        )

        # Throwaway engine for this (cached) call; disposed in finally.
        engine = create_engine(connection_string)

        # One row per collection, with aggregate document stats.
        query = text("""
            SELECT 
                c.name as collection_name,
                c.uuid as collection_id,
                c.cmetadata as metadata,
                COUNT(e.id) as document_count,
                MIN(e.id) as first_doc_id,
                MAX(e.id) as last_doc_id
            FROM langchain_pg_collection c
            LEFT JOIN langchain_pg_embedding e ON c.uuid = e.collection_id
            WHERE c.name IS NOT NULL 
            GROUP BY c.name, c.uuid
            ORDER BY c.name
        """)

        collections_data = []
        with engine.connect() as conn:
            result = conn.execute(query)
            for row in result.fetchall():
                collections_data.append({
                    "name": row[0],
                    "uuid": str(row[1]),
                    "metadata": row[2] or {},
                    "doc_count": int(row[3] or 0),
                    "first_doc_id": row[4],
                    "last_doc_id": row[5],
                    "is_empty": (row[3] or 0) == 0
                })

        if not collections_data:
            logger.log_system_event("get_all_collections_warning", "未找到任何集合", level="warning")

        logger.log_system_event("get_all_collections_success", f"成功获取 {len(collections_data)} 个集合")
        return collections_data

    except Exception as e:
        logger.log_system_event("get_all_collections_error", f"获取集合列表失败: {e}", level="error")
        return []
    finally:
        # Dispose the engine so its connection pool is not leaked on every
        # cache refresh (a new engine is created per uncached call).
        if engine is not None:
            engine.dispose()


def get_collection_names() -> List[str]:
    """Return the name of every vector collection in the database.

    Returns:
        List[str]: Collection names (empty when none exist or lookup fails).
    """
    return [entry["name"] for entry in get_all_collections()]


def get_collection_details(collection_name: str) -> Dict[str, Any]:
    """Look up detailed information for one collection by name.

    Args:
        collection_name: Name of the collection to look up.

    Returns:
        Dict[str, Any]: The matching entry from get_all_collections(), or a
            zero-count placeholder when no collection with that name exists.
    """
    found = next(
        (entry for entry in get_all_collections()
         if entry["name"] == collection_name),
        None,
    )
    if found is not None:
        return found

    # Not found: return a default, empty-collection record.
    return {
        "name": collection_name,
        "uuid": "",
        "metadata": {},
        "doc_count": 0,
        "first_doc_id": None,
        "last_doc_id": None,
        "is_empty": True
    }


def get_collections_summary() -> Dict[str, Any]:
    """Aggregate summary statistics over all vector collections.

    Returns:
        Dict[str, Any]: Summary with keys:
        - total_collections: total number of collections
        - total_documents: total document count across collections
        - non_empty_collections: collections holding at least one document
        - empty_collections: collections holding no documents
        - collections_with_metadata: collections with non-empty metadata
        - collection_names: list of all collection names
    """
    collections = get_all_collections()

    # Single pass over the collection list for all counters.
    total_documents = 0
    non_empty = 0
    with_metadata = 0
    for entry in collections:
        total_documents += entry["doc_count"]
        if not entry["is_empty"]:
            non_empty += 1
        if entry["metadata"]:
            with_metadata += 1

    return {
        "total_collections": len(collections),
        "total_documents": total_documents,
        "non_empty_collections": non_empty,
        "empty_collections": len(collections) - non_empty,
        "collections_with_metadata": with_metadata,
        "collection_names": [entry["name"] for entry in collections]
    }