"""向量数据库操作模块，处理RAG相关的数据存储和检索"""

import logging
import threading
import time
import uuid
from datetime import datetime
from typing import List, Dict, Any, Optional, Tuple, Union

import numpy as np
import psycopg2
from psycopg2 import sql
from psycopg2.extensions import connection as PgConnection
from psycopg2.extras import Json

from backend.common.config import config
from backend.common.config import config_manager
from backend.llm import llm_manager

logger = logging.getLogger('vector_db')

class VectorDBManager:
    """Vector database manager: document embedding, storage and retrieval.

    Wraps a single, lazily created psycopg2 connection to a PostgreSQL
    database that has the pgvector extension installed.  Every public
    method is serialized through one re-entrant lock, so a shared
    instance is thread-safe (at the cost of query parallelism).
    """

    # Dimensionality of the `embedding` column created by create_tables().
    # Must match the embedding model of the active LLM provider.
    EMBEDDING_DIM = 768

    def __init__(self):
        """Initialize the manager; the DB connection is created on first use."""
        self.config = config_manager.get_vector_db_config()
        self._conn = None         # lazily created psycopg2 connection
        self._embed_model = None  # reserved for a locally cached embedding model
        # Re-entrant so public methods may call each other while holding it.
        self._lock = threading.RLock()

    @property
    def connection(self) -> PgConnection:
        """Return the shared connection, (re)connecting when absent or closed.

        Raises:
            Exception: whatever ``psycopg2.connect`` raises on failure.
        """
        with self._lock:
            # `closed != 0` covers both cleanly closed and broken connections.
            if self._conn is None or self._conn.closed != 0:
                try:
                    self._conn = psycopg2.connect(
                        host=self.config.get('host'),
                        port=self.config.get('port'),
                        dbname=self.config.get('dbname'),
                        user=self.config.get('user'),
                        password=self.config.get('password')
                    )
                    logger.info(f"已连接到向量数据库: {self.config.get('host')}:{self.config.get('port')}/{self.config.get('dbname')}")
                except Exception as e:
                    logger.error(f"连接向量数据库失败: {str(e)}")
                    raise
            return self._conn

    def _generate_embedding_with_ollama(self, text: str) -> List[float]:
        """Generate an embedding for *text* via the active LLM provider.

        NOTE(review): assumes the provider returns a numpy array
        (``.tolist()`` is called) -- confirm against the provider API.
        """
        try:
            active_provider = config.llm_providers.active_provider
            llm_provider = llm_manager.get_provider(active_provider)
            embedding = llm_provider.generate_embedding(text)
            return embedding.tolist()
        except Exception as e:
            logger.error(f"生成嵌入失败: {str(e)}")
            raise

    def _generate_embeddings_with_ollama(self, texts: List[str]) -> List[List[float]]:
        """Batch counterpart of ``_generate_embedding_with_ollama``."""
        try:
            active_provider = config.llm_providers.active_provider
            llm_provider = llm_manager.get_provider(active_provider)
            embeddings = llm_provider.generate_embeddings(texts)
            return [embedding.tolist() for embedding in embeddings]
        except Exception as e:
            logger.error(f"批量生成嵌入失败: {str(e)}")
            raise

    def close(self) -> None:
        """Close the database connection if it is currently open."""
        with self._lock:
            if self._conn is not None and self._conn.closed == 0:
                self._conn.close()
                self._conn = None
                logger.info("向量数据库连接已关闭")

    def create_tables(self) -> None:
        """Create the required extension, tables and indexes (idempotent).

        Raises:
            Exception: any DB error; the transaction is rolled back first.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    # The VECTOR type below requires pgvector.  IF NOT EXISTS
                    # makes this a no-op when the extension is already present.
                    cur.execute("CREATE EXTENSION IF NOT EXISTS vector")

                    # Collections table
                    cur.execute("""
                    CREATE TABLE IF NOT EXISTS collections (
                        id SERIAL PRIMARY KEY,
                        name VARCHAR(100) UNIQUE NOT NULL,
                        description TEXT,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                    )
                    """)

                    # Document chunks with their embedding vectors
                    cur.execute(f"""
                    CREATE TABLE IF NOT EXISTS documents (
                        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
                        collection_id INTEGER REFERENCES collections(id) ON DELETE CASCADE,
                        content TEXT NOT NULL,
                        metadata JSONB,
                        embedding VECTOR({self.EMBEDDING_DIM}) NOT NULL,
                        chunk_id INTEGER,
                        total_chunks INTEGER,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                    )
                    """)

                    # Lookup index by collection
                    cur.execute("""
                    CREATE INDEX IF NOT EXISTS idx_documents_collection_id
                    ON documents(collection_id)
                    """)

                    # Approximate-nearest-neighbour index for cosine search
                    cur.execute("""
                    CREATE INDEX IF NOT EXISTS idx_documents_embedding
                    ON documents USING ivfflat (embedding vector_cosine_ops)
                    WITH (lists = 100)
                    """)

                    # GIN index for JSONB metadata filters
                    cur.execute("""
                    CREATE INDEX IF NOT EXISTS idx_documents_metadata
                    ON documents USING gin(metadata)
                    """)

                    conn.commit()
                    logger.info("向量数据库表和索引已创建")
            except Exception as e:
                conn.rollback()
                logger.error(f"创建表失败: {str(e)}")
                raise

    def create_collection(self, name: str, description: Optional[str] = None) -> int:
        """Create a new document collection.

        If a collection with the same name already exists, its id is
        returned instead of raising.

        Args:
            name: collection name (unique).
            description: optional collection description.

        Returns:
            The id of the (new or pre-existing) collection.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    cur.execute(
                        "INSERT INTO collections (name, description) VALUES (%s, %s) RETURNING id",
                        (name, description)
                    )
                    collection_id = cur.fetchone()[0]
                    conn.commit()
                    logger.info(f"已创建集合: id={collection_id}, name={name}")
                    return collection_id
            except psycopg2.IntegrityError:
                conn.rollback()
                logger.warning(f"集合已存在: {name}")
                # BUG FIX: the original reused the cursor that the `with`
                # block above had already closed; open a fresh one.
                with conn.cursor() as cur:
                    cur.execute("SELECT id FROM collections WHERE name = %s", (name,))
                    result = cur.fetchone()
                if result:
                    return result[0]
                raise
            except Exception as e:
                conn.rollback()
                logger.error(f"创建集合失败: {str(e)}")
                raise

    def get_collection(self, collection_id: Optional[int] = None,
                      collection_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """Fetch a collection row by id or by name.

        Args:
            collection_id: collection id.
            collection_name: collection name (used when no id is given).

        Returns:
            The collection row as a dict, or None when not found.

        Raises:
            ValueError: if neither identifier is provided.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    # `is not None` so that a (theoretical) id of 0 still works.
                    if collection_id is not None:
                        cur.execute("SELECT * FROM collections WHERE id = %s", (collection_id,))
                    elif collection_name is not None:
                        cur.execute("SELECT * FROM collections WHERE name = %s", (collection_name,))
                    else:
                        raise ValueError("必须提供collection_id或collection_name")

                    result = cur.fetchone()
                    if result:
                        columns = [desc[0] for desc in cur.description]
                        return dict(zip(columns, result))
                    return None
            except Exception as e:
                logger.error(f"获取集合信息失败: {str(e)}")
                raise

    def list_collections(self) -> List[Dict[str, Any]]:
        """Return all collections, newest first, as a list of dicts."""
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    cur.execute("SELECT * FROM collections ORDER BY created_at DESC")
                    results = cur.fetchall()
                    columns = [desc[0] for desc in cur.description]
                    return [dict(zip(columns, row)) for row in results]
            except Exception as e:
                logger.error(f"列出集合失败: {str(e)}")
                raise

    def delete_collection(self, collection_id: Optional[int] = None,
                         collection_name: Optional[str] = None) -> bool:
        """Delete a collection (documents cascade via the FK).

        Args:
            collection_id: collection id.
            collection_name: collection name (used when no id is given).

        Returns:
            True if a row was deleted, False if nothing matched.

        Raises:
            ValueError: if neither identifier is provided.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    if collection_id is not None:
                        cur.execute("DELETE FROM collections WHERE id = %s", (collection_id,))
                    elif collection_name is not None:
                        cur.execute("DELETE FROM collections WHERE name = %s", (collection_name,))
                    else:
                        raise ValueError("必须提供collection_id或collection_name")

                    success = cur.rowcount > 0
                    conn.commit()
                    if success:
                        logger.info(f"已删除集合: id={collection_id}, name={collection_name}")
                    else:
                        logger.warning(f"集合不存在: id={collection_id}, name={collection_name}")
                    return success
            except Exception as e:
                conn.rollback()
                logger.error(f"删除集合失败: {str(e)}")
                raise

    def generate_embedding(self, text: str) -> List[float]:
        """Generate an embedding vector for *text* (delegates to the provider)."""
        return self._generate_embedding_with_ollama(text)

    def generate_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Generate embedding vectors for a batch of texts."""
        return self._generate_embeddings_with_ollama(texts)

    def split_text(self, text: str, chunk_size: int = 500, chunk_overlap: int = 50) -> List[str]:
        """Split text into fixed-size, optionally overlapping chunks.

        Args:
            text: the text to split.
            chunk_size: maximum characters per chunk.
            chunk_overlap: characters shared between consecutive chunks.

        Returns:
            List of chunks; a short (or empty) text yields ``[text]``.
        """
        if not text or len(text) <= chunk_size:
            return [text]

        # BUG FIX: a non-positive step (chunk_overlap >= chunk_size) made the
        # original while-loop spin forever; clamp the step to at least 1.
        step = max(1, chunk_size - chunk_overlap)
        return [text[start:start + chunk_size] for start in range(0, len(text), step)]

    def add_document(self, collection_id: int, content: str,
                    metadata: Optional[Dict[str, Any]] = None,
                    chunk_size: int = 500) -> List[Dict[str, Any]]:
        """Chunk a document, embed each chunk and insert it into a collection.

        Args:
            collection_id: target collection id (must exist).
            content: full document text.
            metadata: optional metadata stored with every chunk.
            chunk_size: chunk size passed to ``split_text``.

        Returns:
            List of inserted rows (id, content, metadata, created_at).

        Raises:
            ValueError: if the collection does not exist.
        """
        with self._lock:
            conn = self.connection
            try:
                # Verify the target collection exists before doing any work.
                collection = self.get_collection(collection_id=collection_id)
                if not collection:
                    raise ValueError(f"集合不存在: {collection_id}")

                chunks = self.split_text(content, chunk_size=chunk_size)
                inserted_docs = []

                with conn.cursor() as cur:
                    for i, chunk in enumerate(chunks):
                        embedding = self.generate_embedding(chunk)
                        doc_id = str(uuid.uuid4())

                        # Copy so the caller's dict is never mutated.
                        doc_metadata = metadata.copy() if metadata else {}
                        doc_metadata.update({
                            'chunk_index': i,
                            'total_chunks': len(chunks)
                        })

                        # BUG FIX: psycopg2 cannot adapt a plain dict for a
                        # JSONB column; wrap it in extras.Json.  The explicit
                        # ::vector cast converts the Python list (adapted as a
                        # Postgres array) via pgvector's array->vector cast --
                        # NOTE(review): requires a pgvector version providing
                        # that cast; confirm against the deployed extension.
                        cur.execute(
                            """
                            INSERT INTO documents (id, collection_id, content, metadata, embedding, chunk_id, total_chunks)
                            VALUES (%s, %s, %s, %s, %s::vector, %s, %s)
                            RETURNING id, content, metadata, created_at
                            """,
                            (doc_id, collection_id, chunk, Json(doc_metadata), embedding, i, len(chunks))
                        )

                        result = cur.fetchone()
                        if result:
                            columns = [desc[0] for desc in cur.description]
                            inserted_docs.append(dict(zip(columns, result)))

                    conn.commit()
                    logger.info(f"已添加文档到集合: collection_id={collection_id}, chunks={len(chunks)}")
                    return inserted_docs
            except Exception as e:
                conn.rollback()
                logger.error(f"添加文档失败: {str(e)}")
                raise

    def search(self, collection_id: int, query: str,
              limit: int = 5, distance_threshold: float = 0.7,
              filters: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Vector similarity search within one collection.

        Args:
            collection_id: collection to search.
            query: natural-language query (embedded before searching).
            limit: maximum number of results.
            distance_threshold: minimum cosine *similarity* (1 - distance)
                a row must reach to be returned.
            filters: optional metadata equality / array-containment filters.

        Returns:
            Rows as dicts with id, content, metadata and float ``similarity``.
        """
        with self._lock:
            conn = self.connection
            try:
                query_embedding = self.generate_embedding(query)

                with conn.cursor() as cur:
                    # ::vector converts the bound Python list (a Postgres
                    # array) so the pgvector <=> operator applies.
                    base_query = sql.SQL("""
                    SELECT id, content, metadata, 1 - (embedding <=> %s::vector) as similarity
                    FROM documents
                    WHERE collection_id = %s AND 1 - (embedding <=> %s::vector) > %s
                    """)

                    params = [query_embedding, collection_id, query_embedding, distance_threshold]

                    # Optional metadata filters, composed as safely quoted
                    # literals via psycopg2.sql.
                    if filters:
                        filter_conditions = []
                        for key, value in filters.items():
                            # BUG FIX: the original passed %s placeholders to
                            # SQL.format(), which only substitutes {} markers,
                            # so the literals were dropped and the parameter
                            # count no longer matched.
                            if isinstance(value, list):
                                # jsonb ?| matches any key/value of a text[].
                                filter_conditions.append(sql.SQL("metadata->{} ?| array[{}]").format(
                                    sql.Literal(key),
                                    sql.SQL(", ").join(sql.Literal(str(v)) for v in value)
                                ))
                            else:
                                # Simple equality on the text form of the value.
                                filter_conditions.append(sql.SQL("metadata->>{} = {}").format(
                                    sql.Literal(key),
                                    sql.Literal(str(value))
                                ))

                        if filter_conditions:
                            base_query += sql.SQL(" AND ") + sql.SQL(" AND ").join(filter_conditions)

                    base_query += sql.SQL(" ORDER BY similarity DESC LIMIT %s")
                    params.append(limit)

                    cur.execute(base_query, params)
                    results = cur.fetchall()

                    columns = [desc[0] for desc in cur.description]
                    formatted_results = []
                    for row in results:
                        result_dict = dict(zip(columns, row))
                        # Ensure similarity is a plain float (may be Decimal).
                        result_dict['similarity'] = float(result_dict['similarity'])
                        formatted_results.append(result_dict)

                    logger.info(f"搜索完成: collection_id={collection_id}, query='{query[:30]}...', results={len(formatted_results)}")
                    return formatted_results
            except Exception as e:
                logger.error(f"搜索失败: {str(e)}")
                raise

    def hybrid_search(self, collection_id: int, query: str,
                     limit: int = 5, distance_threshold: float = 0.7,
                     text_weight: float = 0.5, vector_weight: float = 0.5,
                     filters: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Hybrid search combining full-text rank and vector similarity.

        Args:
            collection_id: collection to search.
            query: query text (used both as tsquery and as embedding input).
            limit: maximum number of results.
            distance_threshold: minimum cosine similarity for the vector leg.
            text_weight: weight applied to the ts_rank score.
            vector_weight: weight applied to the vector similarity score.
            filters: accepted for interface symmetry with ``search`` but not
                applied by this simplified implementation.

        Returns:
            Rows as dicts with id, content, metadata and float ``final_score``.
        """
        with self._lock:
            conn = self.connection
            try:
                query_embedding = self.generate_embedding(query)

                with conn.cursor() as cur:
                    # Simplified weighted union of a vector leg and a text leg;
                    # a production system may want score normalization first.
                    query_sql = sql.SQL("""
                    WITH vector_search AS (
                        SELECT id, content, metadata, 1 - (embedding <=> %s::vector) as vector_score
                        FROM documents
                        WHERE collection_id = %s AND 1 - (embedding <=> %s::vector) > %s
                    ),
                    text_search AS (
                        SELECT id, content, metadata, ts_rank(to_tsvector('english', content), plainto_tsquery('english', %s)) as text_score
                        FROM documents
                        WHERE collection_id = %s AND to_tsvector('english', content) @@ plainto_tsquery('english', %s)
                    )
                    SELECT 
                        COALESCE(vs.id, ts.id) as id,
                        COALESCE(vs.content, ts.content) as content,
                        COALESCE(vs.metadata, ts.metadata) as metadata,
                        COALESCE(vs.vector_score, 0) * %s + COALESCE(ts.text_score, 0) * %s as final_score
                    FROM vector_search vs
                    FULL JOIN text_search ts ON vs.id = ts.id
                    WHERE COALESCE(vs.vector_score, 0) > 0 OR COALESCE(ts.text_score, 0) > 0
                    ORDER BY final_score DESC
                    LIMIT %s
                    """)

                    # Parameter order mirrors the placeholders above; note that
                    # vector_weight pairs with vector_score, text_weight with
                    # text_score.
                    params = [
                        query_embedding, collection_id, query_embedding, distance_threshold,
                        query, collection_id, query,
                        vector_weight, text_weight,
                        limit
                    ]

                    cur.execute(query_sql, params)
                    results = cur.fetchall()

                    columns = [desc[0] for desc in cur.description]
                    formatted_results = []
                    for row in results:
                        result_dict = dict(zip(columns, row))
                        # Ensure final_score is a plain float.
                        result_dict['final_score'] = float(result_dict['final_score'])
                        formatted_results.append(result_dict)

                    logger.info(f"混合搜索完成: collection_id={collection_id}, query='{query[:30]}...', results={len(formatted_results)}")
                    return formatted_results
            except Exception as e:
                logger.error(f"混合搜索失败: {str(e)}")
                raise

    def get_document(self, document_id: str) -> Optional[Dict[str, Any]]:
        """Fetch a single document chunk by its UUID.

        Args:
            document_id: the document's UUID (as a string).

        Returns:
            The row as a dict, or None when not found.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    cur.execute("SELECT id, collection_id, content, metadata, created_at FROM documents WHERE id = %s", (document_id,))
                    result = cur.fetchone()
                    if result:
                        columns = [desc[0] for desc in cur.description]
                        return dict(zip(columns, result))
                    return None
            except Exception as e:
                logger.error(f"获取文档失败: {str(e)}")
                raise

    def list_documents(self, collection_id: Optional[int] = None,
                      limit: int = 100, offset: int = 0) -> List[Dict[str, Any]]:
        """List document chunks, newest first, optionally per collection.

        Args:
            collection_id: restrict to one collection when given.
            limit: page size.
            offset: page offset.

        Returns:
            List of document rows as dicts.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    if collection_id is not None:
                        cur.execute(
                            "SELECT id, collection_id, content, metadata, created_at FROM documents WHERE collection_id = %s ORDER BY created_at DESC LIMIT %s OFFSET %s",
                            (collection_id, limit, offset)
                        )
                    else:
                        cur.execute(
                            "SELECT id, collection_id, content, metadata, created_at FROM documents ORDER BY created_at DESC LIMIT %s OFFSET %s",
                            (limit, offset)
                        )

                    results = cur.fetchall()
                    columns = [desc[0] for desc in cur.description]
                    return [dict(zip(columns, row)) for row in results]
            except Exception as e:
                logger.error(f"列出文档失败: {str(e)}")
                raise

    def delete_document(self, document_id: str) -> bool:
        """Delete a single document chunk by UUID.

        Args:
            document_id: the document's UUID (as a string).

        Returns:
            True if a row was deleted, False if nothing matched.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    cur.execute("DELETE FROM documents WHERE id = %s", (document_id,))
                    success = cur.rowcount > 0
                    conn.commit()
                    if success:
                        logger.info(f"已删除文档: {document_id}")
                    else:
                        logger.warning(f"文档不存在: {document_id}")
                    return success
            except Exception as e:
                conn.rollback()
                logger.error(f"删除文档失败: {str(e)}")
                raise

    def count_documents(self, collection_id: Optional[int] = None) -> int:
        """Count document chunks, optionally restricted to one collection.

        Args:
            collection_id: restrict to one collection when given.

        Returns:
            Number of matching rows.
        """
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    if collection_id is not None:
                        cur.execute("SELECT COUNT(*) FROM documents WHERE collection_id = %s", (collection_id,))
                    else:
                        cur.execute("SELECT COUNT(*) FROM documents")

                    count = cur.fetchone()[0]
                    logger.debug(f"文档计数: collection_id={collection_id}, count={count}")
                    return count
            except Exception as e:
                logger.error(f"统计文档数量失败: {str(e)}")
                raise

    def optimize_indexes(self) -> None:
        """Rebuild the ANN index and refresh planner statistics."""
        with self._lock:
            conn = self.connection
            try:
                with conn.cursor() as cur:
                    # Rebuild the ivfflat index (lists were trained on old data).
                    cur.execute("REINDEX INDEX idx_documents_embedding")

                    # Refresh planner statistics for the documents table.
                    cur.execute("ANALYZE documents")

                    conn.commit()
                    logger.info("向量数据库索引已优化")
            except Exception as e:
                conn.rollback()
                logger.error(f"优化索引失败: {str(e)}")
                raise


# Global singleton vector DB manager.  Construction only reads config via
# config_manager; the actual DB connection is created lazily on first use,
# so importing this module does not touch the database.
vector_db_manager = VectorDBManager()

# Explicit public API of this module.
__all__ = [
    'VectorDBManager',
    'vector_db_manager'
]