import json
from typing import Any, Dict, List, Optional

from scorpio.core.common import get_logger
from scorpio.core.database import VectorDatabaseManager

logger = get_logger(__name__)

class DocumentProcessor:
    """Persistence layer for RAG documents and their embedded chunks.

    Owns the ``documents`` and ``chunks`` tables in a pgvector-enabled
    PostgreSQL database: schema and index creation, document/chunk
    insertion, and per-document statistics. All access goes through the
    async connections handed out by ``db_manager.get_connection()``.
    """

    def __init__(self, db_manager: VectorDatabaseManager, dimension: int):
        """
        Args:
            db_manager: Async connection provider exposing ``get_connection()``
                as an async context manager.
            dimension: Embedding vector dimension. Coerced to ``int`` because it
                is interpolated directly into DDL (bind parameters are not
                allowed in ``CREATE TABLE``); coercion closes the SQL-injection
                hole a stray string value would open.

        Raises:
            ValueError/TypeError: if ``dimension`` is not convertible to int.
        """
        logger.debug("initialize DocumentProcessorService")
        self.db_manager = db_manager
        # DDL below uses an f-string for the vector size, so force int here.
        self.dimension = int(dimension)

    async def create_table(self):
        """Create the ``documents`` and ``chunks`` tables (idempotent)."""
        async with self.db_manager.get_connection() as conn:
            # Document metadata table.
            await conn.execute('''
                CREATE TABLE IF NOT EXISTS documents (
                    id BIGSERIAL PRIMARY KEY,
                    external_id UUID NOT NULL UNIQUE,
                    title TEXT,
                    content_type TEXT,
                    file_path TEXT,
                    file_size BIGINT,
                    chunk_count INTEGER DEFAULT 0,
                    metadata JSONB DEFAULT '{}',
                    processing_status TEXT DEFAULT 'pending',
                    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                    processed_at TIMESTAMP WITH TIME ZONE
                );
            ''')
            logger.info("documents table created/checked")

            # Chunk table. The vector dimension cannot be a bind parameter in
            # DDL, hence the f-string; ``self.dimension`` is guaranteed int by
            # ``__init__``. ``{{}}`` renders as the JSONB default ``{}``.
            await conn.execute(f'''
                CREATE TABLE IF NOT EXISTS chunks (
                    id BIGSERIAL PRIMARY KEY,
                    document_id BIGINT NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
                    chunk_index INTEGER NOT NULL,
                    content TEXT NOT NULL,
                    content_hash TEXT,
                    embedding vector({self.dimension}),
                    token_count INTEGER,
                    metadata JSONB DEFAULT '{{}}',
                    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                    
                    CONSTRAINT unique_document_chunk UNIQUE(document_id, chunk_index)
                );
            ''')
            logger.info("chunks table created/checked")

    async def create_indexes(self):
        """Create performance indexes for both tables (idempotent)."""
        async with self.db_manager.get_connection() as conn:
            # HNSW index to accelerate vector similarity search (L2 distance).
            await conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_chunks_embedding_hnsw 
                ON chunks USING hnsw (embedding vector_l2_ops);
            ''')
            logger.info("向量 HNSW 索引创建/检查完成")

            # FK lookup index for chunk-by-document queries.
            await conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_chunks_document_id 
                ON chunks(document_id);
            ''')

            # External-id lookup index (UNIQUE already creates one implicitly,
            # but this keeps the named index stable for tooling).
            await conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_documents_external_id 
                ON documents(external_id);
            ''')

            # Time-range queries over documents.
            await conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_documents_created_at 
                ON documents(created_at);
            ''')

    async def insert(self, document_data: Dict[str, Any]) -> int:
        """Insert one document metadata row.

        Args:
            document_data: Mapping with required key ``external_id`` and
                optional keys ``title``, ``content_type``, ``file_path``,
                ``file_size`` (default 0), ``metadata`` (JSON-serializable,
                default ``{}``) and ``processing_status`` (default
                ``'completed'``, preserving the previous hard-coded value).

        Returns:
            The generated ``documents.id`` primary key.

        Raises:
            KeyError: if ``external_id`` is missing.
            Exception: any database error, logged and re-raised.
        """
        async with self.db_manager.get_connection() as conn:
            try:
                document_id = await conn.fetchval('''
                    INSERT INTO documents (
                        external_id, title, content_type, file_path, 
                        file_size, metadata, processing_status
                    )
                    VALUES ($1, $2, $3, $4, $5, $6, $7)
                    RETURNING id;
                ''', 
                document_data['external_id'],
                document_data.get('title'),
                document_data.get('content_type'),
                document_data.get('file_path'),
                document_data.get('file_size', 0),
                json.dumps(document_data.get('metadata', {})),
                # Previously hard-coded to 'completed'; callers may now
                # override it while the default stays backward compatible.
                document_data.get('processing_status', 'completed')
                )
                logger.info(f"insert document {document_id} with external_id {document_data['external_id']} successfully")
                return document_id
            except Exception as e:
                logger.error(f"Failed to insert document: {e}")
                raise

    async def insert_chunks(self, document_id: int, chunks: List[Dict[str, Any]]):
        """Bulk-insert chunks for a document and bump its chunk counter.

        Args:
            document_id: ``documents.id`` the chunks belong to.
            chunks: Each dict needs ``chunk_index``, ``content`` and
                ``embedding``; ``content_hash``, ``token_count`` and
                ``metadata`` are optional.

        Raises:
            Exception: any database error, logged and re-raised.
        """
        async with self.db_manager.get_connection() as conn:
            try:
                # Build the executemany argument tuples up front.
                values = []
                for chunk in chunks:
                    # Strip NUL bytes (0x00): PostgreSQL rejects them in
                    # UTF-8 text/jsonb values.
                    content = chunk['content'].replace('\x00', '')
                    content_hash = chunk.get('content_hash')
                    if content_hash is not None:
                        content_hash = content_hash.replace('\x00', '')
                    metadata_str = json.dumps(chunk.get('metadata', {})).replace('\x00', '')
                    
                    values.append((
                        document_id,
                        chunk['chunk_index'],
                        content,
                        content_hash,
                        chunk['embedding'],
                        chunk.get('token_count'),
                        metadata_str
                    ))
                
                logger.debug(f"start to insert {len(chunks)} chunks for document {document_id}")
                await conn.executemany('''
                    INSERT INTO chunks 
                    (document_id, chunk_index, content, content_hash, embedding, token_count, metadata)
                    VALUES ($1, $2, $3, $4, $5, $6, $7)
                ''', values)
                # Keep the denormalized counter and processing timestamp fresh.
                await conn.execute('''
                    UPDATE documents 
                    SET chunk_count = chunk_count + $1, processed_at = NOW()
                    WHERE id = $2;
                ''', len(chunks), document_id)
                logger.info(f"insert {len(chunks)} chunks for document {document_id} successfully")
            except Exception as e:
                logger.error(f"Failed to insert document chunks: {e}")
                raise

    async def get_stats(self, document_external_id: str) -> Optional[Dict[str, Any]]:
        """Return statistics for one document, or ``None`` if not found.

        Args:
            document_external_id: The document's ``external_id`` (UUID).

        Returns:
            Dict with id, external id, title, content type, live chunk count,
            vector dimensions of the stored embeddings, creation time and
            metadata — or ``None`` when no such document exists.
        """
        async with self.db_manager.get_connection() as conn:
            # Select explicit columns: ``d.*`` would drag in the stored
            # ``chunk_count`` column and collide with the COUNT() alias,
            # making the name lookup on the result row ambiguous.
            doc_info = await conn.fetchrow('''
                SELECT d.id, d.external_id, d.title, d.content_type,
                       d.created_at, d.metadata,
                       COUNT(dc.id) AS chunk_count
                FROM documents d
                LEFT JOIN chunks dc ON d.id = dc.document_id
                WHERE d.external_id = $1
                GROUP BY d.id
            ''', document_external_id)
            
            if not doc_info:
                return None
            
            # Dimension of the stored vectors; None when the document has
            # no chunks yet.
            dimension_info = await conn.fetchval('''
                SELECT vector_dims(embedding) 
                FROM chunks 
                WHERE document_id = $1 
                LIMIT 1
            ''', doc_info['id'])
            
            return {
                'document_id': doc_info['id'],
                'document_external_id': doc_info['external_id'],
                'title': doc_info['title'],
                'content_type': doc_info['content_type'],
                'chunk_count': doc_info['chunk_count'],
                'vector_dimensions': dimension_info,
                'created_at': doc_info['created_at'],
                'metadata': doc_info['metadata']
            }
    