"""
清理孤儿向量脚本
- 检查向量库中的文档ID是否在数据库中存在
- 删除不存在文档的向量数据
"""
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
from app.models.document import Document
from app.services.vector_store import VectorStore
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def clean_orphan_vectors():
    """Remove orphaned vector data.

    Scans every ``<kb_id>.index`` file under ``<UPLOAD_DIR>/vectors``, compares
    the document IDs stored in each vector store's metadata against the set of
    documents the database still marks as vectorized, and deletes (then
    persists) any vectors whose document no longer exists.

    Returns:
        None. Progress and results are reported via the module logger.
    """

    # Short-lived database session, closed in the finally block below.
    engine = create_engine(settings.DATABASE_URL)
    SessionLocal = sessionmaker(bind=engine)
    db = SessionLocal()

    try:
        # Collect the IDs of every document the database considers vectorized.
        # `.is_(True)` is the idiomatic SQLAlchemy boolean test (avoids E712).
        valid_doc_ids = set()
        all_docs = db.query(Document).filter(Document.is_vectorized.is_(True)).all()

        for doc in all_docs:
            valid_doc_ids.add(doc.id)
            logger.info(f"有效文档: {doc.id} - {doc.title}")

        logger.info(f"\n数据库中共有 {len(valid_doc_ids)} 个已向量化的文档")
        logger.info("=" * 80)

        # Each knowledge base stores its vectors as <kb_id>.index in this dir.
        vectors_dir = os.path.join(settings.UPLOAD_DIR, "vectors")
        if not os.path.exists(vectors_dir):
            logger.warning(f"向量库目录不存在: {vectors_dir}")
            return

        cleaned_kb_count = 0
        total_removed_vectors = 0

        # Walk every knowledge base's vector file.
        for filename in os.listdir(vectors_dir):
            if not filename.endswith('.index'):
                continue

            # splitext strips only the trailing extension; str.replace would
            # also mangle a kb_id that happens to contain ".index" internally.
            kb_id, _ = os.path.splitext(filename)
            vector_path = os.path.join(vectors_dir, kb_id)

            logger.info(f"\n检查知识库: {kb_id}")
            logger.info("-" * 80)

            try:
                # Load this knowledge base's vector store from disk.
                vector_store = VectorStore()
                vector_store.load(vector_path)

                original_count = vector_store.count()
                logger.info(f"原始向量数: {original_count}")

                # Partition the document IDs found in the vector metadata into
                # all IDs seen vs. IDs with no matching database document.
                doc_ids_in_vectors = set()
                orphan_doc_ids = set()

                for metadata in vector_store.metadatas:
                    doc_id = metadata.get('document_id')
                    if doc_id:
                        doc_ids_in_vectors.add(doc_id)
                        if doc_id not in valid_doc_ids:
                            orphan_doc_ids.add(doc_id)

                logger.info(f"向量库中的文档ID数: {len(doc_ids_in_vectors)}")
                logger.info(f"孤儿文档ID数: {len(orphan_doc_ids)}")

                if orphan_doc_ids:
                    logger.warning(f"发现孤儿向量，文档ID: {orphan_doc_ids}")

                    # Drop every orphan document's vectors from the store.
                    for orphan_id in orphan_doc_ids:
                        logger.info(f"  删除文档 {orphan_id} 的向量...")
                        vector_store.remove_by_document_id(orphan_id)

                    # Persist the cleaned store back to disk.
                    vector_store.save(vector_path)

                    removed_count = original_count - vector_store.count()
                    total_removed_vectors += removed_count
                    # NOTE: counts only knowledge bases that actually had
                    # orphans removed, not every KB examined.
                    cleaned_kb_count += 1

                    logger.info(f"✅ 成功删除 {removed_count} 个孤儿向量")
                    logger.info(f"剩余向量数: {vector_store.count()}")
                else:
                    logger.info("✅ 没有发现孤儿向量，向量库干净")

            except Exception as e:
                # logger.exception keeps the traceback (logger.error drops it);
                # best-effort per-KB handling: skip the bad KB, keep going.
                logger.exception(f"❌ 处理知识库 {kb_id} 时出错: {str(e)}")
                continue

        logger.info("\n" + "=" * 80)
        logger.info("清理完成！")
        logger.info(f"处理的知识库数: {cleaned_kb_count}")
        logger.info(f"总共删除的向量数: {total_removed_vectors}")

    finally:
        # Always release the session, even on failure.
        db.close()


if __name__ == "__main__":
    # Script entry point: announce, run the cleanup, then block on input()
    # so a double-clicked console window stays open for the user to read.
    banner = "=" * 80
    print("开始清理孤儿向量数据...")
    print(banner)
    clean_orphan_vectors()
    print("\n按任意键退出...")
    input()

