"""
重新向量化所有文档（从L2距离迁移到余弦相似度）
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from sqlalchemy.orm import Session
from app.models.database import engine
from app.models.document import Document, DocumentStatus
from app.services.document_processor import document_processor
from app.services.vector_store import VectorStore
from app.services.llm_client import ollama_client
from app.core.config import settings
from app.models.knowledge_base import KnowledgeBase
import logging
import asyncio

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def revectorize_all():
    """Re-vectorize every document currently marked as vectorized.

    Re-chunks each document, regenerates embeddings via the Ollama client,
    and rebuilds the per-knowledge-base FAISS index files (one shared
    ``{knowledge_base_id}.faiss`` per KB). Documents without a knowledge
    base, or whose text cannot be extracted, are skipped and counted as
    failures. Progress and a final summary are written to the logger.

    Returns:
        None. All effects are on disk (FAISS files) and in the database
        (document status / vector_store_id updates).
    """
    db = Session(engine)
    try:
        # Only documents that were vectorized under the old scheme need work.
        documents = db.query(Document).filter(Document.is_vectorized == True).all()

        if not documents:
            logger.info("没有需要重新向量化的文档")
            return

        logger.info(f"找到 {len(documents)} 个已向量化的文档，开始重新向量化...")

        # KB-level index files cleared during THIS run. Each shared
        # ``{kb_id}.faiss`` must be deleted exactly once: after a previous
        # run, every document in a KB carries the KB id as its
        # vector_store_id, so deleting the file per-document would wipe
        # the chunks already written for earlier documents of the same KB.
        cleared_kb_ids = set()

        async def vectorize_doc(doc, idx, total):
            """Re-vectorize a single document; return True on success."""
            try:
                logger.info(f"[{idx}/{total}] 正在处理: {doc.title}")

                # A document must belong to a knowledge base to know which
                # shared index file its chunks go into.
                if not doc.knowledge_base_id:
                    logger.warning(f"  ⚠️ 文档未关联知识库，跳过")
                    return False

                new_vector_store_id = f"{doc.knowledge_base_id}"
                new_vector_path = os.path.join(
                    settings.VECTOR_STORE_DIR, f"{new_vector_store_id}.faiss"
                )

                # Remove the legacy per-document index file, but only when it
                # is NOT the shared KB file (otherwise we would destroy chunks
                # written by earlier documents of the same KB).
                if doc.vector_store_id and doc.vector_store_id != new_vector_store_id:
                    old_vector_path = os.path.join(
                        settings.VECTOR_STORE_DIR, f"{doc.vector_store_id}.faiss"
                    )
                    if os.path.exists(old_vector_path):
                        os.remove(old_vector_path)
                        logger.info(f"  已删除旧向量文件: {old_vector_path}")

                # Clear the shared KB index once per run so a re-run starts
                # from a fresh file instead of appending duplicate chunks.
                if new_vector_store_id not in cleared_kb_ids:
                    cleared_kb_ids.add(new_vector_store_id)
                    if os.path.exists(new_vector_path):
                        os.remove(new_vector_path)
                        logger.info(f"  已删除旧向量文件: {new_vector_path}")

                # 1. Extract text and split into chunks.
                result = document_processor.process_document(doc.file_path, doc.file_type)
                chunks = result["chunks"]

                if not chunks:
                    logger.error(f"  ❌ 无法从文档中提取文本")
                    return False

                logger.info(f"  文档分块完成，共 {len(chunks)} 个块")

                # 2. Generate one embedding per chunk (sequentially, to avoid
                # overloading the embedding backend).
                embeddings = [
                    await ollama_client.generate_embedding(chunk) for chunk in chunks
                ]

                logger.info(f"  向量生成完成，共 {len(embeddings)} 个")

                # 3. Append the chunks to the KB's vector store (loading the
                # existing file when earlier documents already wrote to it).
                vector_store = VectorStore()
                if os.path.exists(new_vector_path):
                    vector_store.load(new_vector_path)
                    logger.info(f"  加载已有向量库")

                metadatas = [{
                    "document_id": doc.id,
                    "document_title": doc.title,
                    "chunk_index": i
                } for i in range(len(chunks))]

                vector_store.add_texts(chunks, embeddings, metadatas)
                vector_store.save(new_vector_path)
                logger.info(f"  向量库已保存")

                # 4. Persist the document's new vectorization state.
                doc.is_vectorized = True
                doc.vector_store_id = new_vector_store_id
                doc.word_count = result["word_count"]
                doc.summary = result["summary"]
                doc.status = DocumentStatus.COMPLETED
                db.commit()

                return True

            except Exception as e:
                # Roll back so a failed commit does not leave the session in a
                # broken state for the remaining documents.
                db.rollback()
                logger.error(f"  ❌ 处理失败: {str(e)}", exc_info=True)
                return False

        async def run_all():
            """Process documents one by one (no concurrency) inside a single event loop."""
            ok, failed = 0, 0
            total = len(documents)
            for idx, doc in enumerate(documents, 1):
                if await vectorize_doc(doc, idx, total):
                    ok += 1
                    logger.info(f"  ✅ 成功 ({ok}/{total})")
                else:
                    failed += 1
            return ok, failed

        # One event loop for the whole run instead of asyncio.run() per document.
        success_count, fail_count = asyncio.run(run_all())

        logger.info(f"\n{'='*60}")
        logger.info(f"重新向量化完成！")
        logger.info(f"  成功: {success_count}")
        logger.info(f"  失败: {fail_count}")
        logger.info(f"  总计: {len(documents)}")
        logger.info(f"{'='*60}")

    except Exception as e:
        logger.error(f"重新向量化过程出错: {str(e)}", exc_info=True)
    finally:
        # Always release the session, even on unexpected errors.
        db.close()

if __name__ == "__main__":
    print("="*60)
    print("重要提示：")
    print("  本脚本将重新向量化所有文档，以支持新的余弦相似度算法")
    print("  这将删除旧的向量索引并重新生成")
    print("  根据文档数量，可能需要较长时间")
    print("="*60)
    
    confirm = input("\n确认继续？(yes/no): ")
    if confirm.lower() in ['yes', 'y']:
        revectorize_all()
    else:
        print("已取消")

