#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Re-vectorize documents whose vector-store files are missing on disk."""

import os
import sys
import asyncio

# Force UTF-8 output on the Windows console so Chinese text and emoji print
# correctly. TextIOWrapper.reconfigure() (Python 3.7+; the file already relies
# on asyncio.run, also 3.7+) keeps the original stream objects and their full
# API intact — unlike the codecs.getwriter() wrapper it replaces, which
# produced StreamWriter objects lacking `.buffer` and line buffering.
if sys.platform == 'win32':
    for _stream in (sys.stdout, sys.stderr):
        if hasattr(_stream, 'reconfigure'):
            _stream.reconfigure(encoding='utf-8')

# Make the project root importable when running this script directly.
# Absolute path so the entry survives a later os.chdir().
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from app.models.database import engine
from sqlalchemy.orm import sessionmaker
from app.models.document import Document
from app.core.config import settings
from app.services.document_processor import document_processor
from app.services.vector_store import VectorStore, get_embedding_dimension
from app.services.llm_client import ollama_client

async def vectorize_doc(doc, db_session):
    """Re-extract, re-embed and re-index a single document.

    Args:
        doc: Document ORM instance whose on-disk vector files are missing.
        db_session: Active SQLAlchemy session used to persist the updated
            document state; rolled back if anything fails.

    Returns:
        True on success, False if extraction or vectorization failed.
    """
    try:
        print(f'\n处理文档: {doc.title}')

        # 1. Extract text and split it into chunks.
        result = document_processor.process_document(doc.file_path, doc.file_type)
        chunks = result["chunks"]

        if not chunks:
            print(f'  ❌ 无法提取文本')
            return False

        print(f'  文档分块完成，共 {len(chunks)} 个块')

        # 2. Generate one embedding per chunk, sequentially, with periodic
        #    progress output every 10 chunks.
        embeddings = []
        for i, chunk in enumerate(chunks):
            emb = await ollama_client.generate_embedding(chunk)
            embeddings.append(emb)
            if (i + 1) % 10 == 0:
                print(f'    已生成 {i + 1}/{len(chunks)} 个向量...')

        print(f'  向量生成完成，共 {len(embeddings)} 个')

        # 3. Store in the vector library — the document ID doubles as the
        #    vector-store ID, and VectorStore.save() appends ".index"/".meta"
        #    to this base path.
        vector_store_id = doc.id
        vector_path = os.path.join(settings.VECTOR_STORE_DIR, f"{vector_store_id}.faiss")

        # Remove any stale index/metadata files before rebuilding.
        for suffix in (".index", ".meta"):
            stale = f"{vector_path}{suffix}"
            if os.path.exists(stale):
                os.remove(stale)

        # Create a fresh store sized to the embedding model's dimensionality.
        dimension = get_embedding_dimension(settings.OLLAMA_EMBED_MODEL)
        vector_store = VectorStore(dimension=dimension)

        # Per-chunk metadata so search hits can be traced back to the source.
        metadatas = [{
            "document_id": doc.id,
            "document_title": doc.title,
            "chunk_index": i,
            "total_chunks": len(chunks),
        } for i in range(len(chunks))]

        vector_store.add_texts(chunks, embeddings, metadatas)
        vector_store.save(vector_path)
        print(f'  向量库已保存: {vector_path}')

        # 4. Mark the document as vectorized and persist the new state.
        doc.is_vectorized = True
        doc.vector_store_id = vector_store_id
        doc.word_count = result["word_count"]
        doc.summary = result["summary"]
        db_session.commit()

        print(f'  ✅ 成功')
        return True

    except Exception as e:
        # FIX: roll back any partial ORM changes so the shared session stays
        # usable for the next document in the batch (the original left the
        # session dirty/failed after an error).
        db_session.rollback()
        print(f'  ❌ 失败: {str(e)}')
        import traceback
        traceback.print_exc()
        return False

async def main():
    """Find documents flagged as vectorized whose FAISS index file is
    missing on disk, and rebuild their vector stores one by one."""
    Session = sessionmaker(bind=engine)
    db = Session()

    try:
        # Candidates: everything the DB believes is already vectorized.
        docs = db.query(Document).filter(Document.is_vectorized.is_(True)).all()

        # Keep only those whose ".faiss.index" file is actually gone.
        need_revectorize = [
            d for d in docs
            if not os.path.exists(
                os.path.join(settings.VECTOR_STORE_DIR, f"{d.id}.faiss.index")
            )
        ]

        print(f'\n=== 需要重新向量化的文档 ===')
        print(f'总数: {len(need_revectorize)}')

        if not need_revectorize:
            print('✅ 所有文档的向量文件都存在，无需重新向量化')
            return

        # FIX: track per-document results (the original ignored the return
        # value and reported success unconditionally).
        succeeded = 0
        for idx, doc in enumerate(need_revectorize, 1):
            print(f'\n[{idx}/{len(need_revectorize)}]')
            if await vectorize_doc(doc, db):
                succeeded += 1

        print(f'\n=== 完成 ===')
        print(f'✅ 重新向量化完成')
        print(f'成功: {succeeded}/{len(need_revectorize)}，失败: {len(need_revectorize) - succeeded}')

    finally:
        db.close()

if __name__ == "__main__":
    # Entry point: drive the async re-vectorization workflow to completion.
    asyncio.run(main())

