#!/usr/bin/env python3
"""
ChromaDB和知识库管理工具
用于管理向量数据库和知识库的内容，包括：
- 查看、清空、搜索、导出ChromaDB数据
- 解析PDF文档并导入知识库
- 重置整个知识库
- 重新生成向量数据（使用豆包嵌入模型）

使用方法:
  python scripts/chroma_db_manager.py --action list                    # 列出所有文档
  python scripts/chroma_db_manager.py --action stats                   # 显示统计信息
  python scripts/chroma_db_manager.py --action search --query "钥匙"    # 搜索文档
  python scripts/chroma_db_manager.py --action clear                   # 清空数据库
  python scripts/chroma_db_manager.py --action export --output data.json # 导出数据
  python scripts/chroma_db_manager.py --action parse --pdf file.pdf    # 解析特定PDF
  python scripts/chroma_db_manager.py --action parse-all               # 解析所有PDF
  python scripts/chroma_db_manager.py --action reset --pdf file.pdf    # 重置知识库
  python scripts/chroma_db_manager.py --action rebuild-vectors         # 重新生成向量数据
"""

import os
import sys
import json
import argparse
from pathlib import Path
import chromadb
from chromadb.config import Settings

# Put the backend package root (this script's grandparent dir) on sys.path
# so that `app.*` imports below resolve when run as a standalone script.
backend_path = Path(__file__).parent.parent
sys.path.insert(0, str(backend_path))

try:
    from app.core.config import settings as app_settings
    CHROMA_PATH = app_settings.chroma_persist_directory
    COLLECTION_NAME = "operation_documents"
except ImportError:
    # Fallback defaults when the backend package is not importable.
    # NOTE(review): on this path `app_settings` stays undefined, so actions
    # that reference it (parse/reset) would raise NameError — confirm intended.
    CHROMA_PATH = "./data/chroma_db"
    COLLECTION_NAME = "operation_documents"


class ChromaDBManager:
    """Thin management wrapper around a persistent ChromaDB instance.

    Provides connect/inspect/search/clear/export operations used by the CLI
    actions in this script. All methods report progress via print() and return
    booleans (or None) instead of raising, matching the script's CLI style.
    """

    def __init__(self, chroma_path: str = CHROMA_PATH):
        self.chroma_path = chroma_path
        self.client = None       # chromadb.PersistentClient once connect() succeeds
        self.collection = None   # active collection once get_collection() succeeds

    def connect(self) -> bool:
        """Open a persistent ChromaDB client at ``self.chroma_path``.

        Returns:
            True on success, False (after printing the error) on failure.
        """
        try:
            self.client = chromadb.PersistentClient(
                path=self.chroma_path,
                settings=Settings(anonymized_telemetry=False)
            )
            print(f"✅ 已连接到ChromaDB: {self.chroma_path}")
            return True
        except Exception as e:
            print(f"❌ 连接ChromaDB失败: {e}")
            return False

    def get_collection(self, collection_name: str = COLLECTION_NAME) -> bool:
        """Bind ``self.collection`` to the named collection, creating it if absent."""
        if not self.client:
            print("❌ 未连接到数据库")
            return False

        try:
            self.collection = self.client.get_collection(collection_name)
            print(f"✅ 已连接到集合: {collection_name}")
            return True
        except Exception:
            # Collection does not exist yet — create it with a description.
            try:
                self.collection = self.client.create_collection(
                    name=collection_name,
                    metadata={"description": "系统操作文档向量存储"}
                )
                print(f"✅ 已创建新集合: {collection_name}")
                return True
            except Exception as e:
                print(f"❌ 创建集合失败: {e}")
                return False

    def list_collections(self) -> None:
        """Print the names of all collections in the database."""
        if not self.client:
            print("❌ 未连接到数据库")
            return

        try:
            collections = self.client.list_collections()
            print(f"📋 数据库中的集合 ({len(collections)} 个):")
            for collection in collections:
                print(f"  - {collection.name}")
        except Exception as e:
            print(f"❌ 获取集合列表失败: {e}")

    def show_stats(self) -> None:
        """Print collection name, document count and storage path."""
        if not self.collection:
            print("❌ 未连接到集合")
            return

        try:
            count = self.collection.count()
            print(f"📊 集合统计信息:")
            print(f"  - 集合名称: {self.collection.name}")
            print(f"  - 文档数量: {count}")
            print(f"  - 数据库路径: {self.chroma_path}")
        except Exception as e:
            print(f"❌ 获取统计信息失败: {e}")

    def list_documents(self, limit: int = 10) -> None:
        """Print up to ``limit`` documents with a short content preview."""
        if not self.collection:
            print("❌ 未连接到集合")
            return

        try:
            results = self.collection.get(limit=limit)

            if not results["ids"]:
                print("📭 集合为空")
                return

            print(f"\n📄 文档列表 (显示前 {len(results['ids'])} 个):")
            for i, (doc_id, document, metadata) in enumerate(zip(
                results["ids"],
                results["documents"],
                results["metadatas"]
            ), 1):
                title = metadata.get("title", "无标题") if metadata else "无标题"
                content_preview = document[:100] + "..." if len(document) > 100 else document
                print(f"\n{i}. ID: {doc_id}")
                print(f"   标题: {title}")
                print(f"   内容预览: {content_preview}")
                if metadata:
                    print(f"   平台: {metadata.get('platform', 'N/A')}")
                    print(f"   难度: {metadata.get('difficulty_level', 'N/A')}")
                    print(f"   风险: {metadata.get('risk_level', 'N/A')}")

        except Exception as e:
            print(f"❌ 获取文档列表失败: {e}")

    def search_documents(self, query: str, limit: int = 5) -> None:
        """Semantic search: embed ``query`` via the Doubao embedding service
        and run a vector query against the collection.
        """
        if not self.collection:
            print("❌ 未连接到集合")
            return

        try:
            # Embed the query with the same model used to build the collection.
            from app.services.embedding_service import create_embedding_service

            embedding_service = create_embedding_service()
            if not embedding_service.is_available():
                print("❌ 豆包嵌入服务不可用")
                return

            query_embedding = embedding_service.encode(query)

            results = self.collection.query(
                query_embeddings=[query_embedding],
                n_results=limit,
                include=["documents", "metadatas", "distances"]
            )

            if not results["ids"][0]:
                print(f"🔍 未找到与 '{query}' 相关的文档")
                return

            print(f"\n🔍 搜索结果 (查询: '{query}'):")
            for i, (doc_id, document, metadata, distance) in enumerate(zip(
                results["ids"][0],
                results["documents"][0],
                results["metadatas"][0],
                results["distances"][0]
            ), 1):
                title = metadata.get("title", "无标题") if metadata else "无标题"
                content_preview = document[:150] + "..." if len(document) > 150 else document
                # ChromaDB returns a distance; present it as a similarity score.
                similarity = 1 - distance
                print(f"\n{i}. 相似度: {similarity:.3f}")
                print(f"   ID: {doc_id}")
                print(f"   标题: {title}")
                print(f"   内容: {content_preview}")

        except Exception as e:
            print(f"❌ 搜索失败: {e}")

    def clear_collection(self) -> bool:
        """Delete every document in the collection (the collection itself stays)."""
        if not self.collection:
            print("❌ 未连接到集合")
            return False

        try:
            # Fetch only the IDs — documents/embeddings aren't needed to delete.
            results = self.collection.get(include=[])
            if not results["ids"]:
                print("📭 集合已经为空")
                return True

            self.collection.delete(ids=results["ids"])
            print(f"✅ 已清空集合，删除了 {len(results['ids'])} 个文档")
            return True

        except Exception as e:
            print(f"❌ 清空集合失败: {e}")
            return False

    def delete_collection(self, collection_name: str = COLLECTION_NAME) -> bool:
        """Drop the whole collection and reset ``self.collection``."""
        if not self.client:
            print("❌ 未连接到数据库")
            return False

        try:
            self.client.delete_collection(collection_name)
            print(f"✅ 已删除集合: {collection_name}")
            self.collection = None
            return True
        except Exception as e:
            print(f"❌ 删除集合失败: {e}")
            return False

    def export_data(self, filename: str = "chroma_export.json") -> bool:
        """Dump every document (id, text, metadata) in the collection to a
        UTF-8 JSON file at ``filename``.

        Returns:
            True if data was written, False if empty or on error.
        """
        if not self.collection:
            print("❌ 未连接到集合")
            return False

        try:
            from datetime import datetime  # local import: only needed here

            results = self.collection.get()

            if not results["ids"]:
                print("📭 集合为空，无数据可导出")
                return False

            export_payload = {
                "collection_name": self.collection.name,
                "total_documents": len(results["ids"]),
                # BUG FIX: previously recorded this script file's mtime
                # (Path(__file__).stat().st_mtime), not the actual export time.
                "export_time": datetime.now().isoformat(),
                "documents": [
                    {"id": doc_id, "document": document, "metadata": metadata}
                    for doc_id, document, metadata in zip(
                        results["ids"],
                        results["documents"],
                        results["metadatas"],
                    )
                ],
            }

            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(export_payload, f, ensure_ascii=False, indent=2)

            # BUG FIX: this message previously printed a literal "(unknown)"
            # instead of the output filename.
            print(f"✅ 数据已导出到: {filename}")
            return True

        except Exception as e:
            print(f"❌ 导出失败: {e}")
            return False


def parse_pdf_document(pdf_filename):
    """Parse one PDF from the knowledge-base directory and report the results.

    Args:
        pdf_filename: file name relative to ``app_settings.knowledge_base_path``.

    Returns:
        True on success, False if the file is missing or processing fails.
    """
    try:
        from app.models.base import create_tables, SessionLocal
        from app.services.enhanced_document_processor import EnhancedDocumentProcessor
        from pathlib import Path

        # Make sure the database schema exists before writing to it.
        create_tables()

        session = SessionLocal()
        try:
            pdf_path = Path(app_settings.knowledge_base_path) / pdf_filename
            if not pdf_path.exists():
                print(f"❌ 文件不存在: {pdf_path}")
                return False

            print(f"📄 开始解析PDF文件: {pdf_filename}")

            doc_processor = EnhancedDocumentProcessor()
            outcome = doc_processor.process_document(session, str(pdf_path))

            divider = "=" * 50
            print("\n" + divider)
            print(f"📊 PDF文件解析结果: {pdf_filename}")
            print(divider)
            print(f"文件类型: {outcome['file_type']}")
            print(f"处理状态: {outcome['processing_status']}")
            print(f"创建的文档数量: {len(outcome['documents_created'])}")

            doc_summary = outcome.get('summary')
            if doc_summary:
                print(f"\n📋 文档摘要:")
                print(f"  总页数: {doc_summary.get('total_pages', 'N/A')}")
                print(f"  文本长度: {doc_summary.get('total_text_length', 'N/A')}")
                print(f"  图片数量: {doc_summary.get('total_images', 'N/A')}")
                print(f"  表格数量: {doc_summary.get('total_tables', 'N/A')}")
                print(f"  文档类型: {doc_summary.get('document_type', 'N/A')}")

                key_sections = doc_summary.get('key_sections')
                if key_sections:
                    print(f"\n🔑 关键章节:")
                    for section_name in key_sections[:5]:  # only the first five
                        print(f"  - {section_name}")

            created = outcome['documents_created']
            print(f"\n📚 创建的操作文档:")
            for doc_info in created[:10]:  # only the first ten
                print(f"  - ID: {doc_info['id']}, 标题: {doc_info['title'][:50]}...")

            if len(created) > 10:
                print(f"  ... 还有 {len(created) - 10} 个文档")

            print(f"\n💾 解析数据已保存到: {outcome.get('parsed_data_file', 'N/A')}")
            print(divider)

            return True

        finally:
            session.close()

    except Exception as e:
        print(f"❌ 解析PDF失败: {e}")
        return False


def parse_all_pdfs():
    """Parse every PDF in the knowledge-base directory and print a summary.

    Returns:
        True on success, False if processing raised an exception.
    """
    try:
        from app.models.base import create_tables, SessionLocal
        from app.services.enhanced_document_processor import EnhancedDocumentProcessor

        # Ensure the schema exists before the processor writes any rows.
        create_tables()

        session = SessionLocal()
        try:
            print("📄 开始批量解析PDF文件")

            doc_processor = EnhancedDocumentProcessor()
            outcome = doc_processor.process_knowledge_base_directory(session)

            divider = "=" * 50
            print("\n" + divider)
            print("📊 批量PDF解析结果")
            print(divider)
            print(f"成功处理的文件: {len(outcome['processed_files'])}")
            print(f"失败的文件: {len(outcome['failed_files'])}")
            print(f"总共创建的文档: {outcome['total_documents_created']}")

            if outcome['processed_files']:
                print(f"\n✅ 成功处理的文件:")
                for entry in outcome['processed_files']:
                    print(f"  - {entry['file_name']}: {entry['documents_created']} 个文档")

            if outcome['failed_files']:
                print(f"\n❌ 失败的文件:")
                for entry in outcome['failed_files']:
                    print(f"  - {entry['file_name']}: {entry['error']}")

            print(divider)

            return True

        finally:
            session.close()

    except Exception as e:
        print(f"❌ 批量解析失败: {e}")
        return False


def rebuild_vectors():
    """Rebuild all vector embeddings in ChromaDB from the relational database,
    using the Doubao embedding service.

    The existing collection is dropped and recreated (the embedding dimension
    may differ from a previously used model), then every OperationDocument row
    is re-encoded and inserted. Returns True on success, False otherwise.
    """
    try:
        from app.models.base import SessionLocal
        from app.models.operation import OperationDocument
        from app.services.embedding_service import create_embedding_service

        print("🚀 开始重新生成向量数据...")
        print("="*60)

        # One DB session for the whole rebuild; closed in the finally block.
        db = SessionLocal()

        try:
            # Step 1: load all documents from the relational database.
            print("\n📝 步骤1: 检查数据库中的文档")
            documents = db.query(OperationDocument).all()

            if not documents:
                print("❌ 数据库中没有文档，请先导入文档")
                return False

            print(f"✅ 找到 {len(documents)} 个文档")

            # Step 2: bring up the Doubao embedding service.
            print("\n🤖 步骤2: 初始化豆包嵌入服务")
            embedding_service = create_embedding_service()

            if not embedding_service.is_available():
                print("❌ 豆包嵌入服务不可用，请检查配置")
                return False

            print("✅ 豆包嵌入服务初始化成功")

            # Step 3: drop and recreate the collection — old vectors may have a
            # different dimensionality than the current embedding model.
            print("\n🗄️ 步骤3: 删除并重新创建集合")
            manager = ChromaDBManager()
            if not manager.connect():
                print("❌ 无法连接到ChromaDB")
                return False

            # Deleting a non-existent collection raises; treat that as benign.
            try:
                manager.delete_collection()
                print("✅ 已删除旧集合")
            except Exception as e:
                print(f"⚠️ 删除旧集合失败（可能不存在）: {e}")

            # Recreate the (now empty) collection.
            if not manager.get_collection():
                print("❌ 无法创建新集合")
                return False

            # Step 4: encode and insert each document; failures are counted,
            # not fatal, so one bad document doesn't abort the rebuild.
            print("\n🔄 步骤4: 重新生成向量数据")
            success_count = 0
            failed_count = 0

            for i, document in enumerate(documents, 1):
                try:
                    print(f"处理文档 {i}/{len(documents)}: {document.title[:50]}...")

                    # Embed title + description + content as one text blob.
                    text_content = f"{document.title}\n{document.description}\n{document.content}"
                    embedding = embedding_service.encode(text_content)

                    # Metadata stored alongside the vector for filtering/display.
                    metadata = {
                        "title": document.title,
                        "platform": document.platform or "general",
                        "difficulty_level": document.difficulty_level,
                        "risk_level": document.risk_level,
                        "keywords": document.keywords or "",
                        "category_id": document.category_id or 0
                    }

                    # Insert into ChromaDB, reusing an existing embedding_id when set.
                    manager.collection.add(
                        embeddings=[embedding],
                        documents=[text_content],
                        metadatas=[metadata],
                        ids=[document.embedding_id or f"doc_{document.id}"]
                    )

                    # Persist the generated embedding_id back to the row if missing.
                    if not document.embedding_id:
                        document.embedding_id = f"doc_{document.id}"
                        db.commit()

                    success_count += 1

                except Exception as e:
                    print(f"❌ 处理文档失败: {e}")
                    failed_count += 1
                    continue

            # Step 5: compare DB row count against the vector count and report.
            print(f"\n🔍 步骤5: 验证结果")
            vector_count = manager.collection.count()

            print("="*60)
            print("📊 向量重建结果")
            print("="*60)
            print(f"数据库文档数量: {len(documents)}")
            print(f"成功生成向量: {success_count}")
            print(f"失败数量: {failed_count}")
            print(f"向量数据库文档数量: {vector_count}")

            if success_count > 0:
                print("✅ 向量数据重建成功!")

                # Smoke-test semantic search against the fresh collection.
                print("\n🧪 测试搜索功能...")
                test_query = "轮毂尺寸"
                manager.search_documents(test_query, limit=3)

                print("="*60)
                print("🎉 向量数据重建完成!")
                print("="*60)
                return True
            else:
                print("❌ 没有成功生成任何向量数据")
                return False

        finally:
            db.close()

    except Exception as e:
        print(f"❌ 重建向量数据失败: {e}")
        return False


def reset_knowledge_base(pdf_filename="Owners_Manual.pdf"):
    """Reset the knowledge base: wipe relational records and the vector store,
    then re-import the given PDF.

    Args:
        pdf_filename: PDF name relative to ``app_settings.knowledge_base_path``.

    Returns:
        True if the reset completed and the vector store ended up non-empty,
        False otherwise.
    """
    try:
        from app.models.base import create_tables, SessionLocal
        from app.models.operation import OperationDocument
        from app.services.enhanced_document_processor import EnhancedDocumentProcessor
        from app.services.knowledge_service import KnowledgeService
        from pathlib import Path

        print("🚀 开始重置知识库...")
        print("="*60)

        # Make sure the schema exists before deleting/inserting rows.
        try:
            create_tables()
            print("✅ 数据库表检查完成")
        except Exception as e:
            print(f"❌ 数据库表创建失败: {e}")
            return False

        # One session for the whole reset; closed in the finally block.
        db = SessionLocal()

        try:
            # Step 1: delete all OperationDocument rows.
            print("\n📝 步骤1: 清空数据库记录")
            deleted_count = db.query(OperationDocument).delete()
            db.commit()
            print(f"✅ 已清空数据库记录: {deleted_count} 条")

            # Step 2: empty the Chroma collection (best effort — silently
            # skipped if the connection or collection lookup fails).
            print("\n🗄️ 步骤2: 清空Chroma向量数据库")
            manager = ChromaDBManager()
            if manager.connect() and manager.get_collection():
                manager.clear_collection()

            # Step 3: re-parse the PDF and repopulate both stores.
            print("\n📄 步骤3: 重新初始化PDF文档")
            file_path = Path(app_settings.knowledge_base_path) / pdf_filename

            if not file_path.exists():
                print(f"❌ PDF文件不存在: {file_path}")
                return False

            print(f"📄 开始处理PDF文件: {pdf_filename}")

            # Use the enhanced processor (text + images + tables extraction).
            processor = EnhancedDocumentProcessor()

            result = processor.process_document(db, str(file_path))

            print(f"\n📊 PDF文档处理结果: {pdf_filename}")
            print("="*60)
            print(f"文件类型: {result['file_type']}")
            print(f"处理状态: {result['processing_status']}")
            print(f"创建的文档数量: {len(result['documents_created'])}")

            if result.get('summary'):
                summary = result['summary']
                print(f"\n📋 文档摘要:")
                print(f"  总页数: {summary.get('total_pages', 'N/A')}")
                print(f"  文本长度: {summary.get('total_text_length', 'N/A')}")
                print(f"  图片数量: {summary.get('total_images', 'N/A')}")
                print(f"  表格数量: {summary.get('total_tables', 'N/A')}")

            # Step 4: verify both stores actually received documents.
            print("\n🔍 步骤4: 验证初始化结果")
            doc_count = db.query(OperationDocument).count()
            print(f"📊 数据库中的文档数量: {doc_count}")

            try:
                knowledge_service = KnowledgeService()
                stats = knowledge_service.get_collection_stats()
                print(f"🔍 向量数据库统计: {stats}")

                # Success requires at least one document in the vector store.
                if stats.get('total_documents', 0) > 0:
                    print("✅ 知识库初始化成功!")
                    print("\n" + "="*60)
                    print("🎉 知识库重置完成!")
                    print("="*60)
                    return True
                else:
                    print("⚠️ 向量数据库中没有文档")
                    return False

            except Exception as e:
                print(f"❌ 检查向量数据库失败: {e}")
                return False

        finally:
            db.close()

    except Exception as e:
        print(f"❌ 重置知识库失败: {e}")
        return False


def main():
    """CLI entry point: parse arguments and dispatch the requested action.

    Actions that manage their own DB/vector connections (reset, parse, etc.)
    are dispatched first and return early; the remaining actions share one
    connected ChromaDBManager.
    """
    parser = argparse.ArgumentParser(description="ChromaDB和知识库管理工具")
    parser.add_argument("--action", choices=["list", "stats", "clear", "delete", "search", "export", "reset", "parse", "parse-all", "rebuild-vectors", "check-status", "test-imports", "clean-init"],
                       default="list", help="执行的操作")
    parser.add_argument("--query", type=str, help="搜索查询")
    parser.add_argument("--limit", type=int, default=10, help="显示文档数量限制")
    parser.add_argument("--output", type=str, default="chroma_export.json", help="导出文件名")
    parser.add_argument("--pdf", type=str, default="Owners_Manual.pdf", help="要处理的PDF文件名")

    args = parser.parse_args()

    # Actions that do not need a shared ChromaDB connection.
    if args.action == "reset":
        # Wipe and re-import the knowledge base.
        success = reset_knowledge_base(args.pdf)
        if not success:
            print("\n❌ 知识库重置失败!")
            sys.exit(1)
        return
    elif args.action == "check-status":
        # Data-consistency report.
        success = check_data_status()
        if not success:
            sys.exit(1)
        return
    elif args.action == "test-imports":
        # Dependency/import smoke test.
        success = test_imports()
        if not success:
            sys.exit(1)
        return
    elif args.action == "clean-init":
        # Destructive clean + full re-initialization.
        success = clean_and_init()
        if not success:
            sys.exit(1)
        return
    elif args.action == "parse":
        # Parse one specific PDF file.
        success = parse_pdf_document(args.pdf)
        if success:
            print("\n✅ PDF解析完成!")
        else:
            print("\n❌ PDF解析失败!")
            sys.exit(1)
        return
    elif args.action == "parse-all":
        # Parse every PDF in the knowledge base.
        success = parse_all_pdfs()
        if success:
            print("\n✅ 批量PDF解析完成!")
        else:
            print("\n❌ 批量PDF解析失败!")
            sys.exit(1)
        return
    elif args.action == "rebuild-vectors":
        # Re-embed all documents into a fresh collection.
        success = rebuild_vectors()
        if success:
            print("\n✅ 向量数据重建完成!")
        else:
            print("\n❌ 向量数据重建失败!")
            sys.exit(1)
        return

    # Remaining actions share one connected manager + collection.
    manager = ChromaDBManager()

    if not manager.connect():
        return

    manager.list_collections()

    if not manager.get_collection():
        return

    if args.action == "list":
        manager.show_stats()
        manager.list_documents(args.limit)
    elif args.action == "stats":
        manager.show_stats()
    elif args.action == "clear":
        confirm = input("确认要清空集合吗？(y/N): ").lower().strip()
        if confirm == 'y':
            manager.clear_collection()
    elif args.action == "delete":
        confirm = input("确认要删除整个集合吗？(y/N): ").lower().strip()
        if confirm == 'y':
            manager.delete_collection()
    elif args.action == "search":
        if args.query:
            # BUG FIX: --limit was previously ignored for search (the call
            # always used search_documents' internal default of 5).
            manager.search_documents(args.query, args.limit)
        else:
            print("❌ 请提供搜索查询 (--query)")
    elif args.action == "export":
        manager.export_data(args.output)


def show_help():
    """Print the full usage guide for this management tool to stdout."""
    usage_text = """
银行助手知识库管理工具 (增强版)

📋 基础操作:
  list            列出所有文档 (默认)
  stats           显示统计信息
  search          搜索文档 (需要 --query，使用豆包嵌入模型)
  clear           清空数据库
  delete          删除整个集合
  export          导出数据 (可选 --output)

📄 文档处理 (增强版):
  parse           解析特定PDF文件 (需要 --pdf)
  parse-all       解析所有PDF文件
  reset           重置知识库 (清空并重新导入PDF)
  rebuild-vectors 重新生成向量数据 (使用豆包嵌入模型)

🔍 诊断和维护:
  check-status    检查数据库和向量库状态
  test-imports    测试系统导入和依赖
  clean-init      清理所有数据并重新初始化 (增强版)

📝 使用示例:

基础操作:
  python scripts/chroma_db_manager.py --action list
  python scripts/chroma_db_manager.py --action stats
  python scripts/chroma_db_manager.py --action search --query "贷款审批"
  python scripts/chroma_db_manager.py --action clear

文档处理:
  python scripts/chroma_db_manager.py --action parse --pdf "XX银行零售贷款系统操作手册V1.1.pdf"
  python scripts/chroma_db_manager.py --action parse-all
  python scripts/chroma_db_manager.py --action reset --pdf "XX银行零售贷款系统操作手册V1.1.pdf"

诊断维护:
  python scripts/chroma_db_manager.py --action check-status
  python scripts/chroma_db_manager.py --action test-imports
  python scripts/chroma_db_manager.py --action clean-init

💡 推荐工作流程:
  1. 检查状态: --action check-status
  2. 测试环境: --action test-imports
  3. 清理重建: --action clean-init (如有问题)
  4. 验证结果: --action check-status
"""
    print(usage_text)


# ==================== 新增功能：数据状态检查 ====================

def check_data_status():
    """Print a status report comparing the relational DB and the vector store.

    Reports total document count, per-platform breakdown, documents created
    today, keyword coverage, and whether the DB row count matches the vector
    store count.

    Returns:
        True if the check ran to completion, False on error.
    """
    print("🔍 数据状况检查报告")
    print("=" * 60)

    try:
        from app.models.base import SessionLocal
        from app.models.operation import OperationDocument
        # Cleanup: `desc` and `timedelta` were imported but never used.
        from sqlalchemy import func
        from datetime import datetime

        # --- Relational database ---
        print("🗄️ 检查数据库状况...")
        db = SessionLocal()
        try:
            total_docs = db.query(OperationDocument).count()
            print(f"📊 数据库中总文档数: {total_docs}")

            # Per-platform document counts.
            platform_stats = db.query(
                OperationDocument.platform,
                func.count(OperationDocument.id)
            ).group_by(OperationDocument.platform).all()

            print("\n📋 按平台分组:")
            for platform, count in platform_stats:
                print(f"  {platform}: {count}个文档")

            # Documents created since local midnight.
            now = datetime.now()
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)

            today_docs = db.query(OperationDocument).filter(
                OperationDocument.created_at >= today
            ).count()

            print(f"\n📅 今天创建的文档: {today_docs}个")

            # Keyword coverage: rows with a non-null, non-empty keywords field.
            docs_with_keywords = db.query(OperationDocument).filter(
                OperationDocument.keywords.isnot(None),
                OperationDocument.keywords != ""
            ).count()

            # Restructured from a one-line ternary-around-print for readability;
            # output is identical.
            if total_docs > 0:
                coverage = docs_with_keywords / total_docs * 100
                print(f"🔑 包含关键词的文档: {docs_with_keywords}/{total_docs} ({coverage:.1f}%)")
            else:
                print("🔑 包含关键词的文档: 0/0")

        finally:
            db.close()

        # --- Vector store ---
        print("\n🔍 检查向量数据库状况...")
        manager = ChromaDBManager()
        if manager.connect() and manager.get_collection():
            try:
                vector_docs = manager.collection.count()
                print(f"📊 向量数据库文档数: {vector_docs}")

                # Consistency check between the two stores.
                print(f"\n📊 数据量对比:")
                print(f"  数据库文档数: {total_docs}")
                print(f"  向量库文档数: {vector_docs}")
                print(f"  差异: {abs(total_docs - vector_docs)}")

                if total_docs == vector_docs:
                    print("✅ 数据量一致")
                elif total_docs > vector_docs:
                    print("⚠️  数据库文档多于向量库，可能有文档未同步")
                else:
                    print("⚠️  向量库文档多于数据库，可能存在孤立的向量数据")

            except Exception as e:
                print(f"⚠️  向量库检查失败: {e}")
        else:
            print("❌ 无法连接到向量数据库")

        print("\n" + "=" * 60)
        print("✅ 数据状况检查完成")

        return True

    except Exception as e:
        print(f"❌ 数据状况检查失败: {e}")
        import traceback
        traceback.print_exc()
        return False

def test_imports():
    """Smoke-test the project's critical imports and constructors.

    Executes the exact import statements the rest of this script relies on,
    then instantiates each service once. Returns True if everything imports
    and constructs, False (with a traceback) otherwise.
    """
    print("🔍 测试系统导入...")

    try:
        # Base models and session factory.
        print("  测试基础模型...")
        from app.models.base import BaseModel, SessionLocal, create_tables
        from app.models.operation import OperationDocument, OperationCategory
        print("  ✅ 基础模型导入成功")

        # Service layer.
        print("  测试服务...")
        from app.services.enhanced_pdf_parser import EnhancedPDFParser
        from app.services.enhanced_document_processor import EnhancedDocumentProcessor
        from app.services.knowledge_service import KnowledgeService
        print("  ✅ 服务导入成功")

        # Application configuration.
        print("  测试配置...")
        from app.core.config import settings
        print("  ✅ 配置导入成功")

        # Instantiation check — objects are intentionally unused; constructing
        # them is the test.
        print("  测试实例化...")
        parser = EnhancedPDFParser()
        processor = EnhancedDocumentProcessor()
        knowledge_service = KnowledgeService()
        chroma_manager = ChromaDBManager()
        print("  ✅ 实例化成功")

        print("🎉 所有导入测试通过！")
        return True

    except Exception as e:
        print(f"❌ 导入测试失败: {e}")
        import traceback
        traceback.print_exc()
        return False

def clean_and_init():
    """Destructively wipe all data and rebuild the knowledge base.

    Requires an interactive 'YES' confirmation, then deletes all documents and
    categories from the relational DB, clears the vector store, and re-parses
    every PDF found in data/knowledge_base. Returns True on success, False on
    cancellation or error.
    """
    print("🧹 清理数据并重新初始化")
    print("=" * 60)

    print("⚠️  警告: 此操作将删除所有现有数据!")
    print("❓ 是否继续? (输入 'YES' 确认): ", end="")

    # Require an exact 'YES'; Ctrl-C / EOF also cancels.
    try:
        confirmation = input().strip()
        if confirmation != 'YES':
            print("❌ 操作已取消")
            return False
    except (KeyboardInterrupt, EOFError):
        print("\n❌ 操作已取消")
        return False

    try:
        from app.models.base import SessionLocal
        from app.models.operation import OperationDocument, OperationCategory
        from app.services.enhanced_document_processor import EnhancedDocumentProcessor
        from pathlib import Path

        # 1. Delete all documents and categories from the relational DB.
        print("\n🧹 清理数据库...")
        db = SessionLocal()
        try:
            deleted_docs = db.query(OperationDocument).delete()
            deleted_categories = db.query(OperationCategory).delete()
            db.commit()
            print(f"✅ 清理了 {deleted_docs} 个文档和 {deleted_categories} 个分类")
        finally:
            db.close()

        # 2. Clear the vector store (best effort).
        print("\n🧹 清理向量数据库...")
        manager = ChromaDBManager()
        if manager.connect():
            success = manager.clear_collection()
            if success:
                print("✅ 向量数据库清理完成")
            else:
                print("⚠️  向量数据库清理失败")

        # 3. Re-parse every PDF in the knowledge base.
        # NOTE(review): path is hard-coded here while other actions use
        # app_settings.knowledge_base_path — confirm these always match.
        print("\n🚀 重新初始化...")
        kb_path = Path("data/knowledge_base")
        pdf_files = list(kb_path.glob("*.pdf"))

        if not pdf_files:
            print("❌ 未找到PDF文件")
            return False

        processor = EnhancedDocumentProcessor()
        db = SessionLocal()
        try:
            total_docs_created = 0

            for pdf_file in pdf_files:
                print(f"\n📄 处理文件: {pdf_file.name}")
                result = processor.process_document(db, str(pdf_file))
                docs_created = len(result.get("documents_created", []))
                total_docs_created += docs_created
                print(f"✅ 创建了 {docs_created} 个文档")

            print(f"\n🎉 重新初始化完成! 总共创建了 {total_docs_created} 个文档")
            return True

        finally:
            db.close()

    except Exception as e:
        print(f"❌ 清理和初始化失败: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    # With no CLI arguments, show the usage guide; otherwise dispatch to main().
    if len(sys.argv) > 1:
        main()
    else:
        show_help()
