#!/usr/bin/env python3
"""
简化版向量化脚本
使用配置文件进行MD文件向量化
"""

import json
import os
import sys
from pathlib import Path
from typing import Optional

# 添加当前目录到Python路径
sys.path.append(str(Path(__file__).parent))

from vectorize_md_to_milvus import MDVectorizer
import logging

def load_config(config_path: str = "config.json") -> Optional[dict]:
    """Load and parse the JSON configuration file.

    Args:
        config_path: Path to the JSON config file (defaults to "config.json"
            in the current working directory).

    Returns:
        The parsed configuration dict, or None when the file does not exist
        or does not contain valid JSON. Errors are reported on stdout rather
        than raised, since this runs before logging is configured.
    """
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"配置文件 {config_path} 不存在")
        return None
    except json.JSONDecodeError as e:
        print(f"配置文件格式错误: {e}")
        return None

def main() -> int:
    """Entry point: vectorize all MD files in the configured directory.

    Loads config.json, configures logging, runs the vectorizer over the
    input directory, and prints a processing/collection summary.

    Returns:
        Process exit code: 0 on success, 1 on any failure (missing/invalid
        config, missing input directory, or a processing error).
    """
    print("MD文件向量化工具")
    print("=" * 40)

    # Load configuration; bail out early if it is missing or malformed
    # (load_config already reported the problem on stdout).
    config = load_config()
    if not config:
        return 1

    # Configure logging before any processing. The original code read the
    # "logging" section unguarded, so an incomplete config crashed with a
    # raw traceback instead of a clean exit code.
    try:
        log_cfg = config["logging"]
        log_level = getattr(logging, log_cfg["level"], logging.INFO)
        logging.basicConfig(
            level=log_level,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.StreamHandler(),
                logging.FileHandler(log_cfg["log_file"], encoding='utf-8')
            ]
        )
    except (KeyError, OSError) as e:
        print(f"日志配置无效: {e}")
        return 1

    logger = logging.getLogger(__name__)

    try:
        # Validate the input directory before any heavy setup.
        # isdir (not exists): a plain file at this path is equally unusable.
        md_dir = config["input"]["md_directory"]
        if not os.path.isdir(md_dir):
            logger.error("输入目录不存在: %s", md_dir)
            return 1

        # Build the vectorizer from the embedding/milvus config sections.
        logger.info("初始化向量化处理器...")
        vectorizer = MDVectorizer(
            embedding_model=config["embedding"]["model"],
            embedding_url=config["embedding"]["url"],
            milvus_host=config["milvus"]["host"],
            milvus_port=config["milvus"]["port"],
            collection_name=config["milvus"]["collection_name"]
        )

        # Chunking parameters are set as attributes after construction;
        # presumably MDVectorizer exposes them that way — TODO confirm.
        vectorizer.chunk_size = config["processing"]["chunk_size"]
        vectorizer.chunk_overlap = config["processing"]["chunk_overlap"]

        # Process every MD file under the input directory.
        logger.info("开始处理目录: %s", md_dir)
        results = vectorizer.process_directory(md_dir)

        # Report per-file outcomes.
        print("\n处理结果:")
        print(f"总文件数: {results['total']}")
        print(f"成功处理: {results['success']}")
        print(f"处理失败: {results['failed']}")

        # failed > 0 implies total > 0, so the division is safe here.
        if results['failed'] > 0:
            print(f"失败率: {results['failed']/results['total']*100:.1f}%")

        # Report collection-level stats from Milvus.
        stats = vectorizer.get_collection_stats()
        print(f"\n集合统计:")
        print(f"总向量数: {stats.get('total_entities', 0)}")
        print(f"集合名称: {stats.get('collection_name', 'N/A')}")

        logger.info("向量化完成!")
        return 0

    except Exception as e:
        # Top-level boundary: log with traceback and convert to exit code.
        logger.exception("程序执行失败: %s", e)
        return 1

if __name__ == "__main__":
    exit(main())
