import os
import argparse
import yaml
from tools.ollama_manager import OllamaManager
from tools.vector_store import VectorStoreManager
from data.loaders import CodeLoader
from data.splitters import CodeSplitter
from core.processing_chain import DocumentGenerationChain
from core.dependency_graph import DependencyAnalyzer


def load_config(config_path='configs/paths.yml'):
    """Load the YAML configuration file.

    Args:
        config_path: Path to the YAML config file, relative to the working
            directory by default.

    Returns:
        dict: The parsed configuration. If the file is missing/unreadable,
        the YAML is malformed, or the file parses to something other than a
        mapping (e.g. an empty file yields ``None``), a minimal fallback
        config is returned so callers can still index ``config["paths"]``.
    """
    default_config = {"paths": {"output": "./output", "vectordb": "./vectordb"}}
    try:
        with open(config_path, 'r', encoding='utf-8') as file:
            config = yaml.safe_load(file)
    # OSError is checked first so plain I/O failures (missing file, bad
    # permissions) are handled without ever touching the yaml module.
    except OSError as e:
        print(f"加载配置文件出错: {e}")
        return default_config
    except yaml.YAMLError as e:  # malformed YAML content
        print(f"加载配置文件出错: {e}")
        return default_config
    # An empty or scalar YAML document is not usable as a config mapping;
    # fall back to defaults instead of letting the caller crash on indexing.
    return config if isinstance(config, dict) else default_config



def _index_documents(vector_store_manager, documents_by_file):
    """Split the loaded documents into chunks and add them to the vector store."""
    print("分割文档...")
    split_documents = CodeSplitter.split_documents(documents_by_file)

    print("将文档添加到向量库...")
    for file_path, documents in split_documents.items():
        print(f"  添加 {len(documents)} 个分块，来自 {file_path}")
        vector_store_manager.add_documents(documents)


def _save_results(results, output_dir, project_path):
    """Write each generated document to ``<output_dir>/<relative path>.md``.

    Args:
        results: Mapping of source file path -> generated Markdown content.
        output_dir: Root directory for the generated documentation tree.
        project_path: Project root, used to relativize absolute source paths.
    """
    print(f"保存结果到 {output_dir}")
    for file_path, doc_content in results.items():
        # os.path.join discards output_dir when the second component is
        # absolute, which would write outside the output tree -- make
        # absolute source paths relative to the project root first.
        rel_path = (os.path.relpath(file_path, project_path)
                    if os.path.isabs(file_path) else file_path)
        output_path = os.path.join(output_dir, rel_path + ".md")
        # Mirror the source tree's directory structure under output_dir.
        os.makedirs(os.path.dirname(output_path), exist_ok=True)

        with open(output_path, "w", encoding="utf-8") as f:
            f.write(doc_content)

        print(f"  已生成文档: {output_path}")


def main(args):
    """Run the end-to-end documentation-generation pipeline.

    Steps: load config, initialize the LLM and vector store, load and split
    the project's code, index it, analyze dependencies, then generate and
    save Markdown documentation.

    Args:
        args: Parsed CLI namespace with ``project_path``, ``output_dir``,
            ``vector_db_path``, ``model`` and optional ``file`` attributes.
    """
    # Load configuration; CLI flags take precedence over config-file paths.
    config = load_config()
    output_dir = args.output_dir or config["paths"]["output"]
    vector_db_path = args.vector_db_path or config["paths"]["vectordb"]

    print(f"===== 开始处理项目: {args.project_path} =====")

    # Ensure both target directories exist before any component writes.
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(vector_db_path, exist_ok=True)

    # Initialize the LLM through Ollama.
    print(f"正在初始化模型: {args.model}")
    ollama_manager = OllamaManager()
    llm = ollama_manager.get_model(args.model)

    # Initialize the Chroma-backed vector store.
    print(f"初始化向量数据库: {vector_db_path}")
    vector_store_manager = VectorStoreManager()
    vector_store = vector_store_manager.init_chroma(
        persist_directory=vector_db_path
    )

    # Load every code file in the project.
    print("开始加载代码文件...")
    documents_by_file = CodeLoader.load_project(args.project_path)
    print(f"加载了 {len(documents_by_file)} 个文件")

    # Build the inter-file dependency graph from each file's full text.
    print("分析代码依赖关系...")
    analyzer = DependencyAnalyzer()
    for file_path, docs in documents_by_file.items():
        if docs:  # skip files that produced no documents
            analyzer.parse_file(file_path, docs[0].page_content)
    dependency_graph = analyzer.build_dependency_graph()
    print(f"发现 {sum(len(deps) for deps in dependency_graph.values())} 个依赖关系")

    # Split and index the documents in the vector store.
    _index_documents(vector_store_manager, documents_by_file)

    # Set up the retrieval-augmented generation chain.
    print("设置文档生成链...")
    doc_chain = DocumentGenerationChain(llm, vector_store)
    doc_chain.setup_chain()

    # Collect the original (unsplit) text of every non-empty file.
    print("开始生成文档...")
    original_docs = {
        file_path: docs[0].page_content
        for file_path, docs in documents_by_file.items()
        if docs
    }

    # If a single file was requested, restrict processing to that file.
    if args.file:
        if args.file not in original_docs:
            print(f"错误: 找不到指定的文件 {args.file}")
            return
        related_files = analyzer.get_related_files(args.file, dependency_graph)
        print(f"发现相关文件: {related_files}")
        # NOTE(review): related_files is only printed, never passed to the
        # chain despite the original "enrich context" intent -- confirm
        # whether it should be fed into process_project.
        results = doc_chain.process_project({args.file: original_docs[args.file]})
    else:
        results = doc_chain.process_project(original_docs)

    _save_results(results, output_dir, args.project_path)

    print("===== 文档生成完成 =====")


if __name__ == "__main__":
    # CLI entry point: declare the flags, parse argv, and run the pipeline.
    cli = argparse.ArgumentParser(description="代码文档自动生成工具")
    # (flag, keyword arguments) pairs, in the order they appear in --help.
    option_specs = [
        ("--project_path", {"type": str, "required": True, "help": "代码项目路径"}),
        ("--output_dir", {"type": str, "help": "文档输出目录"}),
        ("--vector_db_path", {"type": str, "help": "向量数据库路径"}),
        ("--model", {"type": str, "default": "deepseek-r1:14b", "help": "使用的Ollama模型"}),
        ("--file", {"type": str, "help": "单独处理的文件路径(可选)"}),
    ]
    for flag, kwargs in option_specs:
        cli.add_argument(flag, **kwargs)

    main(cli.parse_args())
