import os
import re
import argparse
import subprocess
from collections import defaultdict
from typing import List, Set, Dict
from concurrent.futures import ThreadPoolExecutor, as_completed


def find_cpp_files(root_dir: str, exclude_dirs: Set[str] = None) -> List[str]:
    """Recursively collect all .cpp files under root_dir, skipping excluded trees.

    Args:
        root_dir: directory to search.
        exclude_dirs: directories (absolute, or relative to the current
            working directory) whose entire subtrees are skipped.
            Defaults to no exclusions.

    Returns:
        Paths (dirpath-joined, so relative iff root_dir is relative) of
        every file ending in '.cpp' that is not inside an excluded tree.
    """
    if exclude_dirs is None:
        exclude_dirs = set()

    # Normalize exclusions once so each directory test is a cheap string check.
    abs_exclude_dirs = {os.path.abspath(d) for d in exclude_dirs}

    def _is_excluded(path: str) -> bool:
        # True when path IS an excluded dir or lies anywhere beneath one.
        abs_path = os.path.abspath(path)
        return any(abs_path == ex or abs_path.startswith(ex + os.sep)
                   for ex in abs_exclude_dirs)

    cpp_files: List[str] = []
    for dirpath, dirnames, filenames in os.walk(root_dir):
        # Covers the case where root_dir itself is excluded.
        if _is_excluded(dirpath):
            dirnames[:] = []
            continue

        # Prune excluded children in place BEFORE os.walk descends, so
        # excluded subtrees are never entered at all (the original only
        # cleared dirnames after already yielding the excluded directory).
        dirnames[:] = [d for d in dirnames
                       if not _is_excluded(os.path.join(dirpath, d))]

        cpp_files.extend(os.path.join(dirpath, name)
                         for name in filenames if name.endswith('.cpp'))

    return cpp_files


def parse_gcc_output(output: str) -> List[str]:
    """Extract ace_engine header (.h) paths from ``gcc -MM -MG`` output.

    Args:
        output: the stdout text produced by a gcc dependency run.

    Returns:
        Deduplicated list (arbitrary order) of every '.h' token that
        contains 'foundation/arkui/ace_engine'.
    """
    header_re = re.compile(r'(\S+\.h)\b')
    # Single pass: match every .h token, keep only ace_engine ones.
    wanted = {
        token
        for token in header_re.findall(output)
        if 'foundation/arkui/ace_engine' in token
    }
    return list(wanted)


def execute_gcc_command(command: str) -> List[str]:
    """Run a gcc dependency command and return the headers it reports.

    Args:
        command: the full shell command line (as produced by
            generate_gcc_command).

    Returns:
        Header paths parsed from the command's stdout, or an empty list
        when the command fails or times out (best-effort by design).
    """
    try:
        completed = subprocess.run(
            command,
            shell=True,
            check=True,
            capture_output=True,  # equivalent to stdout=PIPE, stderr=PIPE
            text=True,
            timeout=30,  # guard against a hung compiler invocation
        )
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
        # A file that fails to preprocess simply contributes no headers.
        return []
    return parse_gcc_output(completed.stdout)


def generate_gcc_command(cpp_file: str, include_paths: List[str]) -> str:
    """Build the ``gcc -MM -MG -w`` command line for a single source file.

    Args:
        cpp_file: path of the .cpp file to analyze.
        include_paths: directories to pass as -I search paths.

    Returns:
        The complete shell command string.
    """
    flags = " ".join(f"-I{path}" for path in include_paths)
    # Joining three segments keeps the exact spacing of the original
    # f-string (including the double space when include_paths is empty).
    return " ".join(["gcc -MM -MG -w", flags, cpp_file])


def process_cpp_file(cpp_file: str, include_paths: List[str]) -> Dict:
    """Analyze one .cpp file and return its header-dependency result.

    Args:
        cpp_file: path of the source file to analyze.
        include_paths: -I search paths handed to gcc.

    Returns:
        Dict with keys:
          'cpp_file': the input path,
          'h_files':  list of matched headers (possibly empty),
          'success':  always True — execute_gcc_command already maps
                      compiler failures to an empty list, so an empty
                      result still counts as a successful analysis.
                      (The original expression
                      ``len(h_files) > 0 or not h_files`` was a
                      tautology evaluating to True for every input.)
    """
    command = generate_gcc_command(cpp_file, include_paths)
    h_files = execute_gcc_command(command)

    return {
        'cpp_file': cpp_file,
        'h_files': h_files,
        'success': True,
    }


def main():
    """CLI entry point: count ace_engine header usage across a source tree.

    Finds every .cpp file under ``root_dir`` (minus exclusions), runs
    ``gcc -MM -MG`` on each in a thread pool, tallies how often each
    header appears, and writes the counts in descending order to the
    output file.
    """
    parser = argparse.ArgumentParser(description='高效分析C++文件依赖关系')
    parser.add_argument('root_dir', help='源代码根目录')
    parser.add_argument('--include_dirs', nargs='+', required=True, help='头文件搜索路径')
    parser.add_argument('--exclude_dirs', nargs='+', default=[], help='排除的目录列表')
    parser.add_argument('--output', default='hfile_counts.txt', help='输出文件路径')
    # default=None lets ThreadPoolExecutor size itself from the CPU count,
    # which is what the help text promises (the old hard-coded 8 did not).
    parser.add_argument('--threads', type=int, default=None,
                       help='线程数，默认根据CPU核心数自动设置')

    args = parser.parse_args()

    # Fail fast on a bad root directory.
    if not os.path.isdir(args.root_dir):
        print(f"错误: 目录 '{args.root_dir}' 不存在!")
        return

    print("快速查找.cpp文件中...")
    cpp_files = find_cpp_files(args.root_dir, set(args.exclude_dirs))

    if not cpp_files:
        print("未找到任何.cpp文件")
        return

    print(f"找到 {len(cpp_files)} 个.cpp文件，开始分析...")

    # header path -> number of .cpp files whose dependency output mentions it
    hfile_counts = defaultdict(int)
    processed_count = 0
    failed_count = 0

    # Thread pool (NOT a process pool, as an earlier comment claimed): the
    # work is dominated by waiting on gcc subprocesses, so threads suffice.
    with ThreadPoolExecutor(max_workers=args.threads) as executor:
        # Submit every file up front; drain as they complete.
        future_to_file = {
            executor.submit(process_cpp_file, cpp_file, args.include_dirs): cpp_file
            for cpp_file in cpp_files
        }

        for future in as_completed(future_to_file):
            try:
                result = future.result()
                processed_count += 1

                # Aggregate immediately instead of storing per-file results.
                for h_file in result['h_files']:
                    hfile_counts[h_file] += 1

                # Lightweight progress indicator.
                if processed_count % 100 == 0:
                    print(f"已处理 {processed_count}/{len(cpp_files)} 文件")

            except Exception:
                failed_count += 1

    if hfile_counts:
        # Most frequently included headers first.
        sorted_counts = sorted(hfile_counts.items(), key=lambda x: x[1], reverse=True)

        with open(args.output, 'w', encoding='utf-8') as f:
            # Minimal summary header, then one "path:count" line per header.
            f.write(f"processed_files: {processed_count}\n")
            f.write(f"failed_files: {failed_count}\n")
            f.write(f"unique_header_files: {len(hfile_counts)}\n\n")

            # Normalize ace_engine headers so each path starts at the prefix
            # (drops any leading absolute-path segment before it).
            prefix = "foundation/arkui/ace_engine/"
            for h_file, count in sorted_counts:
                if prefix in h_file:
                    relative_path = h_file.split(prefix, 1)[1]
                    f.write(f"{prefix}{relative_path}:{count}\n")
                else:
                    f.write(f"{h_file}:{count}\n")

        print(f"\n分析完成! 结果保存到 {args.output}")
        print(f"成功处理: {processed_count} 文件")
        print(f"失败: {failed_count} 文件")
        print(f"发现 {len(hfile_counts)} 个唯一头文件")
    else:
        print("未找到任何头文件依赖")


if __name__ == "__main__":
    main()