#!/usr/bin/env python3
"""
统一测试运行脚本
支持功能：
- 单元测试执行
- Valgrind内存检查
- 代码覆盖率测试
- 性能测试
- 并行执行
- 详细报告生成
"""

import argparse
import os
import sys
import subprocess
import concurrent.futures
import json
import xml.etree.ElementTree as ET
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Any, Tuple, Optional
import tempfile
import re
import shutil
from dataclasses import dataclass
from enum import Enum


class TestType(Enum):
    """Test categories the runner understands; ``ALL`` is a CLI shorthand expanded by callers."""
    UNIT = "unit"
    VALGRIND = "valgrind"
    COVERAGE = "coverage"
    PERFORMANCE = "performance"
    ALL = "all"


class TestResult(Enum):
    """Outcome of a single test execution."""
    PASS = "PASS"
    FAIL = "FAIL"
    SKIP = "SKIP"


@dataclass
class TestInfo:
    """Metadata and outcome for one executed test binary."""
    name: str  # basename of the executable
    path: str  # path to the executable as discovered
    type: TestType  # which runner produced this result
    result: TestResult = TestResult.SKIP
    duration: float = 0.0  # wall-clock seconds
    exit_code: int = 0
    output: str = ""  # captured stdout (or short status text)
    error: str = ""  # captured stderr or human-readable failure reason
    details: Optional[Dict[str, Any]] = None  # type-specific extras (valgrind/coverage stats)
    
    def __post_init__(self) -> None:
        # A mutable {} default is unsafe on a dataclass field, so the None
        # sentinel is normalized to an empty dict here instead.
        if self.details is None:
            self.details = {}


class Colors:
    """ANSI escape sequences for colored terminal output."""
    RED = '\033[0;31m'
    GREEN = '\033[0;32m'
    YELLOW = '\033[1;33m'
    BLUE = '\033[0;34m'
    PURPLE = '\033[0;35m'
    CYAN = '\033[0;36m'
    WHITE = '\033[1;37m'
    BOLD = '\033[1m'
    RESET = '\033[0m'  # restore default terminal attributes


class Logger:
    """Timestamped, color-coded console logging helpers."""
    
    @staticmethod
    def log(color: str, message: str, prefix: str = ""):
        """Print *message* behind a cyan timestamp; a non-empty *prefix* (e.g. "INFO") is colorized."""
        stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        head = f"{Colors.CYAN}[{stamp}]{Colors.RESET}"
        if prefix:
            print(f"{head} {color}{prefix}{Colors.RESET} {message}")
        else:
            print(f"{head} {color}{message}{Colors.RESET}")
    
    @staticmethod
    def info(message: str):
        Logger.log(Colors.BLUE, message, "INFO")
    
    @staticmethod
    def success(message: str):
        Logger.log(Colors.GREEN, message, "SUCCESS")
    
    @staticmethod
    def warning(message: str):
        Logger.log(Colors.YELLOW, message, "WARNING")
    
    @staticmethod
    def error(message: str):
        Logger.log(Colors.RED, message, "ERROR")
    
    @staticmethod
    def debug(message: str, verbose: bool = False):
        """Emitted only when *verbose* is truthy."""
        if verbose:
            Logger.log(Colors.PURPLE, message, "DEBUG")


class TestRunner:
    """测试运行器"""
    
    def __init__(self, config: Dict[str, Any]):
        """Initialize the runner from *config*.

        ``config['execution_timestamp']`` becomes the execution id that groups
        this run's report directories; those directories are derived and
        created immediately.
        """
        self.config = config
        self.execution_id = config['execution_timestamp']
        self.start_time = datetime.now()
        self.results: List["TestInfo"] = []
        # Re-point output directories at timestamped subdirs, then create them.
        self.update_output_paths()
        self.ensure_output_dirs()
    
    def update_output_paths(self):
        """Derive timestamped, per-test-type report directories from the base report dir."""
        base_dir = Path(self.config['report_dir'])
        
        # One '<base>/<kind>/<execution_id>' directory per test kind.
        for kind in ('unit', 'valgrind', 'coverage'):
            self.config[f'{kind}_report_dir'] = str(base_dir / kind / self.execution_id)
        
        # The valgrind/coverage tools write straight into their report dirs.
        self.config['valgrind_dir'] = self.config['valgrind_report_dir']
        self.config['coverage_dir'] = self.config['coverage_report_dir']
    
    def ensure_output_dirs(self):
        """Create every configured output directory; empty/missing entries are skipped."""
        wanted = (
            self.config['valgrind_dir'],
            self.config['coverage_dir'],
            self.config.get('unit_report_dir', ''),
            self.config.get('valgrind_report_dir', ''),
            self.config.get('coverage_report_dir', ''),
        )
        for dir_path in filter(None, wanted):
            Path(dir_path).mkdir(parents=True, exist_ok=True)
    
    def find_executables(self, test_type: Optional[TestType] = None) -> List[str]:
        """Recursively collect executable test binaries under ``config['test_dir']``.

        Interpreter scripts (.sh/.py/.pl) are skipped.  For Valgrind runs with
        the default ``valgrind_filter == 'security'``, only binaries whose name
        contains "security" are kept.

        Returns a (possibly empty) list of path strings.
        """
        test_dir = Path(self.config['test_dir'])
        if not test_dir.exists():
            Logger.error(f"测试目录不存在: {test_dir}")
            return []
        
        Logger.info(f"在目录中查找可执行文件: {test_dir}")
        
        script_suffixes = {'.sh', '.py', '.pl'}
        executables: List[str] = []
        for exe_file in test_dir.rglob('*'):
            # Only regular files with the executable bit count.
            if not (exe_file.is_file() and os.access(exe_file, os.X_OK)):
                continue
            if exe_file.suffix in script_suffixes:
                continue
            if test_type == TestType.VALGRIND:
                valgrind_filter = self.config.get('valgrind_filter', 'security')
                # By default only the security tests run under valgrind.
                if valgrind_filter == 'security' and 'security' not in exe_file.name.lower():
                    continue
            executables.append(str(exe_file))
        
        Logger.info(f"找到 {len(executables)} 个可执行文件")
        return executables
    
    def run_unit_test(self, executable: str) -> TestInfo:
        """Execute *executable* once and record exit code, output and timing.

        A non-zero exit code, a timeout, or any launch failure marks the test
        as FAIL with the reason stored in ``error``.
        """
        info = TestInfo(
            name=Path(executable).name,
            path=executable,
            type=TestType.UNIT
        )
        timeout = self.config.get('timeout', 300)
        
        try:
            started = datetime.now()
            proc = subprocess.run(
                [executable],
                capture_output=True,
                text=True,
                timeout=timeout
            )
            info.duration = (datetime.now() - started).total_seconds()
            info.exit_code = proc.returncode
            info.output = proc.stdout
            info.error = proc.stderr
            info.result = TestResult.PASS if proc.returncode == 0 else TestResult.FAIL
        except subprocess.TimeoutExpired:
            info.result = TestResult.FAIL
            info.error = f"测试超时 ({timeout}秒)"
        except Exception as e:
            info.result = TestResult.FAIL
            info.error = str(e)
        
        return info
    
    def run_valgrind_test(self, executable: str) -> TestInfo:
        """Run *executable* under valgrind/memcheck and analyze its log for errors.

        Skipped when the binary is sanitizer-instrumented (ASAN/UBSAN and
        valgrind are mutually exclusive).  Both a text log and an XML log are
        written into ``config['valgrind_dir']``.
        """
        info = TestInfo(
            name=Path(executable).name,
            path=executable,
            type=TestType.VALGRIND
        )
        
        if self.is_sanitizer_enabled(executable):
            info.result = TestResult.SKIP
            info.error = "已启用ASAN/UBSAN，跳过Valgrind检查"
            return info
        
        log_file = Path(self.config['valgrind_dir']) / f"{info.name}.log"
        timeout = self.config.get('valgrind_timeout')
        
        try:
            started = datetime.now()
            
            cmd = [
                'valgrind',
                '--tool=memcheck',
                '--leak-check=full',
                '--show-leak-kinds=all',
                '--track-origins=yes',
                '--xml=yes',
                f'--xml-file={log_file}.xml',
                f'--log-file={log_file}',
                executable
            ]
            # timeout=None means "no limit" to subprocess.run, which matches
            # the default configuration — no branching needed.
            proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
            
            info.duration = (datetime.now() - started).total_seconds()
            info.exit_code = proc.returncode
            info.output = proc.stdout
            
            analysis = self.analyze_valgrind_log(str(log_file))
            info.details = analysis
            
            if analysis['has_errors']:
                info.result = TestResult.FAIL
                info.error = f"检测到内存错误: {analysis['error_summary']}"
            else:
                info.result = TestResult.PASS
        except subprocess.TimeoutExpired:
            info.result = TestResult.FAIL
            if timeout:
                info.error = f"Valgrind检查超时 ({timeout}秒)"
            else:
                info.error = "Valgrind检查被中断"
        except Exception as e:
            info.result = TestResult.FAIL
            info.error = str(e)
        
        return info
    
    def run_coverage_test(self, executable: str) -> TestInfo:
        """Run a gcov-instrumented binary, then harvest and summarize its coverage.

        Skips binaries without gcov symbols; a failing test run or a failing
        lcov/genhtml invocation marks the result FAIL.
        """
        info = TestInfo(
            name=Path(executable).name,
            path=executable,
            type=TestType.COVERAGE
        )
        
        if not self.is_coverage_enabled(executable):
            info.result = TestResult.SKIP
            info.error = "可执行文件未启用覆盖率支持"
            return info
        
        try:
            # Stale .gcda counter files from earlier runs would skew the numbers.
            self.cleanup_coverage_data()
            
            started = datetime.now()
            proc = subprocess.run(
                [executable],
                capture_output=True,
                text=True,
                timeout=self.config.get('timeout', 300)
            )
            info.duration = (datetime.now() - started).total_seconds()
            
            if proc.returncode != 0:
                info.result = TestResult.FAIL
                info.error = f"测试执行失败: {proc.stderr}"
                return info
            
            report = self.generate_coverage_report(info.name)
            info.details = report
            
            if report.get('success', False):
                info.result = TestResult.PASS
                info.output = f"覆盖率: {report.get('line_coverage', 0):.1f}%"
            else:
                info.result = TestResult.FAIL
                info.error = report.get('error', '生成覆盖率报告失败')
        except Exception as e:
            info.result = TestResult.FAIL
            info.error = str(e)
        
        return info
    
    def is_sanitizer_enabled(self, executable: str) -> bool:
        """Return True when *executable* appears to be built with ASAN/UBSAN.

        Checks the sanitizer environment variables first, then greps the
        binary's symbol table via ``nm``.  Failure to inspect the binary
        (``nm`` missing, unreadable file) is treated as "no sanitizer".
        """
        if os.environ.get('ASAN_OPTIONS') or os.environ.get('UBSAN_OPTIONS'):
            return True
        
        try:
            result = subprocess.run(
                ['nm', executable],
                capture_output=True,
                text=True
            )
        # Narrowed from a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit.
        except (OSError, subprocess.SubprocessError):
            return False
        
        return '__asan' in result.stdout or '__ubsan' in result.stdout
    
    def is_coverage_enabled(self, executable: str) -> bool:
        """Return True when *executable* carries gcov instrumentation symbols.

        Any failure to run ``nm`` (tool missing, unreadable file) yields False.
        """
        try:
            result = subprocess.run(
                ['nm', executable],
                capture_output=True,
                text=True
            )
        # Narrowed from a bare ``except:`` — don't swallow KeyboardInterrupt.
        except (OSError, subprocess.SubprocessError):
            return False
        return '__gcov' in result.stdout
    
    def analyze_valgrind_log(self, log_file: str) -> Dict[str, Any]:
        """Parse a valgrind memcheck text log into an error/leak summary dict.

        Returns keys: ``has_errors``, ``error_count``, ``definitely_lost``,
        ``indirectly_lost``, ``possibly_lost`` (byte counts) and
        ``error_summary``.  A missing or unreadable log yields the all-zero
        default.
        """
        result = {
            'has_errors': False,
            'error_count': 0,
            'definitely_lost': 0,
            'indirectly_lost': 0,
            'possibly_lost': 0,
            'error_summary': ''
        }
        
        try:
            if not os.path.exists(log_file):
                return result
            
            with open(log_file, 'r') as f:
                content = f.read()
            
            error_match = re.search(r'ERROR SUMMARY: (\d+) errors', content)
            if error_match:
                result['error_count'] = int(error_match.group(1))
            
            # The three leak categories share the "<label>: N bytes" format —
            # parse them table-driven instead of three copy-pasted blocks.
            leak_patterns = {
                'definitely_lost': r'definitely lost: ([\d,]+) bytes',
                'indirectly_lost': r'indirectly lost: ([\d,]+) bytes',
                'possibly_lost': r'possibly lost: ([\d,]+) bytes',
            }
            for key, pattern in leak_patterns.items():
                match = re.search(pattern, content)
                if match:
                    result[key] = int(match.group(1).replace(',', ''))
            
            # "possibly lost" alone is deliberately not treated as a failure.
            result['has_errors'] = (
                result['error_count'] > 0 or
                result['definitely_lost'] > 0 or
                result['indirectly_lost'] > 0
            )
            
            if result['has_errors']:
                result['error_summary'] = f"错误: {result['error_count']}, 泄漏: {result['definitely_lost']} bytes"
            
        except Exception as e:
            Logger.debug(f"分析Valgrind日志失败: {e}", self.config.get('verbose', False))
        
        return result
    
    def cleanup_coverage_data(self):
        """Delete all ``*.gcda`` counter files under the current directory.

        Best-effort: failures are only logged in verbose mode.
        """
        try:
            for counter_file in Path('.').rglob('*.gcda'):
                counter_file.unlink()
        except Exception as e:
            Logger.debug(f"清理覆盖率数据失败: {e}", self.config.get('verbose', False))
    
    def generate_coverage_report(self, test_name: str) -> Dict[str, Any]:
        """Run lcov + genhtml for *test_name* and return parsed coverage stats.

        The returned dict always contains success/line_coverage/
        function_coverage/branch_coverage/error; ``success`` stays False when
        either tool fails.
        """
        result = {
            'success': False,
            'line_coverage': 0.0,
            'function_coverage': 0.0,
            'branch_coverage': 0.0,
            'error': ''
        }
        
        try:
            coverage_dir = Path(self.config['coverage_dir'])
            info_file = coverage_dir / f"{test_name}.info"
            
            # Capture the raw counters into an lcov tracefile ...
            subprocess.run(
                ['lcov', '--capture', '--directory', '.',
                 '--output-file', str(info_file), '--quiet'],
                check=True, capture_output=True
            )
            
            # ... then render the tracefile as a browsable HTML tree.
            subprocess.run(
                ['genhtml', str(info_file),
                 '--output-directory', str(coverage_dir / test_name), '--quiet'],
                check=True, capture_output=True
            )
            
            if info_file.exists():
                result.update(self.parse_coverage_info(str(info_file)))
            
            result['success'] = True
            
        except subprocess.CalledProcessError as e:
            result['error'] = f"生成覆盖率报告失败: {e}"
        except Exception as e:
            result['error'] = str(e)
        
        return result
    
    def parse_coverage_info(self, info_file: str) -> Dict[str, float]:
        """Aggregate line/function/branch coverage percentages from an lcov tracefile.

        An lcov ``.info`` file contains one ``LF:<n>``/``LH:<n>`` (and
        ``FNF``/``FNH``, ``BRF``/``BRH``) record pair per source file, where
        *n* is a count.  BUG FIX: the previous implementation counted the
        number of matching records instead of summing their values — and since
        every file record carries both tags, each ratio evaluated to ~100%
        regardless of actual coverage.  The counts are now summed.
        """
        result = {
            'line_coverage': 0.0,
            'function_coverage': 0.0,
            'branch_coverage': 0.0
        }
        
        try:
            with open(info_file, 'r') as f:
                content = f.read()
            
            def _total(tag: str) -> int:
                # Sum the numeric payloads of every "<tag>:<n>" record.
                return sum(int(v) for v in re.findall(rf'^{tag}:(\d+)', content, re.MULTILINE))
            
            for key, found_tag, hit_tag in (
                ('line_coverage', 'LF', 'LH'),
                ('function_coverage', 'FNF', 'FNH'),
                ('branch_coverage', 'BRF', 'BRH'),
            ):
                found = _total(found_tag)
                if found > 0:
                    result[key] = (_total(hit_tag) / found) * 100
                
        except Exception as e:
            # Pass verbose explicitly, consistent with the other Logger.debug calls.
            Logger.debug(f"解析覆盖率信息失败: {e}", self.config.get('verbose', False))
        
        return result
    
    def run_tests_parallel(self, executables: List[str], test_type: TestType) -> List[TestInfo]:
        """Run *executables* of *test_type*, sequentially or via a thread pool.

        ``config['parallel_jobs']`` > 1 enables parallel execution; results are
        logged as they complete, so parallel output order is nondeterministic.
        BUG FIX: the per-type log file is now closed in a ``finally`` block so
        it is not leaked when a test raises mid-run.
        """
        results: List[TestInfo] = []
        max_workers = self.config.get('parallel_jobs', 1)
        
        log_file = self.create_test_log_file(test_type)
        try:
            if max_workers == 1:
                # Sequential execution.
                for executable in executables:
                    result = self.run_single_test(executable, test_type)
                    results.append(result)
                    self.log_test_result(result, log_file)
            else:
                Logger.info(f"使用并行执行: {max_workers} 个任务")
                with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
                    future_to_exe = {
                        executor.submit(self.run_single_test, exe, test_type): exe
                        for exe in executables
                    }
                    for future in concurrent.futures.as_completed(future_to_exe):
                        result = future.result()
                        results.append(result)
                        self.log_test_result(result, log_file)
        finally:
            # Close the log even if a test (or future.result()) raised.
            if log_file:
                log_file.close()
        
        return results
    
    def create_test_log_file(self, test_type: TestType):
        """Open the run log for *test_type*, write its header, and return the file.

        Returns None (and logs in verbose mode) when the file cannot be created.
        """
        try:
            report_dir = Path(self.config.get(f'{test_type.value}_report_dir',
                                              self.config['report_dir']))
            report_dir.mkdir(parents=True, exist_ok=True)
            
            log_file = open(report_dir / f'{test_type.value}_run.log', 'w', encoding='utf-8')
            
            header = (
                f"# {test_type.value.upper()} 测试运行日志\n"
                f"# 开始时间: {self.start_time.isoformat()}\n"
                f"# 执行ID: {self.execution_id}\n"
                "# " + "=" * 50 + "\n\n"
            )
            log_file.write(header)
            log_file.flush()
            
            return log_file
        except Exception as e:
            Logger.debug(f"创建日志文件失败: {e}", self.config.get('verbose', False))
            return None
    
    def run_single_test(self, executable: str, test_type: TestType) -> TestInfo:
        """Dispatch *executable* to the runner matching *test_type*.

        Raises ValueError for types without a dedicated runner
        (PERFORMANCE/ALL), same as the original if/elif chain.
        """
        runners = {
            TestType.UNIT: self.run_unit_test,
            TestType.VALGRIND: self.run_valgrind_test,
            TestType.COVERAGE: self.run_coverage_test,
        }
        runner = runners.get(test_type)
        if runner is None:
            raise ValueError(f"不支持的测试类型: {test_type}")
        return runner(executable)
    
    def log_test_result(self, test_info: TestInfo, log_file=None):
        """Print one test's outcome to the console and append it to *log_file*.

        Console lines are colorized; the log file receives a plain-text copy.
        BUG FIX: the failure reason used to be written to the log twice under
        the identical condition ("错误:" and "详细错误:"); it is now written once.
        """
        status_color = {
            TestResult.PASS: Colors.GREEN,
            TestResult.FAIL: Colors.RED,
            TestResult.SKIP: Colors.YELLOW
        }
        
        status_symbol = {
            TestResult.PASS: "✓",
            TestResult.FAIL: "✗",
            TestResult.SKIP: "⊘"
        }
        
        color = status_color[test_info.result]
        symbol = status_symbol[test_info.result]
        duration_str = f"({test_info.duration:.2f}s)" if test_info.duration > 0 else ""
        
        # Console output.
        print(f"{color}{symbol} {test_info.result.value}: {test_info.name} {duration_str}{Colors.RESET}")
        
        # Plain-text copy into the per-type run log.
        if log_file:
            log_file.write(f"{symbol} {test_info.result.value}: {test_info.name} {duration_str}\n")
            
            if test_info.result == TestResult.FAIL and test_info.error:
                log_file.write(f"    错误: {test_info.error}\n")
            
            if test_info.output:
                log_file.write(f"    输出: {test_info.output}\n")
            
            log_file.write("\n")
            log_file.flush()
        
        if test_info.result == TestResult.FAIL and test_info.error:
            print(f"    {Colors.YELLOW}错误: {test_info.error}{Colors.RESET}")
        
        # Truncated stdout echo in verbose mode.
        if self.config.get('verbose', False) and test_info.output:
            print(f"    {Colors.CYAN}输出: {test_info.output[:200]}...{Colors.RESET}")
    
    def run_all_tests(self, test_types: List[TestType]) -> Dict[str, Any]:
        """Run every requested test type, then build reports and return a summary."""
        collected: List[TestInfo] = []
        
        for test_type in test_types:
            # ALL is a CLI convenience token, not a runnable type.
            if test_type == TestType.ALL:
                continue
            
            executables = self.find_executables(test_type)
            if not executables:
                Logger.warning(f"没有找到 {test_type.value} 类型的可执行测试文件")
                continue
            
            Logger.info(f"开始运行 {test_type.value} 测试...")
            Logger.info(f"找到 {len(executables)} 个测试文件")
            
            collected.extend(self.run_tests_parallel(executables, test_type))
        
        if not collected:
            Logger.warning("没有执行任何测试")
            return {'summary': {'total': 0, 'passed': 0, 'failed': 0, 'skipped': 0}}
        
        self.results = collected
        
        summary = self.generate_summary()
        self.generate_reports()
        
        return summary
    
    def generate_summary(self) -> Dict[str, Any]:
        """Build overall and per-type pass/fail/skip counts for self.results."""
        # Enum -> summary-key mapping replaces the original if/elif chain.
        counter_key = {
            TestResult.PASS: 'passed',
            TestResult.FAIL: 'failed',
            TestResult.SKIP: 'skipped',
        }
        end_time = datetime.now()
        
        summary = {
            'total': len(self.results),
            'passed': sum(1 for r in self.results if r.result == TestResult.PASS),
            'failed': sum(1 for r in self.results if r.result == TestResult.FAIL),
            'skipped': sum(1 for r in self.results if r.result == TestResult.SKIP),
            'duration': (end_time - self.start_time).total_seconds(),
            'start_time': self.start_time.isoformat(),
            'end_time': end_time.isoformat()
        }
        
        # Per-type breakdown with the same counter keys.
        by_type: Dict[str, Dict[str, int]] = {}
        for r in self.results:
            stats = by_type.setdefault(
                r.type.value, {'total': 0, 'passed': 0, 'failed': 0, 'skipped': 0})
            stats['total'] += 1
            stats[counter_key[r.result]] += 1
        
        summary['by_type'] = by_type
        
        return summary
    
    def generate_reports(self):
        """Write per-type JSON and HTML reports, plus a combined report when
        more than one test type was executed."""
        seen_types = {r.type for r in self.results}
        
        for test_type in seen_types:
            type_results = [r for r in self.results if r.type == test_type]
            
            # Per-type report directory (falls back to the base report dir).
            report_dir = Path(self.config.get(f'{test_type.value}_report_dir',
                                              self.config['report_dir']))
            report_dir.mkdir(parents=True, exist_ok=True)
            
            payload = {
                'summary': self.generate_type_summary(test_type, type_results),
                'execution_info': {
                    'execution_id': self.execution_id,
                    'start_time': self.start_time.isoformat(),
                    'test_type': test_type.value,
                    'config': self.config
                },
                'results': [
                    {
                        'name': r.name,
                        'path': r.path,
                        'type': r.type.value,
                        'result': r.result.value,
                        'duration': r.duration,
                        'exit_code': r.exit_code,
                        'output': r.output,
                        'error': r.error,
                        'details': r.details
                    } for r in type_results
                ]
            }
            
            json_file = report_dir / f'{test_type.value}_report.json'
            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, indent=2, ensure_ascii=False)
            
            html_file = report_dir / f'{test_type.value}_report.html'
            self.generate_html_report(payload, html_file, test_type)
            
            Logger.info(f"{test_type.value} 报告已生成: {json_file}")
            Logger.info(f"{test_type.value} HTML报告已生成: {html_file}")
        
        if len(seen_types) > 1:
            self.generate_combined_report(seen_types)
    
    def generate_type_summary(self, test_type: TestType, results: List[TestInfo]) -> Dict[str, Any]:
        """Summarize counts and cumulative duration for a single test type."""
        tally = {TestResult.PASS: 0, TestResult.FAIL: 0, TestResult.SKIP: 0}
        for r in results:
            tally[r.result] += 1
        
        return {
            'total': len(results),
            'passed': tally[TestResult.PASS],
            'failed': tally[TestResult.FAIL],
            'skipped': tally[TestResult.SKIP],
            'duration': sum(r.duration for r in results),
            'test_type': test_type.value,
            'start_time': self.start_time.isoformat(),
            # This is the report-generation time, not the last test's end time.
            'end_time': datetime.now().isoformat()
        }
    
    def generate_combined_report(self, test_types: set):
        """Write one JSON report aggregating every executed test type."""
        combined_dir = Path(self.config['report_dir']) / 'combined' / self.execution_id
        combined_dir.mkdir(parents=True, exist_ok=True)
        
        report = {
            'summary': self.generate_summary(),
            'execution_info': {
                'execution_id': self.execution_id,
                'start_time': self.start_time.isoformat(),
                'test_types': [t.value for t in test_types]
            },
            # One result list per executed test type.
            'results_by_type': {
                test_type.value: [
                    {
                        'name': r.name,
                        'path': r.path,
                        'result': r.result.value,
                        'duration': r.duration,
                        'error': r.error
                    }
                    for r in self.results if r.type == test_type
                ]
                for test_type in test_types
            }
        }
        
        json_file = combined_dir / 'combined_report.json'
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)
        
        Logger.info(f"综合报告已生成: {json_file}")
    
    def generate_html_report(self, data: Dict[str, Any], output_file: Path, test_type: Optional[TestType] = None):
        """生成HTML报告"""
        html_content = f"""
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>测试报告</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        .summary {{ background-color: #f5f5f5; padding: 20px; border-radius: 5px; margin-bottom: 20px; }}
        .pass {{ color: #28a745; }}
        .fail {{ color: #dc3545; }}
        .skip {{ color: #ffc107; }}
        table {{ width: 100%; border-collapse: collapse; margin-top: 20px; }}
        th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
        th {{ background-color: #f2f2f2; }}
        .details {{ max-width: 300px; max-height: 100px; overflow: auto; font-family: monospace; font-size: 12px; }}
    </style>
</head>
<body>
    <h1>测试报告</h1>
    
    <div class="summary">
        <h2>摘要</h2>
        <p><strong>总计:</strong> {data['summary']['total']}</p>
        <p><strong class="pass">通过:</strong> {data['summary']['passed']}</p>
        <p><strong class="fail">失败:</strong> {data['summary']['failed']}</p>
        <p><strong class="skip">跳过:</strong> {data['summary']['skipped']}</p>
        <p><strong>耗时:</strong> {data['summary']['duration']:.2f} 秒</p>
        <p><strong>开始时间:</strong> {data['summary']['start_time']}</p>
        <p><strong>结束时间:</strong> {data['summary']['end_time']}</p>
    </div>
    
    <h2>详细结果</h2>
    <table>
        <tr>
            <th>测试名称</th>
            <th>类型</th>
            <th>结果</th>
            <th>耗时(秒)</th>
            <th>详情</th>
        </tr>
        """
        
        for result in data['results']:
            result_class = result['result'].lower()
            details = result.get('error') or result.get('output', '') or str(result.get('details', ''))
            details = details[:500] + ('...' if len(details) > 500 else '')
            
            html_content += f"""
        <tr>
            <td>{result['name']}</td>
            <td>{result['type']}</td>
            <td><span class="{result_class}">{result['result']}</span></td>
            <td>{result['duration']:.2f}</td>
            <td class="details">{details}</td>
        </tr>
            """
        
        html_content += """
    </table>
</body>
</html>
        """
        
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(html_content)
    
    def print_summary(self, summary: Dict[str, Any]):
        """Pretty-print the final run summary, per-type statistics and failures."""
        separator = f"{Colors.BLUE}{'='*50}{Colors.RESET}"
        print(f"\n{separator}")
        print(f"{Colors.BLUE}测试结果摘要{Colors.RESET}")
        print(separator)
        
        print(f"{Colors.GREEN}通过: {summary['passed']}{Colors.RESET}")
        print(f"{Colors.RED}失败: {summary['failed']}{Colors.RESET}")
        print(f"{Colors.YELLOW}跳过: {summary['skipped']}{Colors.RESET}")
        print(f"{Colors.WHITE}总计: {summary['total']}{Colors.RESET}")
        
        total_seconds = summary['duration']
        print(f"{Colors.CYAN}耗时: {int(total_seconds // 60)}分{int(total_seconds % 60)}秒{Colors.RESET}")
        
        # Per-type breakdown, when available.
        if summary.get('by_type'):
            print(f"\n{Colors.BLUE}各类型测试统计:{Colors.RESET}")
            for test_type, stats in summary['by_type'].items():
                print(f"  {test_type}: {Colors.GREEN}{stats['passed']}{Colors.RESET}/"
                      f"{Colors.RED}{stats['failed']}{Colors.RESET}/"
                      f"{Colors.YELLOW}{stats['skipped']}{Colors.RESET} "
                      f"(通过/失败/跳过)")
        
        # List each failing test with its recorded reason.
        failures = [r for r in self.results if r.result == TestResult.FAIL]
        if failures:
            print(f"\n{Colors.RED}失败的测试:{Colors.RESET}")
            for test in failures:
                print(f"  {Colors.YELLOW}- {test.name} ({test.type.value}){Colors.RESET}")
                if test.error:
                    print(f"    {Colors.RED}{test.error}{Colors.RESET}")
        
        if summary['failed'] == 0:
            print(f"\n{Colors.GREEN}🎉 所有测试通过!{Colors.RESET}")
        else:
            print(f"\n{Colors.RED}❌ 有测试失败!{Colors.RESET}")
        
        print(separator)


def create_default_config() -> Dict[str, Any]:
    """Build the baseline runner configuration.

    Returns:
        A dict of default settings — output directories, parallelism,
        timeouts, verbosity, valgrind filter — plus a fresh
        'execution_timestamp' formatted as YYYYMMDD_HHMMSS.
    """
    config: Dict[str, Any] = dict(
        test_dir='out/tests',
        valgrind_dir='out/tests/valgrind',
        coverage_dir='out/tests/coverage',
        report_dir='out/tests',
        parallel_jobs=1,
        timeout=300,
        valgrind_timeout=None,  # no time limit for valgrind runs by default
        verbose=False,
        valgrind_filter='security',  # only 'security' tests run under valgrind by default
    )
    config['execution_timestamp'] = datetime.now().strftime("%Y%m%d_%H%M%S")
    return config


def main():
    """CLI entry point: parse arguments, build the config, run the
    requested test types and exit with 0 (all passed) or 1 (failures).

    Exit codes: 0 on success, 1 on test failure or unexpected error,
    130 when interrupted by the user (Ctrl-C).
    """
    parser = argparse.ArgumentParser(
        description='统一测试运行脚本',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
示例用法:
  %(prog)s unit                    # 运行单元测试
  %(prog)s valgrind               # 运行Valgrind内存检查（仅security测试）
  %(prog)s valgrind --valgrind-all # 运行所有测试的Valgrind检查
  %(prog)s coverage               # 运行覆盖率测试
  %(prog)s all                    # 运行所有测试
  %(prog)s unit valgrind          # 运行单元测试和Valgrind检查
  %(prog)s --test-dir out/debug unit  # 指定测试目录
        ''')
    
    parser.add_argument(
        'types',
        nargs='+',
        choices=[t.value for t in TestType],
        help='要运行的测试类型'
    )
    
    parser.add_argument(
        '--test-dir',
        default='out/tests',
        help='测试目录路径 (默认: out/tests)'
    )
    
    parser.add_argument(
        '--valgrind-dir',
        default='out/valgrind',
        help='Valgrind输出目录 (默认: out/valgrind)'
    )
    
    parser.add_argument(
        '--coverage-dir',
        default='out/coverage',
        help='覆盖率报告目录 (默认: out/coverage)'
    )
    
    parser.add_argument(
        '--report-dir',
        default='out/reports',
        help='测试报告目录 (默认: out/reports)'
    )
    
    parser.add_argument(
        '-j', '--jobs',
        type=int,
        default=1,
        help='并行任务数 (默认: 1)'
    )
    
    parser.add_argument(
        '--timeout',
        type=int,
        default=300,
        help='单个测试超时时间，秒 (默认: 300)'
    )
    
    parser.add_argument(
        '--valgrind-timeout',
        type=int,
        default=None,
        help='Valgrind检查超时时间，秒 (默认: 无限制)'
    )
    
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='详细输出'
    )
    
    parser.add_argument(
        '--valgrind-all',
        action='store_true',
        help='Valgrind检查所有测试文件，而不仅仅是security测试'
    )
    
    args = parser.parse_args()
    
    # Build the config: start from defaults, then overlay CLI options.
    # Note the CLI defaults for the output directories intentionally take
    # precedence over the ones in create_default_config().
    config = create_default_config()
    config.update({
        'test_dir': args.test_dir,
        'valgrind_dir': args.valgrind_dir,
        'coverage_dir': args.coverage_dir,
        'report_dir': args.report_dir,
        'parallel_jobs': args.jobs,
        'timeout': args.timeout,
        'valgrind_timeout': args.valgrind_timeout,
        'verbose': args.verbose,
        'valgrind_filter': 'all' if args.valgrind_all else 'security'
    })
    
    # Resolve the requested test types. 'all' expands to the standard suite;
    # duplicates on the command line (e.g. "unit unit") are dropped while
    # preserving first-seen order so no type runs twice.
    test_types: List[TestType] = []
    for type_str in args.types:
        test_type = TestType(type_str)
        if test_type == TestType.ALL:
            test_types = [TestType.UNIT, TestType.VALGRIND, TestType.COVERAGE]
            break
        if test_type not in test_types:
            test_types.append(test_type)
    
    # Run the selected tests and report.
    runner = TestRunner(config)
    
    try:
        Logger.info(f"开始运行测试: {[t.value for t in test_types]}")
        Logger.info(f"测试目录: {config['test_dir']}")
        Logger.info(f"并行任务数: {config['parallel_jobs']}")
        
        summary = runner.run_all_tests(test_types)
        runner.print_summary(summary)
        
        # Exit code reflects the overall outcome.
        sys.exit(0 if summary['failed'] == 0 else 1)
        
    except KeyboardInterrupt:
        Logger.error("测试被用户中断")
        sys.exit(130)  # conventional exit code for SIGINT
    except Exception as e:
        Logger.error(f"运行测试时发生错误: {e}")
        if config['verbose']:
            # Full traceback only in verbose mode to keep normal output clean.
            import traceback
            traceback.print_exc()
        sys.exit(1)


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
