#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
集成测试套件 v2.0
专为重装系统后的持久化测试集成验证设计
验证从部署到清理的完整工作流程
"""

import os
import sys
import json
import time
import subprocess
import threading
import signal
import logging
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Any

class IntegrationTestSuite:
    """Integration test suite.

    Drives the test workflow end to end: environment checks, script
    validation, deployment, verification, cleanup, and a final integration
    pass.  Per-phase results accumulate in ``self.phase_results`` and are
    summarised into a Markdown report under ``/tmp/integration_test``.
    """

    def __init__(self):
        self.version = "2.0.0"
        # The session id doubles as a unique suffix for log/report filenames.
        self.session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.start_time = time.time()

        # Path configuration.
        self.script_dir = Path(__file__).parent
        self.base_dir = Path("/tmp/integration_test")
        self.log_dir = self.base_dir / "logs"
        self.report_dir = self.base_dir / "reports"

        # Create working directories (idempotent).
        for dir_path in [self.base_dir, self.log_dir, self.report_dir]:
            dir_path.mkdir(parents=True, exist_ok=True)

        # Configure logging (must happen before the first self.logger use).
        self.setup_logging()

        # Scripts exercised by the individual test phases; all are expected
        # to live next to this file.
        self.test_scripts = {
            'deployment': self.script_dir / 'ultimate_persistence_tester.py',
            'verification': self.script_dir / 'enhanced_verification_system.py',
            'cleanup': self.script_dir / 'comprehensive_cleanup_system.py',
            'quick_deploy': self.script_dir / 'quick_deploy_after_reinstall.sh'
        }

        # Ordered list of phases executed by run_full_test_suite().
        self.test_phases = [
            'environment_check',
            'script_validation',
            'deployment_test',
            'verification_test',
            'cleanup_test',
            'integration_validation'
        ]

        # Accumulated state.  NOTE(review): test_results is never written or
        # read in this file; kept for interface compatibility.
        self.test_results = {}
        self.phase_results = {}
        self.performance_metrics = {}
        self.test_errors = []

        self.logger.info(f"集成测试套件 v{self.version} 初始化完成")
        self.logger.info(f"会话ID: {self.session_id}")

    def setup_logging(self):
        """Configure file + stdout logging for this session.

        NOTE(review): logging.basicConfig is a no-op if the root logger
        already has handlers, so a second suite instance in the same
        process would keep logging to the first session's file.
        """
        log_file = self.log_dir / f"integration_test_{self.session_id}.log"

        # Log line format: timestamp, level, origin function/line, message.
        log_format = '%(asctime)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'

        # Configure the root logger with both a file and a console handler.
        logging.basicConfig(
            level=logging.INFO,
            format=log_format,
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8'),
                logging.StreamHandler(sys.stdout)
            ]
        )

        self.logger = logging.getLogger(__name__)
        self.logger.info("集成测试日志系统初始化完成")

    def run_command(self, command: str, timeout: int = 300) -> Tuple[bool, str, str]:
        """Run *command* through the shell.

        Returns a ``(success, stdout, stderr)`` tuple; *success* is True
        when the exit status is zero.  Timeouts and exceptions are reported
        as failures rather than raised.

        NOTE(review): shell=True is required because callers pass pipelines
        (grep/awk); never feed untrusted input into this method.
        """
        try:
            self.logger.debug(f"执行命令: {command}")

            result = subprocess.run(
                command,
                shell=True,
                capture_output=True,
                text=True,
                timeout=timeout
            )

            success = result.returncode == 0
            stdout = result.stdout.strip()
            stderr = result.stderr.strip()

            if not success:
                self.logger.warning(f"命令执行失败: {command}")
                self.logger.warning(f"错误输出: {stderr}")

            return success, stdout, stderr

        except subprocess.TimeoutExpired:
            self.logger.error(f"命令执行超时: {command}")
            return False, "", "命令执行超时"
        except Exception as e:
            self.logger.error(f"命令执行异常: {command}, 错误: {str(e)}")
            return False, "", str(e)

    def check_environment(self) -> Dict[str, Any]:
        """Check the test environment (privileges, tools, services, disk,
        network).  Hard requirements go to ``errors``; soft ones to
        ``warnings``.  Success means no errors were recorded."""
        self.logger.info("开始环境检查")

        result = {
            'phase': 'environment_check',
            'success': False,
            'checks': {},
            'errors': [],
            'warnings': []
        }

        try:
            # Root privilege check (required by the deployment scripts).
            if os.geteuid() != 0:
                result['errors'].append("需要Root权限运行测试")
            else:
                result['checks']['root_permission'] = True
                self.logger.info("Root权限检查通过")

            # Python version check.  Tuple comparison on sys.version_info so
            # that e.g. 4.0 is not rejected by a naive major/minor test.
            python_version = sys.version_info
            if python_version >= (3, 6):
                result['checks']['python_version'] = True
                self.logger.info(f"Python版本检查通过: {python_version.major}.{python_version.minor}")
            else:
                result['errors'].append(f"Python版本过低: {python_version.major}.{python_version.minor}")

            # Required external commands (missing ones are only warnings).
            required_commands = ['systemctl', 'crontab', 'ssh', 'ps', 'netstat']
            for cmd in required_commands:
                success, _, _ = self.run_command(f"which {cmd}")
                if success:
                    result['checks'][f'command_{cmd}'] = True
                else:
                    result['warnings'].append(f"命令不可用: {cmd}")

            # Required system services.
            required_services = ['ssh', 'cron']
            for service in required_services:
                success, stdout, _ = self.run_command(f"systemctl list-unit-files | grep {service}")
                if success and service in stdout:
                    result['checks'][f'service_{service}'] = True
                else:
                    result['warnings'].append(f"服务不可用: {service}")

            # Disk space check (df reports 1K blocks, so 1048576 == 1GB).
            success, stdout, _ = self.run_command("df / | awk 'NR==2 {print $4}'")
            if success:
                available_space = int(stdout) if stdout.isdigit() else 0
                if available_space > 1048576:  # 1GB
                    result['checks']['disk_space'] = True
                    self.logger.info(f"磁盘空间充足: {available_space//1024//1024}GB")
                else:
                    result['warnings'].append("磁盘空间不足")

            # Network connectivity check.
            success, _, _ = self.run_command("ping -c 3 8.8.8.8")
            if success:
                result['checks']['network'] = True
                self.logger.info("网络连接正常")
            else:
                result['warnings'].append("网络连接异常")

            # Overall status: pass only when no hard errors were recorded.
            result['success'] = len(result['errors']) == 0

            self.logger.info(f"环境检查完成: {'通过' if result['success'] else '失败'}")

        except Exception as e:
            result['errors'].append(f"环境检查异常: {str(e)}")
            self.logger.error(f"环境检查失败: {str(e)}")

        return result

    def validate_scripts(self) -> Dict[str, Any]:
        """Validate every script in ``self.test_scripts``: existence,
        executable bit, and a syntax check (py_compile for .py, bash -n
        for .sh; other suffixes are skipped)."""
        self.logger.info("开始脚本验证")

        result = {
            'phase': 'script_validation',
            'success': False,
            'scripts': {},
            'errors': [],
            'warnings': []
        }

        try:
            for script_name, script_path in self.test_scripts.items():
                script_result = {
                    'exists': False,
                    'executable': False,
                    'syntax_valid': False,
                    'size': 0
                }

                # Existence check.
                if script_path.exists():
                    script_result['exists'] = True
                    script_result['size'] = script_path.stat().st_size

                    # Executable-permission check.
                    if os.access(script_path, os.X_OK):
                        script_result['executable'] = True

                    # Syntax check, dispatched on file suffix.
                    if script_path.suffix == '.py':
                        success, _, stderr = self.run_command(f"python3 -m py_compile {script_path}")
                        if success:
                            script_result['syntax_valid'] = True
                        else:
                            result['errors'].append(f"脚本语法错误: {script_name} - {stderr}")
                    elif script_path.suffix == '.sh':
                        success, _, stderr = self.run_command(f"bash -n {script_path}")
                        if success:
                            script_result['syntax_valid'] = True
                        else:
                            result['errors'].append(f"脚本语法错误: {script_name} - {stderr}")
                    else:
                        script_result['syntax_valid'] = True  # other file types: skip the syntax check

                    self.logger.info(f"脚本验证: {script_name} - 存在: {script_result['exists']}, 可执行: {script_result['executable']}, 语法: {script_result['syntax_valid']}")
                else:
                    result['errors'].append(f"脚本不存在: {script_name} - {script_path}")

                result['scripts'][script_name] = script_result

            # Overall status: every script exists and parses, no errors.
            all_valid = all(
                script_info['exists'] and script_info['syntax_valid']
                for script_info in result['scripts'].values()
            )
            result['success'] = all_valid and len(result['errors']) == 0

            self.logger.info(f"脚本验证完成: {'通过' if result['success'] else '失败'}")

        except Exception as e:
            result['errors'].append(f"脚本验证异常: {str(e)}")
            self.logger.error(f"脚本验证失败: {str(e)}")

        return result

    def test_deployment(self) -> Dict[str, Any]:
        """Run the deployment script and grade it by the success rate
        parsed from the newest JSON report it drops in /tmp."""
        self.logger.info("开始部署测试")

        result = {
            'phase': 'deployment_test',
            'success': False,
            'deployment_results': {},
            'execution_time': 0,
            'errors': [],
            'warnings': []
        }

        try:
            start_time = time.time()

            # Run the deployment script.
            deployment_script = self.test_scripts['deployment']
            success, stdout, stderr = self.run_command(f"python3 {deployment_script} --test")

            end_time = time.time()
            result['execution_time'] = end_time - start_time

            if success:
                # Parse the deployment results.
                try:
                    # Locate the newest test report by mtime.
                    report_files = list(Path("/tmp").glob("persistence_test_report_*.json"))
                    if report_files:
                        latest_report = max(report_files, key=lambda x: x.stat().st_mtime)

                        with open(latest_report, 'r') as f:
                            report_data = json.load(f)

                        deployment_results = report_data.get('deployment_results', {})
                        result['deployment_results'] = deployment_results

                        # Success rate: assumes the report maps names to
                        # truthy/falsy outcomes — TODO confirm schema.
                        successful_deployments = sum(deployment_results.values())
                        total_deployments = len(deployment_results)
                        success_rate = successful_deployments / total_deployments if total_deployments > 0 else 0

                        result['success'] = success_rate >= 0.8  # 80%+ counts as success

                        self.logger.info(f"部署测试完成: 成功率 {success_rate:.1%}")

                        if success_rate < 0.8:
                            result['warnings'].append(f"部署成功率较低: {success_rate:.1%}")
                    else:
                        result['errors'].append("未找到部署测试报告")

                except Exception as e:
                    result['errors'].append(f"解析部署结果失败: {str(e)}")
            else:
                result['errors'].append(f"部署脚本执行失败: {stderr}")

        except Exception as e:
            result['errors'].append(f"部署测试异常: {str(e)}")
            self.logger.error(f"部署测试失败: {str(e)}")

        return result

    def test_verification(self) -> Dict[str, Any]:
        """Run the verification script and grade it by the success rate
        parsed from its newest JSON report."""
        self.logger.info("开始验证测试")

        result = {
            'phase': 'verification_test',
            'success': False,
            'verification_results': {},
            'execution_time': 0,
            'errors': [],
            'warnings': []
        }

        try:
            start_time = time.time()

            # Run the verification script.
            verification_script = self.test_scripts['verification']
            success, stdout, stderr = self.run_command(f"python3 {verification_script} --verify")

            end_time = time.time()
            result['execution_time'] = end_time - start_time

            if success:
                # Parse the verification results.
                try:
                    # Locate the newest verification report by mtime.
                    report_files = list(Path("/tmp/persistence_verification/reports").glob("verification_report_*.json"))
                    if report_files:
                        latest_report = max(report_files, key=lambda x: x.stat().st_mtime)

                        with open(latest_report, 'r') as f:
                            report_data = json.load(f)

                        verification_results = report_data.get('verification_results', {})
                        result['verification_results'] = verification_results

                        # Success rate over all verification entries.
                        successful_verifications = sum(verification_results.values())
                        total_verifications = len(verification_results)
                        success_rate = successful_verifications / total_verifications if total_verifications > 0 else 0

                        result['success'] = success_rate >= 0.8  # 80%+ counts as success

                        self.logger.info(f"验证测试完成: 成功率 {success_rate:.1%}")

                        if success_rate < 0.8:
                            result['warnings'].append(f"验证成功率较低: {success_rate:.1%}")
                    else:
                        result['errors'].append("未找到验证测试报告")

                except Exception as e:
                    result['errors'].append(f"解析验证结果失败: {str(e)}")
            else:
                result['errors'].append(f"验证脚本执行失败: {stderr}")

        except Exception as e:
            result['errors'].append(f"验证测试异常: {str(e)}")
            self.logger.error(f"验证测试失败: {str(e)}")

        return result

    def test_cleanup(self) -> Dict[str, Any]:
        """Run the cleanup script, then its own --verify mode to confirm
        the cleanup took effect.  A failed verify is only a warning."""
        self.logger.info("开始清理测试")

        result = {
            'phase': 'cleanup_test',
            'success': False,
            'cleanup_results': {},
            'execution_time': 0,
            'errors': [],
            'warnings': []
        }

        try:
            start_time = time.time()

            # Run the cleanup script.
            cleanup_script = self.test_scripts['cleanup']
            success, stdout, stderr = self.run_command(f"python3 {cleanup_script} --cleanup --force")

            end_time = time.time()
            result['execution_time'] = end_time - start_time

            if success:
                # Verify the cleanup result.
                verify_success, verify_stdout, verify_stderr = self.run_command(f"python3 {cleanup_script} --verify")

                if verify_success:
                    result['success'] = True
                    self.logger.info("清理测试完成: 成功")
                else:
                    result['warnings'].append("清理验证失败")
                    self.logger.warning("清理验证失败")
            else:
                result['errors'].append(f"清理脚本执行失败: {stderr}")

        except Exception as e:
            result['errors'].append(f"清理测试异常: {str(e)}")
            self.logger.error(f"清理测试失败: {str(e)}")

        return result

    def validate_integration(self) -> Dict[str, Any]:
        """Final phase: run the full workflow script, record performance
        metrics, and require both that run and every earlier phase to have
        succeeded."""
        self.logger.info("开始集成验证")

        result = {
            'phase': 'integration_validation',
            'success': False,
            'workflow_test': {},
            'performance_analysis': {},
            'errors': [],
            'warnings': []
        }

        try:
            # Full end-to-end workflow test.
            workflow_start = time.time()

            # 1. Quick deployment run.
            self.logger.info("执行完整工作流程测试...")
            deploy_success, deploy_stdout, deploy_stderr = self.run_command(
                f"bash {self.test_scripts['quick_deploy']} --no-install --cleanup"
            )

            workflow_end = time.time()
            workflow_time = workflow_end - workflow_start

            result['workflow_test'] = {
                'success': deploy_success,
                'execution_time': workflow_time,
                'output': deploy_stdout,
                'error': deploy_stderr
            }

            # 2. Performance analysis.
            total_test_time = time.time() - self.start_time
            result['performance_analysis'] = {
                'total_test_time': total_test_time,
                'workflow_time': workflow_time,
                'average_phase_time': total_test_time / len(self.test_phases),
                'performance_score': self.calculate_performance_score()
            }

            # 3. Overall verdict: all earlier phases AND the workflow run.
            all_phases_success = all(
                phase_result.get('success', False)
                for phase_result in self.phase_results.values()
            )

            result['success'] = all_phases_success and deploy_success

            self.logger.info(f"集成验证完成: {'成功' if result['success'] else '失败'}")

        except Exception as e:
            result['errors'].append(f"集成验证异常: {str(e)}")
            self.logger.error(f"集成验证失败: {str(e)}")

        return result

    def calculate_performance_score(self) -> float:
        """Return a 0-100 score combining speed and phase success rate.

        Time score: 100 at 0s, decaying by 50 points per 300s of elapsed
        wall time (floored at 0).  Weighted 30% time / 70% success rate.
        """
        try:
            total_time = time.time() - self.start_time
            successful_phases = sum(1 for result in self.phase_results.values() if result.get('success', False))
            total_phases = len(self.phase_results)

            # Time score (faster is better; 300s reference window).
            time_score = max(0, 100 - (total_time / 300) * 50)

            # Success-rate score.
            success_score = (successful_phases / total_phases) * 100 if total_phases > 0 else 0

            # Weighted composite, clamped to [0, 100].
            performance_score = (time_score * 0.3 + success_score * 0.7)

            return min(100, max(0, performance_score))

        except Exception:
            # Scoring must never crash the suite; any failure scores 0.
            # (Was a bare except, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            return 0.0

    def run_full_test_suite(self) -> Dict[str, Any]:
        """Execute every phase in ``self.test_phases`` in order, collecting
        results and errors, then compute the overall metrics.  A failing
        phase does not abort the suite."""
        self.logger.info("开始运行完整集成测试套件")

        suite_start_time = time.time()

        # Map phase names to their implementations.
        test_methods = {
            'environment_check': self.check_environment,
            'script_validation': self.validate_scripts,
            'deployment_test': self.test_deployment,
            'verification_test': self.test_verification,
            'cleanup_test': self.test_cleanup,
            'integration_validation': self.validate_integration
        }

        for phase in self.test_phases:
            self.logger.info(f"执行测试阶段: {phase}")

            try:
                phase_result = test_methods[phase]()
                self.phase_results[phase] = phase_result

                if not phase_result['success']:
                    self.logger.warning(f"测试阶段失败: {phase}")
                    # Record errors but keep running the remaining phases.
                    for error in phase_result.get('errors', []):
                        self.test_errors.append(f"{phase}: {error}")
                else:
                    self.logger.info(f"测试阶段成功: {phase}")

            except Exception as e:
                self.logger.error(f"测试阶段异常: {phase}, 错误: {str(e)}")
                self.phase_results[phase] = {
                    'phase': phase,
                    'success': False,
                    'errors': [f"阶段执行异常: {str(e)}"]
                }
                self.test_errors.append(f"{phase}: 阶段执行异常: {str(e)}")

        suite_end_time = time.time()

        # Compute overall metrics.
        successful_phases = sum(1 for result in self.phase_results.values() if result.get('success', False))
        total_phases = len(self.phase_results)
        success_rate = successful_phases / total_phases if total_phases > 0 else 0

        self.performance_metrics = {
            'total_execution_time': suite_end_time - suite_start_time,
            'successful_phases': successful_phases,
            'total_phases': total_phases,
            'success_rate': success_rate,
            'performance_score': self.calculate_performance_score(),
            'total_errors': len(self.test_errors)
        }

        # Overall verdict.
        overall_success = success_rate >= 0.8  # 80%+ counts as success

        self.logger.info(f"集成测试套件完成: 成功率 {success_rate:.1%}")

        return {
            'overall_success': overall_success,
            'performance_metrics': self.performance_metrics,
            'phase_results': self.phase_results,
            'test_errors': self.test_errors
        }

    def generate_test_report(self) -> str:
        """Write the Markdown report for this session and return its path
        (empty string on failure)."""
        self.logger.info("生成集成测试报告")

        report_file = self.report_dir / f"integration_test_report_{self.session_id}.md"

        try:
            with open(report_file, 'w', encoding='utf-8') as f:
                f.write(f"# 集成测试套件报告 v{self.version}\n\n")
                f.write(f"**会话ID**: {self.session_id}\n")
                f.write(f"**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"**测试耗时**: {self.performance_metrics.get('total_execution_time', 0):.2f} 秒\n\n")

                # Summary section.
                f.write("## 📊 测试摘要\n\n")
                f.write(f"- **测试阶段数量**: {self.performance_metrics.get('total_phases', 0)}\n")
                f.write(f"- **成功阶段数量**: {self.performance_metrics.get('successful_phases', 0)}\n")
                f.write(f"- **失败阶段数量**: {self.performance_metrics.get('total_phases', 0) - self.performance_metrics.get('successful_phases', 0)}\n")
                f.write(f"- **总体成功率**: {self.performance_metrics.get('success_rate', 0):.1%}\n")
                f.write(f"- **性能得分**: {self.performance_metrics.get('performance_score', 0):.1f}/100\n")
                f.write(f"- **错误数量**: {self.performance_metrics.get('total_errors', 0)}\n\n")

                # Per-phase details.
                f.write("## 🔍 详细测试结果\n\n")

                for phase, result in self.phase_results.items():
                    status_icon = "✅" if result.get('success', False) else "❌"
                    f.write(f"### {status_icon} {phase.upper().replace('_', ' ')}\n\n")

                    f.write(f"**状态**: {'成功' if result.get('success', False) else '失败'}\n")

                    if 'execution_time' in result:
                        f.write(f"**执行时间**: {result['execution_time']:.2f} 秒\n")

                    # Errors.
                    if result.get('errors'):
                        f.write("\n**错误信息**:\n")
                        for error in result['errors']:
                            f.write(f"- {error}\n")

                    # Warnings.
                    if result.get('warnings'):
                        f.write("\n**警告信息**:\n")
                        for warning in result['warnings']:
                            f.write(f"- {warning}\n")

                    f.write("\n")

                # Performance section.
                f.write("## 📈 性能分析\n\n")
                f.write(f"- **总执行时间**: {self.performance_metrics.get('total_execution_time', 0):.2f} 秒\n")
                f.write(f"- **平均阶段时间**: {self.performance_metrics.get('total_execution_time', 0) / max(1, self.performance_metrics.get('total_phases', 1)):.2f} 秒\n")
                f.write(f"- **性能评级**: {self.get_performance_grade()}\n\n")

                # Recommendations and summary.
                f.write("## 💡 建议和总结\n\n")

                success_rate = self.performance_metrics.get('success_rate', 0)
                if success_rate >= 0.9:
                    f.write("🎉 **优秀**: 集成测试表现优秀，所有组件工作正常。\n\n")
                elif success_rate >= 0.8:
                    f.write("✅ **良好**: 集成测试基本通过，少数组件需要优化。\n\n")
                elif success_rate >= 0.6:
                    f.write("⚠️ **一般**: 集成测试部分通过，需要修复一些问题。\n\n")
                else:
                    f.write("❌ **需要改进**: 集成测试失败较多，需要全面检查和修复。\n\n")

                # Concrete issues (capped at the first 10).
                if self.test_errors:
                    f.write("### 需要解决的问题:\n\n")
                    for error in self.test_errors[:10]:  # show only the first 10 errors
                        f.write(f"- {error}\n")
                    if len(self.test_errors) > 10:
                        f.write(f"- ... 还有 {len(self.test_errors) - 10} 个错误\n")
                    f.write("\n")

                f.write("### 后续步骤:\n\n")
                f.write("1. 修复上述错误和警告\n")
                f.write("2. 重新运行失败的测试阶段\n")
                f.write("3. 优化性能较差的组件\n")
                f.write("4. 更新文档和使用指南\n\n")

                f.write("---\n")
                f.write(f"*报告由集成测试套件 v{self.version} 自动生成*\n")

            self.logger.info(f"集成测试报告已生成: {report_file}")
            return str(report_file)

        except Exception as e:
            self.logger.error(f"生成测试报告失败: {str(e)}")
            return ""

    def get_performance_grade(self) -> str:
        """Map the recorded performance score onto a letter grade."""
        score = self.performance_metrics.get('performance_score', 0)

        if score >= 90:
            return "A+ (优秀)"
        elif score >= 80:
            return "A (良好)"
        elif score >= 70:
            return "B (一般)"
        elif score >= 60:
            return "C (及格)"
        else:
            return "D (需要改进)"

    def print_summary(self):
        """Print a console summary of metrics, per-phase results, the first
        few errors, and output file locations."""
        print("\n" + "="*70)
        print(f"🧪 集成测试套件 v{self.version} - 测试摘要")
        print("="*70)

        print(f"📊 总体统计:")
        print(f"  测试阶段: {self.performance_metrics.get('total_phases', 0)}")
        print(f"  成功阶段: {self.performance_metrics.get('successful_phases', 0)}")
        print(f"  失败阶段: {self.performance_metrics.get('total_phases', 0) - self.performance_metrics.get('successful_phases', 0)}")
        print(f"  成功率: {self.performance_metrics.get('success_rate', 0):.1%}")
        print(f"  性能得分: {self.performance_metrics.get('performance_score', 0):.1f}/100")
        print(f"  性能评级: {self.get_performance_grade()}")
        print(f"  测试耗时: {self.performance_metrics.get('total_execution_time', 0):.2f} 秒")

        print(f"\n📋 阶段结果:")
        for phase, result in self.phase_results.items():
            status_icon = "✅" if result.get('success', False) else "❌"
            execution_time = result.get('execution_time', 0)
            print(f"  {status_icon} {phase}: {execution_time:.2f}s")

        if self.test_errors:
            print(f"\n⚠️ 主要错误 (前5个):")
            for error in self.test_errors[:5]:
                print(f"  - {error}")

        print(f"\n📁 报告文件:")
        print(f"  测试报告: {self.report_dir / f'integration_test_report_{self.session_id}.md'}")
        print(f"  详细日志: {self.log_dir / f'integration_test_{self.session_id}.log'}")

        print("="*70)

def main():
    """CLI entry point.

    Parses arguments, runs either a single named phase or the full suite,
    prints the summary (unless suppressed), writes the report, and exits
    with a status code derived from the recorded success rate.
    """
    import argparse

    parser = argparse.ArgumentParser(description="集成测试套件 v2.0")
    parser.add_argument('--full', action='store_true', help='运行完整测试套件')
    parser.add_argument('--phase', type=str, help='运行特定测试阶段')
    parser.add_argument('--report-only', action='store_true', help='仅生成报告')
    parser.add_argument('--quiet', action='store_true', help='静默模式')

    args = parser.parse_args()

    # Build the suite up front so the exception handlers below can log.
    suite = IntegrationTestSuite()

    try:
        if args.phase:
            # Single-phase mode: dispatch by name.
            phase_dispatch = {
                'environment': suite.check_environment,
                'scripts': suite.validate_scripts,
                'deployment': suite.test_deployment,
                'verification': suite.test_verification,
                'cleanup': suite.test_cleanup,
                'integration': suite.validate_integration
            }

            runner = phase_dispatch.get(args.phase)
            if runner is None:
                print(f"未知测试阶段: {args.phase}")
                print(f"可用阶段: {', '.join(phase_dispatch.keys())}")
                sys.exit(1)

            outcome = runner()
            suite.phase_results[args.phase] = outcome
            if not args.quiet:
                print(f"\n阶段 '{args.phase}' 测试结果: {'成功' if outcome['success'] else '失败'}")

        elif args.full:
            # Full-suite mode.
            suite.run_full_test_suite()

        else:
            # No mode selected: show usage and leave successfully.
            parser.print_help()
            sys.exit(0)

        # Console summary, unless suppressed by either flag.
        if not (args.report_only or args.quiet):
            suite.print_summary()

        # The Markdown report is always written.
        suite.generate_test_report()

        # Exit code derived from the overall success rate:
        # 0 = success, 1 = partial, 2 = failure.
        metrics = suite.performance_metrics
        if not metrics:
            sys.exit(0)
        rate = metrics.get('success_rate', 0)
        if rate >= 0.8:
            sys.exit(0)
        if rate >= 0.5:
            sys.exit(1)
        sys.exit(2)

    except KeyboardInterrupt:
        suite.logger.warning("测试被用户中断")
        sys.exit(130)
    except Exception as e:
        suite.logger.error(f"测试过程发生异常: {str(e)}")
        sys.exit(1)

if __name__ == "__main__":
    main()