#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
简化测试运行器 v1.0
针对privilege-maDev项目的快速功能验证

功能特性:
- 🔍 文件存在性检查
- 📊 代码质量分析
- 🚀 基础功能验证
- 📋 测试报告生成
"""

import os
import sys
import json
import time
import subprocess
from datetime import datetime
from pathlib import Path

class SimpleTestRunner:
    """Lightweight smoke-test runner for the privilege-maDev project.

    Runs a fixed series of filesystem-level checks (directory layout,
    core files, script executability, configuration and documentation),
    accumulates one result dict per check in ``self.test_results`` and
    writes JSON + Markdown reports into ``<project_root>/reports``.

    Each entry of ``test_results`` has the shape::

        {'test_name': str, 'success': bool, 'details': dict}
    """

    def __init__(self):
        # Project root is assumed to be two levels above this file,
        # i.e. this script lives in <root>/<subdir>/ -- TODO confirm layout.
        self.project_root = Path(__file__).parent.parent
        # Timestamp id used to name this run's report files.
        self.session_id = datetime.now().strftime('%Y%m%d_%H%M%S')
        # Accumulated results, one dict per executed check.
        self.test_results = []

        print(f"🚀 简化测试运行器启动 - 会话ID: {self.session_id}")
        print("="*60)

    def run_all_tests(self):
        """Execute every check in order, then generate the reports.

        Best-effort semantics: the first failing check aborts the rest
        and the exception is reported on stdout instead of re-raised.
        """
        try:
            # 1. project directory layout
            self._check_project_structure()
            # 2. core source files
            self._check_core_files()
            # 3. scripts respond to --help
            self._check_script_executability()
            # 4. configuration modules
            self._check_configuration_files()
            # 5. documentation files
            self._check_documentation()
            # summary + JSON/Markdown reports
            self._generate_report()
        except Exception as e:
            print(f"❌ 测试运行失败: {e}")

    def _check_project_structure(self):
        """Check that the expected top-level directories exist (pass >= 80%)."""
        print("\n📁 检查项目结构...")

        required_dirs = [
            "modules", "apps", "config", "scripts",
            "docs", "examples", "reports", "ansible"
        ]

        structure_results = {
            'total_dirs': len(required_dirs),
            'existing_dirs': [],
            'missing_dirs': []
        }

        for dir_name in required_dirs:
            # Path.is_dir() already implies existence.
            if (self.project_root / dir_name).is_dir():
                structure_results['existing_dirs'].append(dir_name)
                print(f"  ✅ {dir_name}/")
            else:
                structure_results['missing_dirs'].append(dir_name)
                print(f"  ❌ {dir_name}/ (缺失)")

        completion_rate = len(structure_results['existing_dirs']) / len(required_dirs) * 100
        structure_results['completion_rate'] = completion_rate

        self.test_results.append({
            'test_name': '项目结构检查',
            'success': completion_rate >= 80,
            'details': structure_results
        })

        print(f"  📊 结构完整度: {completion_rate:.1f}%")

    def _check_file_group(self, header, files, singular, plural,
                          test_name, label, pass_threshold):
        """Shared existence/size check for a named group of files.

        This logic was previously duplicated across the core-file,
        config-file and documentation checks; factored out so the three
        stay consistent.

        Args:
            header: banner printed before the check (with its leading newline).
            files: project-root-relative paths to verify.
            singular: key stem producing the ``<singular>_sizes`` detail key.
            plural: key stem producing the ``total_<plural>``,
                ``existing_<plural>`` and ``missing_<plural>`` detail keys.
            test_name: name recorded in ``test_results``.
            label: caption of the completion-rate summary line.
            pass_threshold: minimum completion rate (percent) to succeed.
        """
        print(header)

        existing_key = f'existing_{plural}'
        missing_key = f'missing_{plural}'
        sizes_key = f'{singular}_sizes'

        results = {
            f'total_{plural}': len(files),
            existing_key: [],
            missing_key: [],
            sizes_key: {}
        }

        for rel_path in files:
            full_path = self.project_root / rel_path
            # is_file() also rejects a directory that merely shares the name.
            if full_path.is_file():
                file_size = full_path.stat().st_size
                results[existing_key].append(rel_path)
                results[sizes_key][rel_path] = file_size
                print(f"  ✅ {rel_path} ({file_size} bytes)")
            else:
                results[missing_key].append(rel_path)
                print(f"  ❌ {rel_path} (缺失)")

        completion_rate = len(results[existing_key]) / len(files) * 100
        results['completion_rate'] = completion_rate

        self.test_results.append({
            'test_name': test_name,
            'success': completion_rate >= pass_threshold,
            'details': results
        })

        print(f"  📊 {label}: {completion_rate:.1f}%")

    def _check_core_files(self):
        """Check that the core implementation files exist (pass >= 70%)."""
        self._check_file_group(
            header="\n📄 检查核心文件...",
            files=[
                "apps/check.py",
                "apps/redteam_launcher.py",
                "modules/exploits/main_exploit.py",
                "modules/automation/one_click_deployment.py",
                "modules/intelligence/intelligent_decision_engine.py",
                "scripts/ultimate_persistence_tester.py",
                "config/log_manager.py",
                "config/config_auto_repair.py"
            ],
            singular='file', plural='files',
            test_name='核心文件检查',
            label='文件完整度',
            pass_threshold=70,
        )

    def _check_script_executability(self):
        """Check that key scripts run and answer ``--help`` (pass >= 60%).

        Each script is launched with the current interpreter and a 10s
        timeout; it counts as executable when it exits 0 or its stdout
        mentions help/usage.
        """
        print("\n⚡ 检查脚本可执行性...")

        test_scripts = [
            "apps/check.py",
            "apps/redteam_launcher.py",
            "scripts/ultimate_persistence_tester.py"
        ]

        exec_results = {
            'total_scripts': len(test_scripts),
            'executable_scripts': [],
            'non_executable_scripts': [],
            'execution_details': {}
        }

        for script_path in test_scripts:
            full_path = self.project_root / script_path
            if not full_path.exists():
                # Missing file: recorded without execution details.
                exec_results['non_executable_scripts'].append(script_path)
                print(f"  ❌ {script_path} (文件不存在)")
                continue

            try:
                # Probe with --help; argument-list form (shell=False) plus a
                # hard timeout keeps this safe even for misbehaving scripts.
                result = subprocess.run([
                    sys.executable, str(full_path), "--help"
                ], capture_output=True, text=True, timeout=10)

                stdout_lower = result.stdout.lower()
                if result.returncode == 0 or "help" in stdout_lower or "usage" in stdout_lower:
                    exec_results['executable_scripts'].append(script_path)
                    exec_results['execution_details'][script_path] = {
                        'status': 'success',
                        'returncode': result.returncode,
                        'has_help': True
                    }
                    print(f"  ✅ {script_path} (可执行)")
                else:
                    exec_results['non_executable_scripts'].append(script_path)
                    exec_results['execution_details'][script_path] = {
                        'status': 'failed',
                        'returncode': result.returncode,
                        # First 100 chars of stderr are enough for the report.
                        'error': result.stderr[:100] if result.stderr else "无错误信息"
                    }
                    print(f"  ⚠️ {script_path} (执行异常)")

            except subprocess.TimeoutExpired:
                exec_results['non_executable_scripts'].append(script_path)
                exec_results['execution_details'][script_path] = {
                    'status': 'timeout',
                    'error': '执行超时'
                }
                print(f"  ⚠️ {script_path} (执行超时)")
            except Exception as e:
                exec_results['non_executable_scripts'].append(script_path)
                exec_results['execution_details'][script_path] = {
                    'status': 'error',
                    'error': str(e)
                }
                print(f"  ❌ {script_path} (执行错误: {e})")

        success_rate = len(exec_results['executable_scripts']) / len(test_scripts) * 100
        exec_results['success_rate'] = success_rate

        self.test_results.append({
            'test_name': '脚本可执行性检查',
            'success': success_rate >= 60,
            'details': exec_results
        })

        print(f"  📊 可执行率: {success_rate:.1f}%")

    def _check_configuration_files(self):
        """Check that the configuration modules exist (pass >= 80%)."""
        self._check_file_group(
            header="\n⚙️ 检查配置文件...",
            files=[
                "config/log_manager.py",
                "config/config_auto_repair.py",
                "config/config_template_generator.py",
                "config/distro_specific_checks.py",
                "config/config_checks.py"
            ],
            singular='config', plural='configs',
            test_name='配置文件检查',
            label='配置完整度',
            pass_threshold=80,
        )

    def _check_documentation(self):
        """Check that the key documentation files exist (pass >= 80%)."""
        self._check_file_group(
            header="\n📚 检查文档完整性...",
            files=[
                "README.md",
                "docs/CHANGELOG.md",
                "docs/DETAILED_CHANGELOG.md",
                "docs/OPTIMIZATION_TOOLS_GUIDE.md",
                "docs/LINUX_AUTOMATION_TEST_GUIDE.md"
            ],
            singular='doc', plural='docs',
            test_name='文档完整性检查',
            label='文档完整度',
            pass_threshold=80,
        )

    def _generate_report(self):
        """Summarise ``test_results``, write JSON + Markdown reports, print a recap."""
        print("\n📊 生成测试报告...")

        # Aggregate pass/fail counts.
        total_tests = len(self.test_results)
        passed_tests = sum(1 for result in self.test_results if result['success'])
        failed_tests = total_tests - passed_tests
        # Guard against a zero division if no check ever ran.
        overall_success_rate = (passed_tests / total_tests * 100) if total_tests > 0 else 0

        report = {
            'session_id': self.session_id,
            'timestamp': datetime.now().isoformat(),
            'summary': {
                'total_tests': total_tests,
                'passed_tests': passed_tests,
                'failed_tests': failed_tests,
                'overall_success_rate': round(overall_success_rate, 2)
            },
            'test_results': self.test_results,
            'recommendations': self._generate_recommendations()
        }

        # Persist the machine-readable report (UTF-8, Chinese kept readable
        # via ensure_ascii=False).
        report_dir = self.project_root / "reports"
        report_dir.mkdir(exist_ok=True)

        json_report_file = report_dir / f"simple_test_report_{self.session_id}.json"
        with open(json_report_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        # Human-readable companion report.
        md_report_file = report_dir / f"simple_test_report_{self.session_id}.md"
        self._generate_markdown_report(md_report_file, report)

        # Console recap.
        print("\n" + "="*60)
        print("📋 测试结果摘要")
        print("="*60)
        print(f"总测试数: {total_tests}")
        print(f"通过测试: {passed_tests}")
        print(f"失败测试: {failed_tests}")
        print(f"总体成功率: {overall_success_rate:.1f}%")
        print(f"JSON报告: {json_report_file}")
        print(f"Markdown报告: {md_report_file}")

        if failed_tests > 0:
            print(f"\n❌ 失败的测试:")
            for result in self.test_results:
                if not result['success']:
                    print(f"  - {result['test_name']}")

        recommendations = report['recommendations']
        if recommendations:
            print(f"\n💡 改进建议:")
            for rec in recommendations:
                print(f"  - {rec}")

        print("="*60)

    def _generate_recommendations(self):
        """Derive human-readable improvement hints from failed checks.

        Returns:
            list[str]: one suggestion per failed check that matched a
            known test name; other failures produce no suggestion.
        """
        recommendations = []

        for result in self.test_results:
            if result['success']:
                continue
            test_name = result['test_name']
            details = result['details']

            if test_name == '项目结构检查':
                if details['completion_rate'] < 80:
                    missing = details['missing_dirs']
                    recommendations.append(f"补充缺失的目录: {', '.join(missing)}")

            elif test_name == '核心文件检查':
                if details['completion_rate'] < 70:
                    # Only the first three missing files are listed to keep
                    # the suggestion short.
                    missing = details['missing_files']
                    recommendations.append(f"补充缺失的核心文件: {', '.join(missing[:3])}...")

            elif test_name == '脚本可执行性检查':
                if details['success_rate'] < 60:
                    recommendations.append("修复脚本执行问题，确保脚本可以正常运行")

            elif test_name == '配置文件检查':
                if details['completion_rate'] < 80:
                    recommendations.append("补充缺失的配置文件")

            elif test_name == '文档完整性检查':
                if details['completion_rate'] < 80:
                    recommendations.append("完善项目文档")

        return recommendations

    def _generate_markdown_report(self, report_file, report_data):
        """Render ``report_data`` as a Markdown file at ``report_file``.

        Failures are reported on stdout; this method never raises, so a
        broken Markdown render cannot abort the whole run.
        """
        try:
            with open(report_file, 'w', encoding='utf-8') as f:
                f.write(f"# 简化测试报告\n\n")
                f.write(f"**会话ID**: {report_data['session_id']}\n")
                f.write(f"**测试时间**: {report_data['timestamp']}\n\n")

                # Summary section.
                summary = report_data['summary']
                f.write(f"## 测试摘要\n\n")
                f.write(f"- **总测试数**: {summary['total_tests']}\n")
                f.write(f"- **通过测试**: {summary['passed_tests']}\n")
                f.write(f"- **失败测试**: {summary['failed_tests']}\n")
                f.write(f"- **总体成功率**: {summary['overall_success_rate']}%\n\n")

                # One section per check with its raw details.
                f.write(f"## 详细测试结果\n\n")
                for result in report_data['test_results']:
                    status = "✅ 通过" if result['success'] else "❌ 失败"
                    f.write(f"### {result['test_name']} - {status}\n\n")

                    details = result['details']
                    for key, value in details.items():
                        if isinstance(value, list):
                            f.write(f"- **{key}**: {', '.join(value) if value else '无'}\n")
                        else:
                            f.write(f"- **{key}**: {value}\n")
                    f.write(f"\n")

                # Recommendations section (only when there are any).
                if report_data['recommendations']:
                    f.write(f"## 改进建议\n\n")
                    for rec in report_data['recommendations']:
                        f.write(f"- {rec}\n")
                    f.write(f"\n")

            print(f"✅ Markdown报告已生成: {report_file}")

        except Exception as e:
            print(f"⚠️ Markdown报告生成失败: {e}")

def main():
    """Entry point: build a :class:`SimpleTestRunner` and run the full suite.

    Keyboard interrupts and unexpected errors are reported on stdout
    rather than propagated.
    """
    print("🚀 启动简化测试运行器...")

    try:
        SimpleTestRunner().run_all_tests()
        print("\n🎉 简化测试完成!")

    except KeyboardInterrupt:
        print("\n⚠️ 测试被用户中断")
    except Exception as exc:
        print(f"\n❌ 测试运行失败: {exc}")

# Run the suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()