#!/usr/bin/env python3
"""
性能测试和基准测试脚本
用于测试优化前后的性能差异
"""

import os
import sys
import time
import tempfile
import shutil
import subprocess
import json
import psutil
from pathlib import Path
from typing import List, Dict, Any
import matplotlib.pyplot as plt
import pandas as pd


class PerformanceTester:
    """Benchmark harness comparing an original and an optimized script.

    Builds disposable git repositories under a temp directory, runs both
    script variants against them in subprocesses, records wall time and
    resource metrics, and renders a markdown report plus PNG charts.
    """

    def __init__(self):
        """Initialize empty state; no test environment exists yet."""
        self.test_results = []  # one metrics dict per run (see measure_performance)
        self.temp_dir = None    # root of generated repos, set by setup_test_environment
        self.test_repos = []    # absolute paths of the generated repositories

    def setup_test_environment(self, num_repos: int = 50, repo_size: str = 'small'):
        """Create ``num_repos`` throwaway git repositories in a temp dir.

        Args:
            num_repos: number of repositories to create.
            repo_size: 'small', 'medium', or anything else (treated as large
                by _create_test_repo).
        """
        print(f"创建测试环境: {num_repos} 个 {repo_size} 仓库...")

        # Fresh temp root per configuration; cleanup_test_environment removes it.
        self.temp_dir = tempfile.mkdtemp(prefix='git_pull_test_')

        self.test_repos = []
        for i in range(num_repos):
            repo_path = os.path.join(self.temp_dir, f'repo_{i:03d}')
            self._create_test_repo(repo_path, repo_size)
            self.test_repos.append(repo_path)

        print(f"测试环境创建完成: {self.temp_dir}")

    def _create_test_repo(self, repo_path: str, repo_size: str):
        """Create one git repository with an initial commit and 5 follow-ups.

        Args:
            repo_path: directory to create the repository in.
            repo_size: 'small' (10 x 100B files), 'medium' (100 x 1000B),
                anything else -> large (1000 x 10000B).
        """
        os.makedirs(repo_path, exist_ok=True)

        # Initialize the repo with a local identity (required for commits).
        # Output is captured on every git call so benchmark console output
        # stays clean (the original only silenced `git init`).
        subprocess.run(['git', 'init'], cwd=repo_path, capture_output=True)
        subprocess.run(['git', 'config', 'user.name', 'Test User'],
                       cwd=repo_path, capture_output=True)
        subprocess.run(['git', 'config', 'user.email', 'test@example.com'],
                       cwd=repo_path, capture_output=True)

        # (file_count, file_size_bytes) per size class; unknown -> large.
        size_table = {'small': (10, 100), 'medium': (100, 1000)}
        file_count, file_size = size_table.get(repo_size, (1000, 10000))

        for i in range(file_count):
            file_path = os.path.join(repo_path, f'file_{i:03d}.txt')
            with open(file_path, 'w') as f:
                f.write('x' * file_size)

        subprocess.run(['git', 'add', '.'], cwd=repo_path, capture_output=True)
        subprocess.run(['git', 'commit', '-m', 'Initial commit'],
                       cwd=repo_path, capture_output=True)

        # Add a short commit history so pull operations have work to do.
        # NOTE(review): despite the original comment, no branches are
        # created here — only extra commits on the default branch.
        for i in range(5):
            with open(os.path.join(repo_path, f'update_{i}.txt'), 'w') as f:
                f.write(f'Update {i}\n')
            subprocess.run(['git', 'add', '.'], cwd=repo_path, capture_output=True)
            subprocess.run(['git', 'commit', '-m', f'Update {i}'],
                           cwd=repo_path, capture_output=True)

    def cleanup_test_environment(self):
        """Remove the temp repo tree and reset state for the next run."""
        if self.temp_dir and os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)
            print("测试环境已清理")
        # Always reset, even if the directory was already gone, so a stale
        # path is never reused by a later setup/cleanup cycle.
        self.temp_dir = None
        self.test_repos = []

    def measure_performance(self,
                          script_path: str,
                          args: List[str],
                          test_name: str) -> Dict[str, Any]:
        """Run a script in a subprocess and record time/resource metrics.

        NOTE(review): ``psutil.Process()`` refers to *this* harness process,
        not the child spawned by ``subprocess.run``, so memory/CPU figures
        reflect the test runner rather than the script under test — confirm
        whether child sampling (psutil.Popen) is actually wanted.

        Args:
            script_path: path of the Python script to benchmark.
            args: extra command-line arguments passed to the script.
            test_name: label stored alongside the metrics.

        Returns:
            Dict[str, Any]: the metrics dict. It is ALWAYS appended to
            ``self.test_results`` — including the timeout case, so the
            original/optimized result lists stay positionally aligned
            (generate_report pairs them with zip).
        """
        print(f"\n运行测试: {test_name}")

        start_time = time.time()

        process = psutil.Process()
        start_memory = process.memory_info().rss
        # Prime the cpu_percent interval: the first call always returns a
        # meaningless 0.0, so we discard it and read again after the run.
        process.cpu_percent()

        try:
            result = subprocess.run(
                [sys.executable, script_path] + args,
                capture_output=True,
                text=True,
                timeout=300
            )
        except subprocess.TimeoutExpired:
            print("测试超时")
            performance_data = {
                'test_name': test_name,
                'execution_time': 300,
                'memory_usage_mb': 0,
                'cpu_usage_percent': 0,
                'return_code': -1,
                'stdout': '',
                'stderr': 'Timeout',
                'success': False
            }
            # Bug fix: the original dropped timeout results, which threw
            # off the positional pairing in generate_report.
            self.test_results.append(performance_data)
            return performance_data

        execution_time = time.time() - start_time
        memory_usage = (process.memory_info().rss - start_memory) / 1024 / 1024  # MB
        cpu_usage = process.cpu_percent()  # average since the priming call

        performance_data = {
            'test_name': test_name,
            'execution_time': execution_time,
            'memory_usage_mb': memory_usage,
            'cpu_usage_percent': cpu_usage,
            'return_code': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr,
            'success': result.returncode == 0
        }

        print(f"执行时间: {execution_time:.2f}秒")
        print(f"内存使用: {memory_usage:.2f}MB")
        print(f"CPU使用率: {cpu_usage:.1f}%")
        print(f"成功: {result.returncode == 0}")

        self.test_results.append(performance_data)
        return performance_data

    def run_comparison_tests(self):
        """Run original vs. optimized script across several repo configs."""
        print("="*60)
        print("开始性能对比测试")
        print("="*60)

        # Repository-count / repository-size combinations to benchmark.
        test_configs = [
            {'repos': 10, 'size': 'small'},
            {'repos': 25, 'size': 'small'},
            {'repos': 50, 'size': 'small'},
            {'repos': 10, 'size': 'medium'},
            {'repos': 25, 'size': 'medium'},
        ]

        for config in test_configs:
            print(f"\n{'='*20} 测试配置: {config['repos']} 个 {config['size']} 仓库 {'='*20}")

            self.setup_test_environment(config['repos'], config['size'])

            # Baseline run.
            self.measure_performance(
                script_path='main.py',
                args=['--directory', self.temp_dir, '--verbose'],
                test_name=f"原始版本_{config['repos']}_{config['size']}"
            )

            # Optimized run against the same repo set.
            self.measure_performance(
                script_path='main_optimized.py',
                args=['--directory', self.temp_dir, '--verbose'],
                test_name=f"优化版本_{config['repos']}_{config['size']}"
            )

            # Tear down before the next configuration.
            self.cleanup_test_environment()

    def generate_report(self):
        """Write CSV, markdown report, and charts; print a console summary."""
        print("\n生成性能报告...")

        df = pd.DataFrame(self.test_results)

        # Persist raw per-run metrics.
        df.to_csv('performance_results.csv', index=False)
        print("原始数据已保存到: performance_results.csv")

        report = []
        report.append("# Git Pull All 性能测试报告\n")
        report.append(f"测试时间: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")

        # Runs are paired positionally; measure_performance appends every
        # run (including timeouts) so the two lists stay in step.
        original_tests = [r for r in self.test_results if '原始版本' in r['test_name']]
        optimized_tests = [r for r in self.test_results if '优化版本' in r['test_name']]
        pairs = list(zip(original_tests, optimized_tests))

        report.append("## 性能对比摘要\n")
        report.append("| 测试场景 | 原始版本(秒) | 优化版本(秒) | 加速比 | 内存减少(MB) |")
        report.append("|---------|-------------|-------------|--------|--------------|")

        for orig, opt in pairs:
            # Guard against a zero-duration optimized run.
            if opt['execution_time']:
                speedup = orig['execution_time'] / opt['execution_time']
            else:
                speedup = float('inf')
            memory_reduction = orig['memory_usage_mb'] - opt['memory_usage_mb']

            test_name = orig['test_name'].replace('原始版本_', '').replace('_', ' ')
            report.append(f"| {test_name} | {orig['execution_time']:.2f} | {opt['execution_time']:.2f} | "
                         f"{speedup:.2f}x | {memory_reduction:.2f} |")

        report.append("\n## 详细分析\n")

        # Bug fix: the original divided by len(original_tests) even though
        # zip truncates to the shorter list, and crashed with
        # ZeroDivisionError when no paired runs existed.
        if pairs:
            avg_speedup = sum(
                (o['execution_time'] / p['execution_time']) if p['execution_time'] else float('inf')
                for o, p in pairs
            ) / len(pairs)
            avg_memory_reduction = sum(
                o['memory_usage_mb'] - p['memory_usage_mb'] for o, p in pairs
            ) / len(pairs)
        else:
            avg_speedup = 0.0
            avg_memory_reduction = 0.0

        report.append(f"- **平均加速比**: {avg_speedup:.2f}x")
        report.append(f"- **平均内存减少**: {avg_memory_reduction:.2f}MB")

        self._generate_charts(df)

        report_content = '\n'.join(report)
        with open('performance_report.md', 'w', encoding='utf-8') as f:
            f.write(report_content)

        print("性能报告已保存到: performance_report.md")
        print("图表已保存到: performance_charts.png")

        print("\n" + "="*60)
        print("性能测试摘要")
        print("="*60)
        print(f"平均加速比: {avg_speedup:.2f}x")
        print(f"平均内存减少: {avg_memory_reduction:.2f}MB")
        print("="*60)

    def _generate_charts(self, df: pd.DataFrame):
        """Render a 2x2 chart grid (time, memory, speedup, CPU) to PNG.

        Args:
            df: DataFrame built from self.test_results.
        """
        plt.figure(figsize=(12, 8))

        # .copy() avoids pandas SettingWithCopyWarning when adding the label
        # column below; reset_index keeps the two frames positionally
        # aligned for the element-wise speedup division.
        original_df = df[df['test_name'].str.contains('原始版本')].copy().reset_index(drop=True)
        optimized_df = df[df['test_name'].str.contains('优化版本')].copy().reset_index(drop=True)

        original_df['test_label'] = original_df['test_name'].str.replace('原始版本_', '')
        optimized_df['test_label'] = optimized_df['test_name'].str.replace('优化版本_', '')

        # Execution-time comparison (grouped bars).
        plt.subplot(2, 2, 1)
        x = range(len(original_df))
        width = 0.35

        plt.bar([i - width/2 for i in x], original_df['execution_time'],
                width, label='原始版本', alpha=0.8)
        plt.bar([i + width/2 for i in x], optimized_df['execution_time'],
                width, label='优化版本', alpha=0.8)

        plt.xlabel('测试场景')
        plt.ylabel('执行时间 (秒)')
        plt.title('执行时间对比')
        plt.xticks(x, original_df['test_label'], rotation=45)
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Memory-usage comparison.
        plt.subplot(2, 2, 2)
        plt.bar([i - width/2 for i in x], original_df['memory_usage_mb'],
                width, label='原始版本', alpha=0.8)
        plt.bar([i + width/2 for i in x], optimized_df['memory_usage_mb'],
                width, label='优化版本', alpha=0.8)

        plt.xlabel('测试场景')
        plt.ylabel('内存使用 (MB)')
        plt.title('内存使用对比')
        plt.xticks(x, original_df['test_label'], rotation=45)
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Per-scenario speedup ratio; red dashed line marks parity (1x).
        plt.subplot(2, 2, 3)
        speedups = original_df['execution_time'].values / optimized_df['execution_time'].values
        plt.bar(x, speedups, alpha=0.8, color='green')

        plt.xlabel('测试场景')
        plt.ylabel('加速比')
        plt.title('性能加速比')
        plt.xticks(x, original_df['test_label'], rotation=45)
        plt.axhline(y=1, color='red', linestyle='--', alpha=0.5)
        plt.grid(True, alpha=0.3)

        # CPU-usage comparison.
        plt.subplot(2, 2, 4)
        plt.bar([i - width/2 for i in x], original_df['cpu_usage_percent'],
                width, label='原始版本', alpha=0.8)
        plt.bar([i + width/2 for i in x], optimized_df['cpu_usage_percent'],
                width, label='优化版本', alpha=0.8)

        plt.xlabel('测试场景')
        plt.ylabel('CPU 使用率 (%)')
        plt.title('CPU 使用率对比')
        plt.xticks(x, original_df['test_label'], rotation=45)
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig('performance_charts.png', dpi=300, bbox_inches='tight')
        plt.close()


def main():
    """Entry point: run the comparison benchmark and emit the report.

    The temporary test environment is torn down unconditionally, whether
    the run finishes, raises, or is interrupted with Ctrl-C.
    """
    for banner in ("Git Pull All 性能测试工具", "=" * 60):
        print(banner)

    tester = PerformanceTester()
    try:
        tester.run_comparison_tests()
        tester.generate_report()
    except KeyboardInterrupt:
        print("\n测试被中断")
    except Exception as exc:
        # Top-level boundary: report and fall through to cleanup.
        print(f"测试过程中出错: {exc}")
    finally:
        tester.cleanup_test_environment()


# Script entry point: run the benchmark suite only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()