#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
综合实验Script

自动化RunIntermediate value模型评估实验：
1. GenerateConfigurationFile
2. 编译固件
3. 采集Trace
4. Analyze相关性
"""

import json
import os
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

# Import sibling project modules
from generate_config import generate_config_file, ALGORITHM_CONFIG
from trace_collector import CPUTraceCollector
from correlation_analyzer import CorrelationAnalyzer, ComparisonAnalyzer


class ExperimentRunner:
    """Orchestrates intermediate-value model evaluation experiments.

    One experiment runs the full pipeline: generate a configuration file,
    compile the firmware, collect CPU traces, and analyze the correlation
    between the traces and the intermediate-value model.
    """

    def __init__(self, base_dir: Optional[Path] = None):
        """Initialize working directories.

        Args:
            base_dir: Root directory of the experiment scripts; defaults to
                the directory containing this file.
        """
        if base_dir is None:
            base_dir = Path(__file__).parent
        self.base_dir = base_dir
        # The firmware project is expected to live beside this script's directory.
        self.firmware_dir = base_dir.parent / 'stm32_intermediate_values'
        self.results_dir = base_dir / 'results'
        self.results_dir.mkdir(parents=True, exist_ok=True)

    def run_single_experiment(
        self,
        algorithm: str,
        model: str,
        num_traces: int = 100,
        trace_delay_us: int = 500000,
        auto_compile: bool = True,
        output_name: Optional[str] = None
    ) -> Dict:
        """Run a single experiment end to end.

        Args:
            algorithm: Algorithm name (must be known to generate_config).
            model: Intermediate-value model name.
            num_traces: Number of traces to collect.
            trace_delay_us: Delay between traces, in microseconds.
            auto_compile: Whether to (re)compile the firmware first.
            output_name: Results subdirectory name; generated from the
                algorithm, model and a timestamp when None.

        Returns:
            Result dictionary. On failure: {'success': False, 'error': ...}.
            On success it also carries 'algorithm', 'model', 'num_traces',
            'correlation', 'significance' and 'output_dir'.
        """
        print("\n" + "="*70)
        print(f"Running experiment: {algorithm} - {model}")
        print("="*70)

        # 1. Generate configuration file
        print("\n[1/4] Generating configuration file...")
        try:
            config_path, metadata = generate_config_file(
                algorithm=algorithm,
                model=model,
                num_traces=num_traces,
                trace_delay_us=trace_delay_us,
                output_dir=self.firmware_dir
            )
            print(f"Configuration file generated: {config_path}")
        except Exception as e:
            print(f"ERROR: Configuration generation failed: {e}")
            return {'success': False, 'error': str(e)}

        # 2. Compile firmware
        if auto_compile:
            print("\n[2/4] Compiling firmware...")
            if not self.compile_firmware():
                return {'success': False, 'error': 'Firmware compilation failed'}
        else:
            print("\n[2/4] Skipping firmware compilation (manual compile required)")

        # 3. Collect traces
        print("\n[3/4] Collecting CPU traces...")

        # Find the .elf file produced by the build.
        build_dir = self.firmware_dir / 'build'
        elf_files = list(build_dir.glob('*.elf'))

        if not elf_files:
            print(f"ERROR: No .elf file found in {build_dir}")
            return {'success': False, 'error': 'No firmware file found'}

        firmware_path = elf_files[0]
        print(f"Using firmware: {firmware_path.name}")

        collector = CPUTraceCollector(str(firmware_path))
        results = collector.run_collection(num_traces)

        # .get() so a malformed result dict cannot raise KeyError here.
        if not results or results.get('num_traces', 0) == 0:
            print("ERROR: Trace collection failed")
            return {'success': False, 'error': 'Trace collection failed'}

        print(f"Collection successful: {results['num_traces']} traces")

        # 4. Save results
        if output_name is None:
            output_name = f"{algorithm}_{model}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"

        output_dir = self.results_dir / output_name
        output_dir.mkdir(parents=True, exist_ok=True)

        trace_path = output_dir / 'traces'
        collector.save_results(results, str(trace_path), metadata)

        # 5. Correlation analysis
        print("\n[4/4] Analyzing correlation...")
        analyzer = CorrelationAnalyzer(str(trace_path.with_suffix('.npz')))
        correlation = analyzer.generate_report(str(output_dir))

        print(f"\nExperiment complete! Results saved to: {output_dir}")

        return {
            'success': True,
            'algorithm': algorithm,
            'model': model,
            'num_traces': results['num_traces'],
            'correlation': correlation['pearson_coefficient'],
            'significance': correlation['significance'],
            'output_dir': str(output_dir)
        }

    def compile_firmware(self) -> bool:
        """Clean and rebuild the firmware with make.

        Returns:
            True when both ``make clean`` and ``make`` succeed.
        """
        try:
            # Remove stale build artifacts first.
            # text=True on both runs so a captured stderr is str, not bytes.
            subprocess.run(
                ['make', 'clean'],
                cwd=self.firmware_dir,
                check=True,
                capture_output=True,
                text=True
            )

            # Build the firmware.
            subprocess.run(
                ['make'],
                cwd=self.firmware_dir,
                check=True,
                capture_output=True,
                text=True
            )

            print("Firmware compilation successful")
            return True

        except subprocess.CalledProcessError as e:
            print("ERROR: Firmware compilation failed:")
            # stderr can be None (e.g. if capturing is disabled); guard it.
            if e.stderr:
                print(e.stderr)
            return False

    def run_algorithm_all_models(
        self,
        algorithm: str,
        num_traces: int = 100,
        trace_delay_us: int = 500000,
        auto_compile: bool = True
    ) -> List[Dict]:
        """Run every model of one algorithm.

        Args:
            algorithm: Algorithm name.
            num_traces: Number of traces per model.
            trace_delay_us: Delay between traces, in microseconds.
            auto_compile: Whether to compile the firmware (first model only).

        Returns:
            List of per-model result dictionaries (empty if the algorithm
            is not supported).
        """
        if algorithm not in ALGORITHM_CONFIG:
            print(f"ERROR: Unsupported algorithm: {algorithm}")
            return []

        models = list(ALGORITHM_CONFIG[algorithm]['models'].keys())
        print(f"\nRunning {len(models)} models for {algorithm}...")

        results = []
        for i, model in enumerate(models, 1):
            print(f"\n{'='*70}")
            print(f"Progress: {i}/{len(models)} - {model}")
            print(f"{'='*70}")

            result = self.run_single_experiment(
                algorithm=algorithm,
                model=model,
                num_traces=num_traces,
                trace_delay_us=trace_delay_us,
                auto_compile=auto_compile,
                output_name=f"{algorithm}_{model}"
            )
            results.append(result)

            # Only compile on the first iteration; later models reuse the build.
            auto_compile = False

        # Generate a cross-model comparison report.
        if len(results) > 1:
            self.generate_algorithm_summary(algorithm, results)

        return results

    def _collect_trace_files(self, results: List[Dict]) -> List[str]:
        """Return trace archive paths from all successful experiments."""
        trace_files = []
        for result in results:
            if result['success']:
                trace_file = Path(result['output_dir']) / 'traces.npz'
                if trace_file.exists():
                    trace_files.append(str(trace_file))
        return trace_files

    def generate_algorithm_summary(self, algorithm: str, results: List[Dict]):
        """Generate a summary report comparing all models of one algorithm."""
        summary_dir = self.results_dir / f"{algorithm}_summary"
        summary_dir.mkdir(parents=True, exist_ok=True)

        trace_files = self._collect_trace_files(results)

        if len(trace_files) > 1:
            print(f"\nGenerating summary report for {algorithm}...")
            analyzer = ComparisonAnalyzer(trace_files)
            analyzer.generate_comparison_report(str(summary_dir))
            print(f"Summary report saved: {summary_dir}")

    def run_multi_algorithm_comparison(
        self,
        algorithms: List[str],
        model: str,
        num_traces: int = 100,
        trace_delay_us: int = 500000
    ) -> List[Dict]:
        """Run the same model across several algorithms for comparison.

        Args:
            algorithms: Algorithm names to compare.
            model: Intermediate-value model name.
            num_traces: Number of traces per algorithm.
            trace_delay_us: Delay between traces, in microseconds.

        Returns:
            List of per-algorithm result dictionaries; skipped algorithms
            (unknown, or not supporting the model) produce no entry.
        """
        print(f"\nRunning multi-algorithm comparison: {model}")
        print(f"Algorithms: {', '.join(algorithms)}")

        results = []
        auto_compile = True

        for i, algorithm in enumerate(algorithms, 1):
            # Skip algorithms that are unknown or do not support this model.
            if algorithm not in ALGORITHM_CONFIG:
                print(f"WARNING: Skipping unsupported algorithm: {algorithm}")
                continue

            if model not in ALGORITHM_CONFIG[algorithm]['models']:
                print(f"WARNING: {algorithm} does not support model: {model}")
                continue

            print(f"\n{'='*70}")
            print(f"Progress: {i}/{len(algorithms)} - {algorithm}")
            print(f"{'='*70}")

            result = self.run_single_experiment(
                algorithm=algorithm,
                model=model,
                num_traces=num_traces,
                trace_delay_us=trace_delay_us,
                auto_compile=auto_compile,
                output_name=f"{algorithm}_{model}"
            )
            results.append(result)

            # Only compile for the first algorithm actually run.
            auto_compile = False

        # Generate a cross-algorithm comparison report.
        if len(results) > 1:
            self.generate_model_comparison(model, results)

        return results

    def generate_model_comparison(self, model: str, results: List[Dict]):
        """Generate a report comparing one model across several algorithms."""
        summary_dir = self.results_dir / f"{model}_comparison"
        summary_dir.mkdir(parents=True, exist_ok=True)

        trace_files = self._collect_trace_files(results)

        if len(trace_files) > 1:
            print(f"\nGenerating comparison report for {model}...")
            analyzer = ComparisonAnalyzer(trace_files)
            analyzer.generate_comparison_report(str(summary_dir))
            print(f"Comparison report saved: {summary_dir}")


def main():
    """CLI entry point.

    Modes (checked in order):
      1. --algorithm + --model: run a single experiment.
      2. --algorithm + --all-models: run every model of one algorithm.
      3. --algorithms + --model: compare several algorithms on one model.
    Anything else prints usage help.
    """
    import argparse

    parser = argparse.ArgumentParser(description='Intermediate value模型综合实验Script')
    parser.add_argument('--algorithm', '-a', type=str, help='Algorithm名称')
    parser.add_argument('--algorithms', nargs='+', help='MultipleAlgorithm（用于对比）')
    parser.add_argument('--model', '-m', type=str, help='模型名称')
    parser.add_argument('--all-models', action='store_true', help='RunAlgorithm的All模型')
    parser.add_argument('--traces', '-n', type=int, default=100, help='Trace数量')
    parser.add_argument('--trace-delay', '-d', type=int, default=500000, help='Trace延时（微秒），Default500000us=500ms')
    parser.add_argument('--no-compile', action='store_true', help='Skip自动编译')
    # NOTE(review): --compare is accepted but never consulted below; kept for
    # CLI backward compatibility — confirm whether it should gate the reports.
    parser.add_argument('--compare', action='store_true', help='Generate对比报告')
    parser.add_argument('--output', '-o', type=str, help='Output名称')

    args = parser.parse_args()

    runner = ExperimentRunner()

    # Mode 1: single algorithm, single model
    if args.algorithm and args.model:
        result = runner.run_single_experiment(
            algorithm=args.algorithm,
            model=args.model,
            num_traces=args.traces,
            trace_delay_us=args.trace_delay,
            auto_compile=not args.no_compile,
            output_name=args.output
        )

        if result['success']:
            print("\nExperiment successful")
            print(f"   Correlation coefficient: {result['correlation']:.4f}")
            print(f"   Significance: {result['significance']}")
        else:
            print(f"\nERROR: Experiment failed: {result.get('error', 'Unknown')}")

    # Mode 2: single algorithm, all models
    elif args.algorithm and args.all_models:
        results = runner.run_algorithm_all_models(
            algorithm=args.algorithm,
            num_traces=args.traces,
            trace_delay_us=args.trace_delay,
            auto_compile=not args.no_compile
        )

        success_count = sum(1 for r in results if r['success'])
        print(f"\nSummary: {success_count}/{len(results)} models successful")

    # Mode 3: same model across multiple algorithms
    elif args.algorithms and args.model:
        results = runner.run_multi_algorithm_comparison(
            algorithms=args.algorithms,
            model=args.model,
            num_traces=args.traces,
            trace_delay_us=args.trace_delay
        )

        success_count = sum(1 for r in results if r['success'])
        # Count against experiments actually run: skipped algorithms produce
        # no result entry, so len(args.algorithms) would misreport the total
        # (and mode 2 already reports against len(results)).
        print(f"\nSummary: {success_count}/{len(results)} algorithms successful")

    else:
        parser.print_help()
        print("\nUsage examples:")
        print("  # Single experiment")
        print("  python run_experiment.py -a aes -m sbox_output -n 100")
        print("\n  # All models for algorithm")
        print("  python run_experiment.py -a aes --all-models -n 100")
        print("\n  # Multi-algorithm comparison")
        print("  python run_experiment.py --algorithms aes sm4 -m sbox_output -n 100")

# Script entry point: dispatch to the CLI handler when run directly.
if __name__ == '__main__':
    main()
