#!/usr/bin/env python3
"""
Performance Comparison Framework for GCR Solvers.

This script provides a comprehensive framework for comparing performance between
different GCR solver implementations:
- Original GCR-NCCL (C++/CUDA/HIP)
- GCR-Julia (Pure Julia with GPU support)
- Other solver frameworks

Features:
- Automated benchmarking across multiple problem sizes
- Performance metrics collection and analysis
- Visual comparison charts and reports
- Statistical analysis of convergence and timing
- Cross-platform compatibility testing
"""

import sys
import os
import json
import time
import subprocess
import tempfile
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, List, Optional, Any, Tuple
import argparse
from concurrent.futures import ProcessPoolExecutor, as_completed
import csv

# Add GSM source to path
sys.path.insert(0, str(Path(__file__).parent / "src"))

try:
    from plugins.gcr_julia.extension import GCRJuliaExtension
    from gcr_solver_manager.extensions.base_runner import RunConfig
except ImportError as e:
    print(f"Warning: GSM modules not available: {e}")
    print("Some comparison features may be limited")


@dataclass
class BenchmarkResult:
    """Container for benchmark results.

    One instance records the outcome of a single solver/algorithm/backend
    run on one problem configuration. The optional metric fields stay
    ``None`` when the run failed or the metric was not reported.
    """
    # Identity of the benchmarked configuration
    solver: str                # short solver key, e.g. "gcr-julia"
    algorithm: str             # algorithm name, e.g. "gcr" or "gmres"
    backend: str               # execution backend, e.g. "cpu" or "gpu-cuda"
    problem_size: int          # matrix dimension n
    condition_number: float    # requested matrix condition number
    success: bool              # True when the run completed and its output parsed
    # Metrics — populated only on success (parsed from solver output)
    iterations: Optional[int] = None           # solver iterations performed
    convergence_time: Optional[float] = None   # seconds spent in the solve itself
    residual_norm: Optional[float] = None      # final residual norm reported
    memory_usage_mb: Optional[float] = None    # rough memory estimate in MB
    setup_time: Optional[float] = None         # seconds of pre-solve setup
    total_time: Optional[float] = None         # wall-clock for the whole subprocess
    error_message: Optional[str] = None        # failure description when not success
    metadata: Optional[Dict[str, Any]] = None  # optional extra details (unused here)


class GCRPerformanceComparison:
    """Framework for comparing GCR solver implementations.

    On construction the framework probes the local environment for known
    solver implementations (GCR-Julia, the original GCR-NCCL build, and a
    PETSc reference), then offers methods to run benchmark sweeps, analyze
    the collected results, persist them as JSON/CSV, and render a markdown
    report.
    """

    def __init__(self, output_dir: Optional[Path] = None):
        """Initialize comparison framework.

        Args:
            output_dir: Directory for result files. Defaults to
                ``./benchmark_results``; created (with parents) if missing.
        """
        self.output_dir = output_dir or Path("./benchmark_results")
        # parents=True so a nested output path (e.g. results/run1) also works
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Available solvers, keyed by short name (e.g. "gcr-julia")
        self.solvers: Dict[str, Dict[str, Any]] = {}
        self._discover_solvers()

        # Default benchmark sweep; callers may override these attributes
        self.problem_sizes = [100, 500, 1000, 5000, 10000]
        self.condition_numbers = [1e2, 1e4, 1e6, 1e8]
        self.algorithms = ["gcr", "gmres"]

        # Results storage
        self.results: List[BenchmarkResult] = []

    def _discover_solvers(self):
        """Discover available solver implementations.

        Populates ``self.solvers`` with a metadata dict per implementation
        (display name, path/type, availability flag, supported algorithms
        and backends). Availability probes use short-timeout subprocess
        calls so a missing toolchain cannot hang discovery.
        """
        print("🔍 Discovering available solvers...")

        # 1. GCR-Julia: needs both the package checkout and a working
        #    `julia` executable on PATH.
        gcr_julia_path = Path("../GCR-Julia").resolve()
        if gcr_julia_path.exists():
            try:
                # Test Julia availability
                julia_result = subprocess.run(
                    ["julia", "--version"],
                    capture_output=True,
                    timeout=5
                )
                if julia_result.returncode == 0:
                    self.solvers["gcr-julia"] = {
                        "name": "GCR-Julia",
                        "path": gcr_julia_path,
                        "type": "julia",
                        "available": True,
                        "algorithms": ["gcr", "enhanced-gcr", "gmres", "ca-gcr"],
                        "backends": ["cpu", "gpu-cuda", "gpu-amdgpu"]
                    }
                    print("  ✅ GCR-Julia: Available")
                else:
                    print("  ❌ GCR-Julia: Julia not available")
            except (subprocess.TimeoutExpired, FileNotFoundError):
                print("  ❌ GCR-Julia: Julia not found")
        else:
            print("  ❌ GCR-Julia: Package not found")

        # 2. Original GCR-NCCL: presence of the makefile in the parent
        #    directory is treated as "build system available".
        original_gcr_path = Path("../").resolve()
        makefile_path = original_gcr_path / "makefile"
        if makefile_path.exists():
            self.solvers["gcr-nccl"] = {
                "name": "Original GCR-NCCL",
                "path": original_gcr_path,
                "type": "cpp",
                "available": True,
                "algorithms": ["gcr", "ca-gcr", "gmres", "bca-gmres"],
                "backends": ["cpu", "cuda", "hip"]
            }
            print("  ✅ Original GCR-NCCL: Available")
        else:
            print("  ❌ Original GCR-NCCL: Build system not found")

        # 3. PETSc (if available) — probed by loading PETSc_jll from Julia.
        try:
            petsc_result = subprocess.run(
                ["julia", "-e", "using PETSc_jll; println(\"OK\")"],
                capture_output=True,
                timeout=10
            )
            if petsc_result.returncode == 0:
                self.solvers["petsc"] = {
                    "name": "PETSc Reference",
                    "type": "petsc",
                    "available": True,
                    "algorithms": ["gmres", "gcr"],
                    "backends": ["cpu", "mpi"]
                }
                print("  ✅ PETSc Reference: Available")
        except (subprocess.TimeoutExpired, OSError):
            # Was a bare `except:` that swallowed everything, including
            # KeyboardInterrupt; catch only the expected probe failures
            # (missing julia executable, timeout).
            print("  ❌ PETSc Reference: Not available")

        print(f"Found {len(self.solvers)} solver implementations")

    def run_gcr_julia_benchmark(self, algorithm: str, backend: str,
                              problem_size: int, condition_number: float) -> BenchmarkResult:
        """Run benchmark for GCR-Julia solver.

        Builds a small Julia script in-process, executes it in the
        GCR-Julia project environment, and parses the "key: value" lines
        it prints back into a ``BenchmarkResult``.

        Args:
            algorithm: Algorithm key, must match a key of the solve_func
                dict inside the Julia script ("gcr", "enhanced-gcr", "gmres").
            backend: Backend label recorded in the result (the script itself
                runs on whatever the Julia environment provides).
            problem_size: Matrix dimension n.
            condition_number: Requested condition number for the test matrix.

        Returns:
            A populated BenchmarkResult; ``success`` is False with
            ``error_message`` set on any failure.
        """
        gcr_julia_info = self.solvers["gcr-julia"]
        gcr_julia_path = gcr_julia_info["path"]

        result = BenchmarkResult(
            solver="gcr-julia",
            algorithm=algorithm,
            backend=backend,
            problem_size=problem_size,
            condition_number=condition_number,
            success=False
        )

        try:
            # Create Julia benchmark script (doubled braces escape the
            # literal Dict braces inside this f-string)
            julia_script = f'''
            using GCR
            using LinearAlgebra
            using Printf
            
            # Problem setup
            n = {problem_size}
            
            # Generate matrix with specified condition number
            if {condition_number} > 1000
                A = generate_test_matrix(Float64, n; 
                                       type=:random, 
                                       condition_number={condition_number})
            else
                A = generate_test_matrix(Float64, n; type=:laplacian)
            end
            
            b = rand(n)
            
            println("Problem size: $n")
            println("Matrix condition estimate: $(cond(Matrix(A[1:min(10,n), 1:min(10,n)])))")
            
            # Choose solver function
            solve_func = {{"gcr" => solve_gcr, 
                          "enhanced-gcr" => solve_enhanced_gcr,
                          "gmres" => solve_gmres}}["{algorithm}"]
            
            # Benchmark execution
            setup_time = @elapsed begin
                # Any setup needed
            end
            
            GC.gc()  # Clean garbage before timing
            
            convergence_time = @elapsed begin
                global solution = solve_func(A, b, tol=1e-6, maxiter=min(1000, 2*n), verbose=false)
            end
            
            # Results
            println("SUCCESS")
            println("iterations: $(solution.iterations)")
            println("residual_norm: $(solution.residual_norm)")
            println("converged: $(solution.converged)")
            println("convergence_time: $convergence_time")
            println("setup_time: $setup_time")
            
            # Memory estimate (rough)
            memory_mb = (sizeof(A) + 2*sizeof(b) + sizeof(solution.x)) / 1024^2
            println("memory_usage_mb: $memory_mb")
            '''

            # Execute benchmark in the GCR-Julia project environment
            julia_cmd = [
                "julia",
                f"--project={gcr_julia_path}",
                "--optimize=2",
                "-e", julia_script
            ]

            start_time = time.time()
            proc_result = subprocess.run(
                julia_cmd,
                capture_output=True,
                text=True,
                timeout=120  # 2 minute timeout
            )
            total_time = time.time() - start_time

            if proc_result.returncode == 0 and "SUCCESS" in proc_result.stdout:
                # Parse the "key: value" lines the script printed.
                # NOTE(review): the "converged:" line is printed but not
                # parsed, so success means "ran and reported", not
                # necessarily "converged" — confirm intended semantics.
                output_lines = proc_result.stdout.strip().split('\n')

                for line in output_lines:
                    if line.startswith("iterations: "):
                        result.iterations = int(line.split(": ")[1])
                    elif line.startswith("residual_norm: "):
                        result.residual_norm = float(line.split(": ")[1])
                    elif line.startswith("convergence_time: "):
                        result.convergence_time = float(line.split(": ")[1])
                    elif line.startswith("setup_time: "):
                        result.setup_time = float(line.split(": ")[1])
                    elif line.startswith("memory_usage_mb: "):
                        result.memory_usage_mb = float(line.split(": ")[1])

                result.total_time = total_time
                result.success = True

            else:
                result.error_message = f"Execution failed: {proc_result.stderr}"

        except subprocess.TimeoutExpired:
            result.error_message = "Execution timed out"
        except Exception as e:
            result.error_message = f"Benchmark error: {e}"

        return result

    def run_original_gcr_benchmark(self, algorithm: str, backend: str,
                                 problem_size: int, condition_number: float) -> BenchmarkResult:
        """Run benchmark for original GCR-NCCL solver.

        Currently a stub: always returns a failed result with an
        explanatory message, since integration with the C++/CUDA build
        system is not implemented yet.
        """
        result = BenchmarkResult(
            solver="gcr-nccl",
            algorithm=algorithm,
            backend=backend,
            problem_size=problem_size,
            condition_number=condition_number,
            success=False
        )

        try:
            # Note: This would require integration with existing build system
            # For now, we'll simulate or skip this
            result.error_message = "Original GCR-NCCL benchmarking not yet implemented"

        except Exception as e:
            result.error_message = f"Original GCR benchmark error: {e}"

        return result

    def run_single_benchmark(self, solver: str, algorithm: str, backend: str,
                           problem_size: int, condition_number: float) -> BenchmarkResult:
        """Run a single benchmark configuration.

        Dispatches to the solver-specific runner; unknown solver names
        produce a failed result rather than raising.
        """
        print(f"  🏃 {solver}/{algorithm}/{backend}: n={problem_size}, cond={condition_number:.1e}")

        if solver == "gcr-julia":
            return self.run_gcr_julia_benchmark(algorithm, backend, problem_size, condition_number)
        elif solver == "gcr-nccl":
            return self.run_original_gcr_benchmark(algorithm, backend, problem_size, condition_number)
        else:
            return BenchmarkResult(
                solver=solver,
                algorithm=algorithm,
                backend=backend,
                problem_size=problem_size,
                condition_number=condition_number,
                success=False,
                error_message=f"Unknown solver: {solver}"
            )

    def run_comprehensive_benchmark(self, max_workers: int = 2) -> List[BenchmarkResult]:
        """Run comprehensive benchmark across all configurations.

        Builds the cross-product of available solvers, requested
        algorithms, key backends, problem sizes, and the first two
        condition numbers, then runs each configuration either
        sequentially or through a process pool.

        Args:
            max_workers: >1 enables parallel execution via
                ProcessPoolExecutor (this instance must be picklable,
                since the bound method is submitted to worker processes).

        Returns:
            The list of BenchmarkResult objects (also stored on
            ``self.results``).
        """
        print("🚀 Starting Comprehensive Benchmark")
        print("=" * 50)

        # Generate benchmark configurations
        configurations = []

        for solver_name, solver_info in self.solvers.items():
            if not solver_info["available"]:
                continue

            for algorithm in solver_info.get("algorithms", []):
                if algorithm in self.algorithms:  # Only test specified algorithms
                    for backend in solver_info.get("backends", ["cpu"]):
                        if backend in ["cpu", "gpu-cuda"]:  # Focus on key backends
                            for problem_size in self.problem_sizes:
                                for condition_number in self.condition_numbers[:2]:  # Limit condition numbers
                                    configurations.append({
                                        "solver": solver_name,
                                        "algorithm": algorithm,
                                        "backend": backend,
                                        "problem_size": problem_size,
                                        "condition_number": condition_number
                                    })

        print(f"Total configurations: {len(configurations)}")

        # Run benchmarks
        results = []

        if max_workers > 1:
            # Parallel execution
            with ProcessPoolExecutor(max_workers=max_workers) as executor:
                future_to_config = {
                    executor.submit(
                        self.run_single_benchmark,
                        config["solver"],
                        config["algorithm"],
                        config["backend"],
                        config["problem_size"],
                        config["condition_number"]
                    ): config for config in configurations
                }

                for future in as_completed(future_to_config):
                    config = future_to_config[future]
                    try:
                        result = future.result()
                        results.append(result)
                    except Exception as e:
                        print(f"  ❌ Error in {config}: {e}")
        else:
            # Sequential execution
            for config in configurations:
                result = self.run_single_benchmark(
                    config["solver"],
                    config["algorithm"],
                    config["backend"],
                    config["problem_size"],
                    config["condition_number"]
                )
                results.append(result)

                # Progress update every five completed configurations
                if len(results) % 5 == 0:
                    print(f"  📊 Completed {len(results)}/{len(configurations)} benchmarks")

        self.results = results
        return results

    def analyze_results(self) -> Dict[str, Any]:
        """Analyze benchmark results and generate statistics.

        Returns:
            Dict with "summary" (counts and success rate), "by_solver"
            (per-solver aggregate timings/iterations), and
            "performance_comparison" (per-problem-size solver averages,
            only for sizes where more than one solver succeeded).
        """
        print("\n📊 Analyzing Benchmark Results")
        print("=" * 50)

        successful_results = [r for r in self.results if r.success]
        failed_results = [r for r in self.results if not r.success]

        analysis = {
            "summary": {
                "total_benchmarks": len(self.results),
                "successful": len(successful_results),
                "failed": len(failed_results),
                "success_rate": len(successful_results) / len(self.results) if self.results else 0
            },
            "by_solver": {},
            "performance_comparison": {},
            "convergence_analysis": {}
        }

        # Analysis by solver. Filters use `is not None` rather than
        # truthiness so a legitimate 0.0s time / 0-iteration result is
        # not silently dropped from the averages.
        for solver in set(r.solver for r in successful_results):
            solver_results = [r for r in successful_results if r.solver == solver]

            if solver_results:
                times = [r.convergence_time for r in solver_results
                         if r.convergence_time is not None]
                iterations = [r.iterations for r in solver_results
                              if r.iterations is not None]

                analysis["by_solver"][solver] = {
                    "benchmarks": len(solver_results),
                    "avg_time": sum(times) / len(times) if times else None,
                    "avg_iterations": sum(iterations) / len(iterations) if iterations else None,
                    "min_time": min(times) if times else None,
                    "max_time": max(times) if times else None
                }

        # Performance comparison for same problem sizes
        problem_sizes = set(r.problem_size for r in successful_results)
        for size in problem_sizes:
            size_results = [r for r in successful_results if r.problem_size == size]
            size_comparison = {}

            for solver in set(r.solver for r in size_results):
                solver_size_results = [r for r in size_results if r.solver == solver]
                # BUG FIX: the sum was previously filtered to timed results
                # but divided by the count of ALL results, skewing the mean
                # whenever some results lacked timing data.
                timed = [r.convergence_time for r in solver_size_results
                         if r.convergence_time is not None]
                if timed:
                    size_comparison[solver] = sum(timed) / len(timed)

            if len(size_comparison) > 1:
                analysis["performance_comparison"][f"size_{size}"] = size_comparison

        # Print summary
        print(f"✅ Successful benchmarks: {analysis['summary']['successful']}")
        print(f"❌ Failed benchmarks: {analysis['summary']['failed']}")
        print(f"📈 Success rate: {analysis['summary']['success_rate']:.1%}")

        print("\n🏆 Performance by Solver:")
        for solver, stats in analysis["by_solver"].items():
            print(f"  {solver}:")
            print(f"    Benchmarks: {stats['benchmarks']}")
            if stats['avg_time'] is not None:
                print(f"    Avg Time: {stats['avg_time']:.3f}s")
            if stats['avg_iterations'] is not None:
                print(f"    Avg Iterations: {stats['avg_iterations']:.1f}")

        return analysis

    def save_results(self, filename: Optional[str] = None) -> Tuple[Path, Path]:
        """Save benchmark results to files.

        Args:
            filename: Basename (without extension) for the output files;
                defaults to a timestamped ``gcr_benchmark_<stamp>`` name.

        Returns:
            Tuple of (json_path, csv_path) for the files written.
        """
        if not filename:
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            filename = f"gcr_benchmark_{timestamp}"

        # Save raw results as JSON.
        # BUG FIX: the literal string "(unknown)" was used here instead of
        # the computed `filename`, so every run wrote "(unknown).json" and
        # overwrote previous results.
        json_file = self.output_dir / f"{filename}.json"
        results_data = []
        for result in self.results:
            result_dict = {
                "solver": result.solver,
                "algorithm": result.algorithm,
                "backend": result.backend,
                "problem_size": result.problem_size,
                "condition_number": result.condition_number,
                "success": result.success,
                "iterations": result.iterations,
                "convergence_time": result.convergence_time,
                "residual_norm": result.residual_norm,
                "memory_usage_mb": result.memory_usage_mb,
                "setup_time": result.setup_time,
                "total_time": result.total_time,
                "error_message": result.error_message
            }
            results_data.append(result_dict)

        with open(json_file, 'w') as f:
            json.dump(results_data, f, indent=2)

        # Save as CSV for analysis (same "(unknown)" bug fixed here too;
        # setup_time added so the CSV carries the same metrics as the JSON)
        csv_file = self.output_dir / f"{filename}.csv"
        with open(csv_file, 'w', newline='') as f:
            if self.results:
                fieldnames = ["solver", "algorithm", "backend", "problem_size",
                             "condition_number", "success", "iterations",
                             "convergence_time", "residual_norm", "memory_usage_mb",
                             "setup_time", "total_time", "error_message"]
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()

                for result in self.results:
                    row = {field: getattr(result, field) for field in fieldnames}
                    writer.writerow(row)

        print(f"\n💾 Results saved:")
        print(f"  📄 JSON: {json_file}")
        print(f"  📊 CSV: {csv_file}")

        return json_file, csv_file

    def generate_report(self) -> str:
        """Generate a comprehensive benchmark report.

        Runs :meth:`analyze_results` and renders its output, together
        with solver availability and up to ten failure details, as a
        markdown string.
        """
        analysis = self.analyze_results()

        report = f"""
# GCR Solver Performance Comparison Report

Generated: {time.strftime("%Y-%m-%d %H:%M:%S")}

## Summary

- **Total Benchmarks**: {analysis['summary']['total_benchmarks']}
- **Successful**: {analysis['summary']['successful']}
- **Failed**: {analysis['summary']['failed']}  
- **Success Rate**: {analysis['summary']['success_rate']:.1%}

## Available Solvers

"""

        for solver_name, solver_info in self.solvers.items():
            status = "✅" if solver_info["available"] else "❌"
            report += f"- {status} **{solver_info['name']}** ({solver_name})\n"
            if solver_info["available"]:
                report += f"  - Algorithms: {', '.join(solver_info.get('algorithms', []))}\n"
                report += f"  - Backends: {', '.join(solver_info.get('backends', []))}\n"

        report += f"\n## Performance Analysis\n\n"

        # `is not None` checks so a 0.0s statistic still gets reported
        for solver, stats in analysis["by_solver"].items():
            report += f"### {solver}\n\n"
            report += f"- Benchmarks completed: {stats['benchmarks']}\n"
            if stats['avg_time'] is not None:
                report += f"- Average convergence time: {stats['avg_time']:.3f}s\n"
            if stats['avg_iterations'] is not None:
                report += f"- Average iterations: {stats['avg_iterations']:.1f}\n"
            if stats['min_time'] is not None and stats['max_time'] is not None:
                report += f"- Time range: {stats['min_time']:.3f}s - {stats['max_time']:.3f}s\n"
            report += "\n"

        # Performance comparisons
        if analysis["performance_comparison"]:
            report += "## Performance Comparisons\n\n"
            for size_key, comparison in analysis["performance_comparison"].items():
                size = size_key.replace("size_", "")
                report += f"### Problem Size {size}\n\n"
                for solver, time_val in comparison.items():
                    report += f"- {solver}: {time_val:.3f}s\n"
                report += "\n"

        # Failed benchmarks
        failed_results = [r for r in self.results if not r.success]
        if failed_results:
            report += "## Failed Benchmarks\n\n"
            for result in failed_results[:10]:  # Show first 10 failures
                report += f"- {result.solver}/{result.algorithm}/{result.backend} "
                report += f"(n={result.problem_size}): {result.error_message}\n"

        report += f"\n## Recommendations\n\n"

        # Generate recommendations based on results
        successful_solvers = list(analysis["by_solver"].keys())
        if "gcr-julia" in successful_solvers:
            report += "- ✅ GCR-Julia is functional and can be used for Julia-based workflows\n"

        if len(successful_solvers) > 1:
            # None-aware min key (a 0.0 average previously fell back to inf)
            fastest_solver = min(
                analysis["by_solver"].items(),
                key=lambda x: x[1]["avg_time"] if x[1]["avg_time"] is not None else float('inf'))
            report += f"- 🏆 Fastest average performance: {fastest_solver[0]}\n"

        report += "\n---\n*Generated by GCR Performance Comparison Framework*\n"

        return report


def main():
    """Command-line entry point.

    Parses arguments, builds the comparison framework, runs the benchmark
    sweep, writes results, and optionally saves a markdown report.
    Returns 0 if at least one benchmark succeeded, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description="Compare GCR solver implementations")
    parser.add_argument("--output-dir", type=Path, default="./benchmark_results",
                        help="Output directory for results")
    parser.add_argument("--quick", action="store_true",
                        help="Run quick benchmark (fewer configurations)")
    parser.add_argument("--solvers", nargs="+",
                        choices=["gcr-julia", "gcr-nccl", "petsc"],
                        help="Specific solvers to benchmark")
    parser.add_argument("--algorithms", nargs="+",
                        choices=["gcr", "gmres", "ca-gcr"],
                        default=["gcr", "gmres"],
                        help="Algorithms to benchmark")
    parser.add_argument("--max-workers", type=int, default=1,
                        help="Maximum parallel workers")
    parser.add_argument("--save-report", action="store_true",
                        help="Save detailed report")

    opts = parser.parse_args()

    # Build the framework (solver discovery happens in the constructor)
    framework = GCRPerformanceComparison(opts.output_dir)

    # --quick shrinks the sweep to two sizes and two condition numbers
    if opts.quick:
        framework.problem_sizes = [100, 1000]
        framework.condition_numbers = [1e2, 1e4]

    if opts.algorithms:
        framework.algorithms = opts.algorithms

    # Restrict to explicitly requested solver implementations, if any
    if opts.solvers:
        framework.solvers = {name: info for name, info in framework.solvers.items()
                             if name in opts.solvers}

    # Announce the configured sweep, then run it
    print(f"🎯 Target algorithms: {framework.algorithms}")
    print(f"📏 Problem sizes: {framework.problem_sizes}")
    print(f"🔢 Condition numbers: {framework.condition_numbers}")

    bench_results = framework.run_comprehensive_benchmark(max_workers=opts.max_workers)

    summary = framework.analyze_results()

    json_path, csv_path = framework.save_results()

    # A report is written on request, or automatically for larger sweeps
    if opts.save_report or len(bench_results) > 10:
        report_text = framework.generate_report()
        report_path = opts.output_dir / f"benchmark_report_{time.strftime('%Y%m%d_%H%M%S')}.md"
        with open(report_path, 'w') as handle:
            handle.write(report_text)
        print(f"  📋 Report: {report_path}")

        # Echo just the "## Summary" section of the report to the console
        print("\n" + "=" * 60)
        print("BENCHMARK SUMMARY")
        print("=" * 60)
        print(report_text.split("## Summary")[1].split("## Available Solvers")[0])

    # Exit status reflects whether anything actually succeeded
    if summary["summary"]["successful"] > 0:
        print("\n🎉 Benchmark completed successfully!")
        return 0
    print("\n❌ No successful benchmarks completed")
    return 1


if __name__ == "__main__":
    sys.exit(main())