#!/usr/bin/env python3

"""
Generate publication-ready figures for DRP4DVar vs 3DVar comparison

This script reads the CSV results from the comprehensive evaluation and generates
publication-quality figures suitable for manuscript inclusion.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.gridspec import GridSpec
import os

# Configure matplotlib for publication-quality output.
plt.style.use('default')

# All style overrides collected in one named mapping so the settings are
# easy to scan and tweak in one place.
_PUBLICATION_STYLE = {
    'font.family': 'serif',
    'font.size': 10,
    'axes.labelsize': 12,
    'axes.titlesize': 14,
    'xtick.labelsize': 10,
    'ytick.labelsize': 10,
    'legend.fontsize': 10,
    'figure.titlesize': 16,
    'figure.dpi': 300,
    'savefig.dpi': 300,
    'savefig.bbox': 'tight',
    'savefig.format': 'png',
    'axes.grid': True,
    'grid.alpha': 0.3,
}
plt.rcParams.update(_PUBLICATION_STYLE)

def load_data(csv_path):
    """Read the comprehensive evaluation results into a DataFrame.

    Parameters
    ----------
    csv_path : str
        Path to the CSV file produced by the comprehensive evaluation.

    Returns
    -------
    pandas.DataFrame
        One row per experimental result.
    """
    return pd.read_csv(csv_path)

def create_performance_comparison(df):
    """Create the 2x2 performance-comparison figure for the 'medium' config.

    Panels: solve time, analysis RMSE, cost reduction and speedup of
    DRP4DVar (relative to the 3DVar baseline) as a function of ensemble size.

    Parameters
    ----------
    df : pandas.DataFrame
        Comprehensive results; expected columns include 'config', 'method',
        'ensemble_size', 'solve_time', 'analysis_rmse' and 'cost_reduction'.

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure (caller is responsible for saving it).
    """
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 10))
    fig.suptitle('DRP4DVar vs 3DVar: Comprehensive Performance Comparison', fontweight='bold')

    # Filter medium configuration for detailed analysis
    medium_data = df[df['config'] == 'medium'].copy()

    # Extract ensemble sizes as numbers; non-numeric entries (e.g. a
    # placeholder on the 3DVar rows) become NaN.
    medium_data['ensemble_size_int'] = pd.to_numeric(medium_data['ensemble_size'], errors='coerce')

    # Plot 1: Solve Time Comparison
    ax1.set_title('Computational Performance')

    # 3DVar baseline.  BUG FIX: computed BEFORE dropping rows with a
    # non-numeric ensemble size -- previously dropna() could silently
    # remove the 3DVar rows, leaving a NaN baseline.
    baseline_time = medium_data[medium_data['method'] == '3DVar']['solve_time'].mean()
    if pd.notna(baseline_time):
        ax1.axhline(y=baseline_time, color='red', linestyle='--', label='3DVar Baseline', alpha=0.7)

    # DRP4DVar results: only rows with a valid numeric ensemble size.
    drp_data = medium_data[medium_data['method'].str.contains('DRP4DVar')]
    drp_data = drp_data.dropna(subset=['ensemble_size_int'])
    if not drp_data.empty:
        time_stats = drp_data.groupby('ensemble_size_int')['solve_time']
        mean_times = time_stats.mean()
        std_times = time_stats.std()
        # BUG FIX: take the x values from the groupby index (sorted by
        # ensemble size).  The original used .unique(), which returns
        # order of appearance and could mis-pair x with the aggregated
        # y values when input rows are not pre-sorted.
        ensemble_sizes = mean_times.index.to_numpy()

        ax1.errorbar(ensemble_sizes, mean_times, yerr=std_times,
                    marker='o', linewidth=2, markersize=6, capsize=4,
                    label='DRP4DVar', color='blue')

    ax1.set_xlabel('Ensemble Size')
    ax1.set_ylabel('Solve Time (seconds)')
    ax1.legend()
    ax1.grid(True, alpha=0.3)

    # Plot 2: Analysis Accuracy (RMSE)
    ax2.set_title('Analysis Accuracy')

    baseline_rmse = medium_data[medium_data['method'] == '3DVar']['analysis_rmse'].mean()
    if pd.notna(baseline_rmse):
        ax2.axhline(y=baseline_rmse, color='red', linestyle='--', label='3DVar Baseline', alpha=0.7)

    if not drp_data.empty:
        rmse_stats = drp_data.groupby('ensemble_size_int')['analysis_rmse']
        # Same sorted index as mean_times, so x/y stay aligned.
        ax2.errorbar(ensemble_sizes, rmse_stats.mean(), yerr=rmse_stats.std(),
                    marker='s', linewidth=2, markersize=6, capsize=4,
                    label='DRP4DVar', color='green')

    ax2.set_xlabel('Ensemble Size')
    ax2.set_ylabel('Analysis RMSE')
    ax2.legend()
    ax2.grid(True, alpha=0.3)

    # Plot 3: Cost Reduction
    ax3.set_title('Cost Function Reduction')

    if not drp_data.empty:
        cost_stats = drp_data.groupby('ensemble_size_int')['cost_reduction']
        mean_cost = cost_stats.mean()

        bars = ax3.bar(ensemble_sizes, mean_cost, yerr=cost_stats.std(),
                      color='orange', alpha=0.7, capsize=4)
        ax3.set_xlabel('Ensemble Size')
        ax3.set_ylabel('Cost Reduction (%)')
        ax3.grid(True, alpha=0.3)

        # Annotate each bar with its mean value.
        for bar, val in zip(bars, mean_cost):
            height = bar.get_height()
            ax3.text(bar.get_x() + bar.get_width()/2., height + 0.5,
                    f'{val:.1f}%', ha='center', va='bottom', fontweight='bold')

    # Plot 4: Speedup Analysis
    ax4.set_title('Speedup vs 3DVar Baseline')

    if not drp_data.empty and pd.notna(baseline_time) and baseline_time > 0:
        speedups = baseline_time / mean_times
        ax4.plot(ensemble_sizes, speedups, 'go-', linewidth=2, markersize=6, label='Speedup')
        ax4.axhline(y=1.0, color='red', linestyle='--', alpha=0.7, label='No speedup')
        ax4.set_xlabel('Ensemble Size')
        ax4.set_ylabel('Speedup Factor')
        ax4.legend()
        ax4.grid(True, alpha=0.3)

        # Annotate each point with its speedup factor.
        for ens, sp in zip(ensemble_sizes, speedups):
            ax4.annotate(f'{sp:.2f}×', (ens, sp), textcoords="offset points",
                        xytext=(0,10), ha='center')

    plt.tight_layout()
    return fig

def create_comprehensive_summary(df):
    """Create the 2x3 summary figure covering all configurations.

    Panels: solve time, analysis RMSE and memory usage per configuration
    and method (grouped bars); DRP4DVar ensemble-size sensitivity; cost
    reduction per configuration; and a text panel of summary statistics.

    Parameters
    ----------
    df : pandas.DataFrame
        Comprehensive results; expected columns include 'config', 'method',
        'ensemble_size', 'solve_time', 'analysis_rmse', 'cost_reduction'
        and 'memory_usage'.

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure (caller is responsible for saving it).
    """
    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    fig.suptitle('DRP4DVar vs 3DVar: Comprehensive Analysis Summary', fontweight='bold')

    # NOTE: the original code also computed a 'summary_stats' aggregation
    # here that was never used; it has been removed as dead code.

    configs = df['config'].unique()
    methods = df['method'].unique()
    colors = plt.cm.Set1(np.linspace(0, 1, len(methods)))

    bar_width = 0.15
    # Offset that centres each group of bars on its configuration tick for
    # ANY number of methods (the original hard-coded 0.15, which is only
    # the centre when there are exactly three methods).
    tick_offset = (len(methods) - 1) * bar_width / 2

    def _grouped_bars(ax, column, ylabel, title):
        """Draw one grouped-bar panel of `column`, grouped by configuration."""
        for i, method in enumerate(methods):
            method_data = df[df['method'] == method]
            # BUG FIX: reindex to `configs` so values line up with the tick
            # labels -- groupby sorts its keys, while `configs` preserves
            # order of appearance, and a method missing a config would
            # otherwise shift every following bar.
            config_means = method_data.groupby('config')[column].mean().reindex(configs)
            config_stds = method_data.groupby('config')[column].std().reindex(configs)

            x_pos = np.arange(len(configs)) + i * bar_width
            ax.bar(x_pos, config_means.values, yerr=config_stds.values,
                   width=bar_width, label=method, color=colors[i], alpha=0.7)

        ax.set_xlabel('Configuration')
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        ax.set_xticks(np.arange(len(configs)) + tick_offset)
        ax.set_xticklabels(configs)
        ax.legend()

    # 1-3. Grouped bar charts by configuration.
    _grouped_bars(axes[0, 0], 'solve_time', 'Solve Time (seconds)',
                  'Computational Performance by Configuration')
    _grouped_bars(axes[0, 1], 'analysis_rmse', 'Analysis RMSE',
                  'Analysis Accuracy by Configuration')
    _grouped_bars(axes[0, 2], 'memory_usage', 'Memory Usage (MB)',
                  'Memory Usage by Configuration')

    # 4. Ensemble Size Sensitivity (for DRP4DVar only)
    ax = axes[1, 0]
    drp_data = df[df['method'].str.contains('DRP4DVar')].copy()
    if not drp_data.empty:
        drp_data['ensemble_size_int'] = pd.to_numeric(drp_data['ensemble_size'], errors='coerce')
        drp_data = drp_data.dropna(subset=['ensemble_size_int'])

        for config in configs:
            config_drp = drp_data[drp_data['config'] == config]
            if not config_drp.empty:
                mean_time = config_drp.groupby('ensemble_size_int')['solve_time'].mean()
                std_time = config_drp.groupby('ensemble_size_int')['solve_time'].std()

                # x comes from the groupby index, so it is always aligned
                # with the aggregated values.
                ax.errorbar(mean_time.index, mean_time.values, yerr=std_time.values,
                           marker='o', linewidth=2, capsize=4, label=f'{config} config')

    ax.set_xlabel('Ensemble Size')
    ax.set_ylabel('Solve Time (seconds)')
    ax.set_title('Ensemble Size Sensitivity Analysis')
    ax.legend()

    # 5. Cost Reduction by Configuration (DRP4DVar average)
    ax = axes[1, 1]
    for config in configs:
        config_data = df[df['config'] == config]
        drp_config = config_data[config_data['method'].str.contains('DRP4DVar')]

        if not drp_config.empty:
            ax.bar(config, drp_config['cost_reduction'].mean(), alpha=0.7,
                   label=f'{config} (DRP4DVar avg)')

    ax.set_xlabel('Configuration')
    ax.set_ylabel('Cost Reduction (%)')
    ax.set_title('Cost Reduction by Configuration')
    ax.legend()

    # 6. Summary Statistics Table (rendered as monospace text)
    ax = axes[1, 2]
    ax.axis('off')

    summary_text = "Summary Statistics:\n\n"
    for config in configs:
        config_data = df[df['config'] == config]
        baseline = config_data[config_data['method'] == '3DVar']['solve_time'].mean()
        drp_best = config_data[config_data['method'].str.contains('DRP4DVar')]['solve_time'].min()
        # NaN or zero best time yields a reported speedup of 0 rather
        # than raising / dividing by zero.
        speedup = baseline / drp_best if drp_best > 0 else 0

        summary_text += f"{config.upper()}:\n"
        summary_text += f"  Best speedup: {speedup:.2f}×\n"
        summary_text += f"  3DVar time: {baseline:.3f}s\n"
        summary_text += f"  Best DRP time: {drp_best:.3f}s\n\n"

    ax.text(0.05, 0.95, summary_text, transform=ax.transAxes, fontsize=10,
            verticalalignment='top', fontfamily='monospace',
            bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))

    plt.tight_layout()
    return fig

def create_detailed_metrics_plot(df):
    """Create the 2x2 detailed-metrics figure for the 'medium' config.

    Panels: efficiency-vs-accuracy scatter, convergence vs cost reduction,
    memory usage by ensemble size, and forecast skill by ensemble size.

    Parameters
    ----------
    df : pandas.DataFrame
        Comprehensive results; expected columns include 'config', 'method',
        'ensemble_size', 'solve_time', 'analysis_rmse',
        'convergence_iterations', 'cost_reduction', 'memory_usage' and
        'forecast_skill'.

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure (caller is responsible for saving it).
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))
    fig.suptitle('DRP4DVar vs 3DVar: Detailed Performance Metrics', fontweight='bold')

    # Focus on medium configuration for detailed analysis
    medium_data = df[df['config'] == 'medium'].copy()
    medium_data['ensemble_size_int'] = pd.to_numeric(medium_data['ensemble_size'], errors='coerce')

    baseline_data = medium_data[medium_data['method'] == '3DVar']
    # BUG FIX: drop rows whose ensemble size failed numeric conversion
    # (consistent with the other figure functions); NaNs here would feed
    # invalid colour values to the scatter plot below.
    drp_data = medium_data[medium_data['method'].str.contains('DRP4DVar')]
    drp_data = drp_data.dropna(subset=['ensemble_size_int'])

    # 1. Computational Efficiency Scatter Plot
    ax = axes[0, 0]
    if not drp_data.empty and not baseline_data.empty:
        baseline_time = baseline_data['solve_time'].mean()
        baseline_rmse = baseline_data['analysis_rmse'].mean()

        # DRP4DVar points, coloured by ensemble size.
        scatter = ax.scatter(drp_data['solve_time'], drp_data['analysis_rmse'],
                           c=drp_data['ensemble_size_int'], s=100, alpha=0.7,
                           cmap='viridis', label='DRP4DVar')

        # 3DVar baseline as a single highlighted star.
        ax.scatter(baseline_time, baseline_rmse, s=200, c='red',
                  marker='*', label='3DVar Baseline', edgecolors='black', linewidth=2)

        cbar = plt.colorbar(scatter, ax=ax)
        cbar.set_label('Ensemble Size')

        ax.set_xlabel('Solve Time (seconds)')
        ax.set_ylabel('Analysis RMSE')
        ax.set_title('Efficiency vs Accuracy Trade-off')
        ax.legend()
        ax.grid(True, alpha=0.3)

    # 2. Convergence and Cost Analysis
    ax = axes[0, 1]

    # NOTE(review): groupby drops NaN keys, so 3DVar rows only appear here
    # if their 'ensemble_size' parses as a number -- confirm against the
    # CSV schema.
    grouped = medium_data.groupby(['method', 'ensemble_size_int']).agg({
        'convergence_iterations': 'mean',
        'cost_reduction': 'mean'
    }).reset_index()

    for method in grouped['method'].unique():
        method_group = grouped[grouped['method'] == method]
        if 'DRP4DVar' in method:
            ax.scatter(method_group['convergence_iterations'], method_group['cost_reduction'],
                      s=100, alpha=0.7, label=f'DRP4DVar')
        else:
            ax.scatter(method_group['convergence_iterations'], method_group['cost_reduction'],
                      s=200, c='red', marker='*', label='3DVar', edgecolors='black', linewidth=2)

    ax.set_xlabel('Convergence Iterations')
    ax.set_ylabel('Cost Reduction (%)')
    ax.set_title('Convergence vs Cost Reduction')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # 3. Memory Efficiency Analysis
    ax = axes[1, 0]

    if not drp_data.empty:
        memory_by_ensemble = drp_data.groupby('ensemble_size_int')['memory_usage'].agg(['mean', 'std'])

        ax.errorbar(memory_by_ensemble.index, memory_by_ensemble['mean'],
                   yerr=memory_by_ensemble['std'], marker='o', linewidth=2,
                   markersize=6, capsize=4, label='DRP4DVar')

        # Guard added: only draw the baseline when 3DVar rows exist
        # (the original drew an axhline at NaN otherwise).
        if not baseline_data.empty:
            baseline_memory = baseline_data['memory_usage'].mean()
            ax.axhline(y=baseline_memory, color='red', linestyle='--',
                      label='3DVar Baseline', alpha=0.7)

    ax.set_xlabel('Ensemble Size')
    ax.set_ylabel('Memory Usage (MB)')
    ax.set_title('Memory Usage by Ensemble Size')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # 4. Forecast Skill Comparison
    ax = axes[1, 1]

    if not drp_data.empty:
        skill_by_ensemble = drp_data.groupby('ensemble_size_int')['forecast_skill'].agg(['mean', 'std'])

        ax.errorbar(skill_by_ensemble.index, skill_by_ensemble['mean'],
                   yerr=skill_by_ensemble['std'], marker='s', linewidth=2,
                   markersize=6, capsize=4, label='DRP4DVar')

        if not baseline_data.empty:
            baseline_skill = baseline_data['forecast_skill'].mean()
            ax.axhline(y=baseline_skill, color='red', linestyle='--',
                      label='3DVar Baseline', alpha=0.7)

    ax.set_xlabel('Ensemble Size')
    ax.set_ylabel('Forecast Skill Score')
    ax.set_title('Forecast Skill by Ensemble Size')
    ax.legend()
    ax.grid(True, alpha=0.3)

    plt.tight_layout()
    return fig

def main(csv_path='/home/linden/code/julia/GSICoreAnalysis.jl/results/drp_vs_3dvar_comprehensive/comprehensive_results.csv',
         output_dir=None):
    """Generate all comparison figures and the performance summary.

    Parameters
    ----------
    csv_path : str, optional
        Path to the comprehensive results CSV.  Defaults to the original
        hard-coded location so existing callers (and the CLI entry point)
        keep working unchanged.
    output_dir : str, optional
        Directory figures/summaries are written to.  Defaults to a
        'figures' directory next to the CSV file, which is the same
        location the original hard-coded path pointed at.

    Returns
    -------
    None
        Prints progress; returns early (None) when the CSV is missing.
    """
    if not os.path.exists(csv_path):
        print(f"Error: CSV file not found at {csv_path}")
        return

    print("Loading data...")
    df = load_data(csv_path)
    print(f"Loaded {len(df)} experimental results")

    # Create output directory (default: sibling 'figures' dir of the CSV).
    if output_dir is None:
        output_dir = os.path.join(os.path.dirname(csv_path), 'figures')
    os.makedirs(output_dir, exist_ok=True)

    print("Generating figures...")

    # Generate figures
    fig1 = create_performance_comparison(df)
    fig1.savefig(os.path.join(output_dir, 'performance_comparison.png'))
    print("  ✓ Saved performance comparison")

    fig2 = create_comprehensive_summary(df)
    fig2.savefig(os.path.join(output_dir, 'comprehensive_summary.png'))
    print("  ✓ Saved comprehensive summary")

    fig3 = create_detailed_metrics_plot(df)
    fig3.savefig(os.path.join(output_dir, 'detailed_metrics.png'))
    print("  ✓ Saved detailed metrics analysis")

    # Generate summary statistics
    print("\nGenerating summary statistics...")

    # Overall performance summary across every configuration.
    summary_stats = df.groupby('method').agg({
        'solve_time': ['mean', 'std', 'min', 'max'],
        'analysis_rmse': ['mean', 'std', 'min', 'max'],
        'cost_reduction': ['mean', 'std', 'min', 'max'],
        'memory_usage': ['mean', 'std', 'min', 'max'],
        'forecast_skill': ['mean', 'std', 'min', 'max']
    }).round(4)

    summary_file = os.path.join(output_dir, 'performance_summary.txt')
    with open(summary_file, 'w') as f:
        f.write("DRP4DVar vs 3DVar Performance Summary\n")
        f.write("=" * 50 + "\n\n")
        f.write(summary_stats.to_string())

        # Add per-configuration speedup calculations.
        f.write("\n\nSpeedup Calculations:\n")
        f.write("-" * 30 + "\n")

        for config in df['config'].unique():
            config_data = df[df['config'] == config]
            baseline_time = config_data[config_data['method'] == '3DVar']['solve_time'].mean()

            f.write(f"\n{config.upper()} Configuration:\n")
            f.write(f"  3DVar baseline time: {baseline_time:.3f}s\n")

            drp_data = config_data[config_data['method'].str.contains('DRP4DVar')]
            if not drp_data.empty:
                best_time = drp_data['solve_time'].min()
                best_ensemble = drp_data.loc[drp_data['solve_time'].idxmin(), 'ensemble_size']
                speedup = baseline_time / best_time

                f.write(f"  Best DRP4DVar time: {best_time:.3f}s (ensemble={best_ensemble})\n")
                f.write(f"  Maximum speedup: {speedup:.2f}×\n")

    print(f"  ✓ Saved performance summary to {summary_file}")

    print(f"\nAll figures and summaries saved to {output_dir}")
    print("Files generated:")
    print("  - performance_comparison.png")
    print("  - comprehensive_summary.png")
    print("  - detailed_metrics.png")
    print("  - performance_summary.txt")

if __name__ == "__main__":
    main()