"""
Generate DRP4DVar Validation Visualizations

Creates publication-ready figures for DRP4DVar correctness validation campaign.

Visualizations:
1. RMSE vs Ensemble Size (DRP vs 3DVar)
2. Speedup vs Problem Size
3. Cost Reduction Comparison
4. Accuracy Gap Analysis
5. Convergence Behavior Comparison

Output: PNG (300 DPI) and PDF formats
"""

import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
from pathlib import Path
import json

# Set publication-quality defaults.
# 300 DPI satisfies typical journal figure requirements; the reduced font
# sizes keep multi-panel figures legible at single-column width.
plt.rcParams['figure.dpi'] = 300
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['font.size'] = 10
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['legend.fontsize'] = 9

# Output directories — created eagerly at import time so later savefig
# calls cannot fail on a missing directory.
VALIDATION_DIR = Path("results/drp4dvar_validation")
VALIDATION_DIR.mkdir(parents=True, exist_ok=True)

FIGURES_DIR = VALIDATION_DIR / "figures"
FIGURES_DIR.mkdir(exist_ok=True)

# Input CSV produced by the DRP-vs-3DVar comprehensive evaluation run.
# Existence is checked in load_comprehensive_data(), not here.
COMP_CSV = Path("results/drp_vs_3dvar_comprehensive/comprehensive_results.csv")

def load_comprehensive_data():
    """Read the comprehensive evaluation CSV and aggregate it.

    Returns:
        tuple: (raw per-run DataFrame, DataFrame grouped by
        config/method/ensemble_size with mean and std of each metric).

    Raises:
        FileNotFoundError: if the comprehensive results CSV is missing.
    """
    if not COMP_CSV.exists():
        raise FileNotFoundError(f"Comprehensive results not found: {COMP_CSV}")

    raw = pd.read_csv(COMP_CSV)

    # Mean/std summary per (config, method, ensemble_size) group.
    metric_columns = ['analysis_rmse', 'cost_reduction', 'solve_time', 'forecast_skill']
    agg_spec = {name: ['mean', 'std'] for name in metric_columns}
    summary = raw.groupby(['config', 'method', 'ensemble_size']).agg(agg_spec).reset_index()

    return raw, summary

def load_integration_metrics():
    """Return the most recent integration-test metrics dict, or None.

    Picks the lexicographically last ``*.json`` file in the integration
    results directory (filenames are assumed to sort chronologically).
    """
    candidates = sorted(Path("results/drp4dvar_integration").glob("*.json"))

    # Guard clause: nothing to load is a warning, not an error.
    if not candidates:
        print("Warning: No integration metrics found")
        return None

    newest = candidates[-1]
    print(f"Loading integration metrics from: {newest.name}")

    with open(newest, 'r') as fh:
        return json.load(fh)

def plot_rmse_vs_ensemble_size(df_raw, agg_df):
    """Figure 1: RMSE vs Ensemble Size.

    One panel per grid configuration: DRP4DVar mean analysis RMSE (with
    +/- 1 std error bars across repeated runs) as a function of ensemble
    size, against the 3DVar baseline mean drawn as a horizontal line.

    Args:
        df_raw: per-run results from the comprehensive evaluation.
        agg_df: pre-aggregated results (currently unused; kept so the
            signature stays compatible with existing callers).
    """
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))

    configs = ['small', 'medium']
    ensemble_sizes = [10, 20, 40, 80]

    for idx, config in enumerate(configs):
        ax = axes[idx]

        # 3DVar baseline: horizontal reference line at its mean RMSE
        drvar_data = df_raw[(df_raw['config'] == config) & (df_raw['method'] == '3DVar')]
        if len(drvar_data) > 0:
            drvar_rmse = drvar_data['analysis_rmse'].mean()
            ax.axhline(y=drvar_rmse, color='red', linestyle='--', linewidth=2,
                      label='3DVar Baseline', alpha=0.7)

        # DRP4DVar mean RMSE and spread for each ensemble size
        drp_rmses = []
        drp_stds = []

        for ens in ensemble_sizes:
            method_name = f"DRP4DVar_{ens}"
            drp_data = df_raw[(df_raw['config'] == config) & (df_raw['method'] == method_name)]

            if len(drp_data) > 0:
                drp_rmses.append(drp_data['analysis_rmse'].mean())
                # Fix: Series.std() is NaN when a group has a single run;
                # substitute 0.0 so matplotlib simply omits the error bar
                # instead of being handed NaN in yerr.
                rmse_std = drp_data['analysis_rmse'].std()
                drp_stds.append(0.0 if np.isnan(rmse_std) else rmse_std)
            else:
                drp_rmses.append(np.nan)
                drp_stds.append(0)

        # Plot with error bars (NaN means in drp_rmses render as gaps)
        ax.errorbar(ensemble_sizes, drp_rmses, yerr=drp_stds,
                   marker='o', markersize=6, linewidth=2, capsize=4,
                   label='DRP4DVar', color='blue')

        ax.set_xlabel('Ensemble Size')
        ax.set_ylabel('Analysis RMSE')
        ax.set_title(f'{config.upper()} Grid Configuration')
        ax.legend()
        ax.grid(True, alpha=0.3)
        ax.set_xticks(ensemble_sizes)

    plt.tight_layout()

    # Save in raster (PNG) and vector (PDF) form
    png_file = FIGURES_DIR / "rmse_vs_ensemble_size.png"
    pdf_file = FIGURES_DIR / "rmse_vs_ensemble_size.pdf"
    plt.savefig(png_file, dpi=300, bbox_inches='tight')
    plt.savefig(pdf_file, bbox_inches='tight')
    print(f"Saved: {png_file.name}")
    plt.close()

def plot_speedup_vs_problem_size(df_raw):
    """Figure 2: Speedup vs Problem Size.

    Grouped bar chart of (3DVar solve time / DRP4DVar solve time) per grid
    configuration — one bar group per config, one bar per ensemble size.
    A dashed line at 1.0x marks parity with the baseline.
    """
    fig, ax = plt.subplots(figsize=(8, 5))

    configs = ['small', 'medium']
    ensemble_sizes = [10, 20, 40, 80]
    colors = plt.cm.viridis(np.linspace(0.2, 0.9, len(ensemble_sizes)))

    x_positions = np.arange(len(configs))
    width = 0.18

    for idx, ens in enumerate(ensemble_sizes):
        speedups = []

        for config in configs:
            # 3DVar baseline solve time
            drvar_data = df_raw[(df_raw['config'] == config) & (df_raw['method'] == '3DVar')]
            drvar_time = drvar_data['solve_time'].mean() if len(drvar_data) > 0 else np.nan

            # DRP solve time for this ensemble size
            method_name = f"DRP4DVar_{ens}"
            drp_data = df_raw[(df_raw['config'] == config) & (df_raw['method'] == method_name)]
            drp_time = drp_data['solve_time'].mean() if len(drp_data) > 0 else np.nan

            # Fix: NaN is truthy in Python, so the old `if drvar_time and
            # drp_time` guard never filtered missing data. Check finiteness
            # and positivity explicitly; any invalid input yields NaN (bar
            # is simply not drawn).
            if np.isfinite(drvar_time) and np.isfinite(drp_time) and drvar_time > 0 and drp_time > 0:
                speedup = drvar_time / drp_time
            else:
                speedup = np.nan
            speedups.append(speedup)

        # Center the group of bars around each x position
        offset = width * (idx - len(ensemble_sizes)/2 + 0.5)
        ax.bar(x_positions + offset, speedups, width,
               label=f'Ens={ens}', color=colors[idx])

    # Baseline line at 1.0x (parity with 3DVar)
    ax.axhline(y=1.0, color='black', linestyle='--', linewidth=1, alpha=0.5)

    ax.set_xlabel('Problem Size')
    ax.set_ylabel('Speedup Factor (×)')
    ax.set_title('DRP4DVar Speedup vs 3DVar Baseline')
    ax.set_xticks(x_positions)
    ax.set_xticklabels([c.upper() for c in configs])
    ax.legend(ncol=2)
    ax.grid(True, alpha=0.3, axis='y')

    plt.tight_layout()

    # Save in raster (PNG) and vector (PDF) form
    png_file = FIGURES_DIR / "speedup_vs_problem_size.png"
    pdf_file = FIGURES_DIR / "speedup_vs_problem_size.pdf"
    plt.savefig(png_file, dpi=300, bbox_inches='tight')
    plt.savefig(pdf_file, bbox_inches='tight')
    print(f"Saved: {png_file.name}")
    plt.close()

def plot_cost_reduction_comparison(df_raw):
    """Figure 3: grouped bar chart of cost-function reduction.

    For each grid configuration: one red bar for the 3DVar baseline plus
    one blue-shaded bar per DRP4DVar ensemble size.
    """
    fig, ax = plt.subplots(figsize=(8, 5))

    grid_configs = ['small', 'medium']
    ens_sizes = [10, 20, 40, 80]

    def mean_cost(cfg, method):
        # Mean cost reduction for one (config, method) pair; 0 if absent.
        sel = df_raw[(df_raw['config'] == cfg) & (df_raw['method'] == method)]
        return sel['cost_reduction'].mean() if len(sel) > 0 else 0

    baseline_costs = [mean_cost(cfg, '3DVar') for cfg in grid_configs]
    drp_by_ens = {
        ens: [mean_cost(cfg, f"DRP4DVar_{ens}") for cfg in grid_configs]
        for ens in ens_sizes
    }
    tick_labels = [cfg.upper() for cfg in grid_configs]

    positions = np.arange(len(tick_labels))
    bar_w = 0.15

    # 3DVar bars sit two slots left of the group center
    ax.bar(positions - 2 * bar_w, baseline_costs, bar_w, label='3DVar', color='red', alpha=0.7)

    # DRP variants fill the remaining slots, shaded by ensemble size
    shades = plt.cm.Blues(np.linspace(0.4, 0.9, len(ens_sizes)))
    for i, ens in enumerate(ens_sizes):
        ax.bar(positions + (i - 1) * bar_w, drp_by_ens[ens], bar_w,
               label=f'DRP Ens={ens}', color=shades[i])

    ax.set_xlabel('Problem Size')
    ax.set_ylabel('Cost Reduction (%)')
    ax.set_title('Cost Function Reduction: DRP4DVar vs 3DVar')
    ax.set_xticks(positions)
    ax.set_xticklabels(tick_labels)
    ax.legend(ncol=3, fontsize=8)
    ax.grid(True, alpha=0.3, axis='y')

    plt.tight_layout()

    # Save in raster (PNG) and vector (PDF) form
    png_file = FIGURES_DIR / "cost_reduction_comparison.png"
    pdf_file = FIGURES_DIR / "cost_reduction_comparison.pdf"
    plt.savefig(png_file, dpi=300, bbox_inches='tight')
    plt.savefig(pdf_file, bbox_inches='tight')
    print(f"Saved: {png_file.name}")
    plt.close()

def plot_accuracy_gap_analysis(df_raw):
    """Figure 4: Accuracy Gap Analysis.

    Percent RMSE increase of DRP4DVar relative to the 3DVar baseline as a
    function of ensemble size, with reference lines at 0% and 20% and a
    shaded 0-20% "acceptable" band.
    """
    fig, ax = plt.subplots(figsize=(8, 5))

    configs = ['small', 'medium']
    ensemble_sizes = [10, 20, 40, 80]
    colors = plt.cm.Reds(np.linspace(0.4, 0.9, len(configs)))

    for config_idx, config in enumerate(configs):
        accuracy_gaps = []

        # 3DVar baseline mean RMSE; the 1.0 fallback avoids division by
        # zero below when no baseline rows exist (gaps are then nominal).
        drvar_data = df_raw[(df_raw['config'] == config) & (df_raw['method'] == '3DVar')]
        drvar_rmse = drvar_data['analysis_rmse'].mean() if len(drvar_data) > 0 else 1.0

        for ens in ensemble_sizes:
            method_name = f"DRP4DVar_{ens}"
            drp_data = df_raw[(df_raw['config'] == config) & (df_raw['method'] == method_name)]
            # Missing DRP data defaults to the baseline itself (gap = 0)
            drp_rmse = drp_data['analysis_rmse'].mean() if len(drp_data) > 0 else drvar_rmse

            gap = 100 * (drp_rmse - drvar_rmse) / drvar_rmse
            accuracy_gaps.append(gap)

        ax.plot(ensemble_sizes, accuracy_gaps, marker='o', markersize=8,
                linewidth=2, label=f'{config.upper()} Grid',
                color=colors[config_idx])

    # Reference lines
    ax.axhline(y=0, color='green', linestyle='--', linewidth=1.5,
               label='3DVar Baseline', alpha=0.6)
    ax.axhline(y=20, color='orange', linestyle=':', linewidth=1.5,
               label='20% Threshold', alpha=0.6)

    # Shade acceptable region. Fix: this must be drawn BEFORE ax.legend(),
    # otherwise matplotlib never collects the 'Acceptable Range' label and
    # the legend entry is silently dropped.
    ax.fill_between([10, 80], 0, 20, alpha=0.1, color='green',
                    label='Acceptable Range')

    ax.set_xlabel('Ensemble Size')
    ax.set_ylabel('Accuracy Gap (%)')
    ax.set_title('DRP4DVar Accuracy Gap from 3DVar Baseline')
    ax.legend()
    ax.grid(True, alpha=0.3)
    ax.set_xticks(ensemble_sizes)

    plt.tight_layout()

    # Save in raster (PNG) and vector (PDF) form
    png_file = FIGURES_DIR / "accuracy_gap_analysis.png"
    pdf_file = FIGURES_DIR / "accuracy_gap_analysis.pdf"
    plt.savefig(png_file, dpi=300, bbox_inches='tight')
    plt.savefig(pdf_file, bbox_inches='tight')
    print(f"Saved: {png_file.name}")
    plt.close()

def _mean_metric(df_raw, config, method, column, default):
    """Mean of `column` over rows matching (config, method); `default` if none."""
    sel = df_raw[(df_raw['config'] == config) & (df_raw['method'] == method)]
    return sel[column].mean() if len(sel) > 0 else default


def plot_summary_dashboard(df_raw):
    """Figure 5: Comprehensive Summary Dashboard.

    2x2 panel: (a) analysis RMSE, (b) speedup over 3DVar, (c) cost
    reduction, and (d) accuracy gap from the 3DVar baseline, each plotted
    against ensemble size with one line per grid configuration.

    Refactor: the repeated filter-then-mean pattern is factored into
    _mean_metric; each subplot only differs in the metric and its
    missing-data default.
    """
    fig = plt.figure(figsize=(12, 8))
    gs = fig.add_gridspec(2, 2, hspace=0.3, wspace=0.3)

    configs = ['small', 'medium']
    ensemble_sizes = [10, 20, 40, 80]

    # Subplot 1: RMSE Comparison (missing runs plot as gaps via NaN)
    ax1 = fig.add_subplot(gs[0, 0])
    for config in configs:
        drp_rmses = [
            _mean_metric(df_raw, config, f"DRP4DVar_{ens}", 'analysis_rmse', np.nan)
            for ens in ensemble_sizes
        ]
        ax1.plot(ensemble_sizes, drp_rmses, marker='o', label=config.upper(), linewidth=2)

    ax1.set_xlabel('Ensemble Size')
    ax1.set_ylabel('Analysis RMSE')
    ax1.set_title('(a) Analysis Accuracy')
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    ax1.set_xticks(ensemble_sizes)

    # Subplot 2: Speedup (defaults of 1.0 keep the ratio neutral when
    # either method has no data)
    ax2 = fig.add_subplot(gs[0, 1])
    for config in configs:
        drvar_time = _mean_metric(df_raw, config, '3DVar', 'solve_time', 1.0)
        speedups = []
        for ens in ensemble_sizes:
            drp_time = _mean_metric(df_raw, config, f"DRP4DVar_{ens}", 'solve_time', 1.0)
            speedups.append(drvar_time / drp_time if drp_time > 0 else 0)
        ax2.plot(ensemble_sizes, speedups, marker='s', label=config.upper(), linewidth=2)

    ax2.axhline(y=1.0, color='black', linestyle='--', alpha=0.5)
    ax2.set_xlabel('Ensemble Size')
    ax2.set_ylabel('Speedup Factor (×)')
    ax2.set_title('(b) Computational Efficiency')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    ax2.set_xticks(ensemble_sizes)

    # Subplot 3: Cost Reduction
    ax3 = fig.add_subplot(gs[1, 0])
    for config in configs:
        cost_reductions = [
            _mean_metric(df_raw, config, f"DRP4DVar_{ens}", 'cost_reduction', 0)
            for ens in ensemble_sizes
        ]
        ax3.plot(ensemble_sizes, cost_reductions, marker='^', label=config.upper(), linewidth=2)

    ax3.set_xlabel('Ensemble Size')
    ax3.set_ylabel('Cost Reduction (%)')
    ax3.set_title('(c) Cost Function Reduction')
    ax3.legend()
    ax3.grid(True, alpha=0.3)
    ax3.set_xticks(ensemble_sizes)

    # Subplot 4: Accuracy Gap (% RMSE increase over the 3DVar mean;
    # missing DRP data defaults to the baseline itself, i.e. a gap of 0)
    ax4 = fig.add_subplot(gs[1, 1])
    for config in configs:
        drvar_rmse = _mean_metric(df_raw, config, '3DVar', 'analysis_rmse', 1.0)
        gaps = []
        for ens in ensemble_sizes:
            drp_rmse = _mean_metric(df_raw, config, f"DRP4DVar_{ens}", 'analysis_rmse', drvar_rmse)
            gaps.append(100 * (drp_rmse - drvar_rmse) / drvar_rmse)
        ax4.plot(ensemble_sizes, gaps, marker='d', label=config.upper(), linewidth=2)

    ax4.axhline(y=20, color='orange', linestyle=':', linewidth=1.5, alpha=0.6)
    ax4.set_xlabel('Ensemble Size')
    ax4.set_ylabel('Accuracy Gap (%)')
    ax4.set_title('(d) Accuracy Trade-off')
    ax4.legend()
    ax4.grid(True, alpha=0.3)
    ax4.set_xticks(ensemble_sizes)

    plt.suptitle('DRP4DVar Validation Summary Dashboard', fontsize=14, fontweight='bold')

    # Save in raster (PNG) and vector (PDF) form
    png_file = FIGURES_DIR / "validation_summary_dashboard.png"
    pdf_file = FIGURES_DIR / "validation_summary_dashboard.pdf"
    plt.savefig(png_file, dpi=300, bbox_inches='tight')
    plt.savefig(pdf_file, bbox_inches='tight')
    print(f"Saved: {png_file.name}")
    plt.close()

def main():
    """Generate all validation figures"""
    banner = "=" * 80
    divider = "-" * 80

    print(banner)
    print("DRP4DVAR VALIDATION FIGURE GENERATION")
    print(banner)

    # Load inputs (integration metrics are loaded for their side-effect
    # logging; they are not consumed by the figure generators)
    print("\nLoading data...")
    df_raw, agg_df = load_comprehensive_data()
    integration_metrics = load_integration_metrics()

    print(f"\nLoaded {len(df_raw)} data points from comprehensive evaluation")
    print(f"Configurations: {df_raw['config'].unique()}")
    print(f"Methods: {df_raw['method'].unique()}")

    # Generate every figure in sequence
    print("\nGenerating figures...")
    print(divider)

    plot_rmse_vs_ensemble_size(df_raw, agg_df)
    plot_speedup_vs_problem_size(df_raw)
    plot_cost_reduction_comparison(df_raw)
    plot_accuracy_gap_analysis(df_raw)
    plot_summary_dashboard(df_raw)

    print(divider)
    print(f"\n✓ All figures saved to: {FIGURES_DIR}")
    print("\nGenerated files:")
    for output_path in sorted(FIGURES_DIR.glob("*")):
        print(f"  - {output_path.name}")

    print("\n" + banner)
    print("FIGURE GENERATION COMPLETE")
    print(banner)


if __name__ == "__main__":
    main()
