import os
import argparse
import pandas as pd
import numpy as np
from collections import defaultdict
from db_utils import InstanceDatabase
from sklearn.model_selection import KFold
import glob

# Plotting dependencies are optional: matplotlib/seaborn may be absent on
# headless compute nodes, so every plotting code path in this module is
# gated behind the HAS_PLOTTING flag set here.
try:
    import matplotlib
    matplotlib.use('Agg')  # Use non-interactive backend to avoid X server issues
    import matplotlib.pyplot as plt
    import seaborn as sns
    HAS_PLOTTING = True
    # Set up matplotlib for publication-quality figures
    plt.rcParams.update({
        'font.size': 14,
        'axes.labelsize': 16,
        'axes.titlesize': 18,
        'xtick.labelsize': 16,
        'ytick.labelsize': 16,
        'legend.fontsize': 14,
        'figure.titlesize': 20,
        'lines.linewidth': 3,
        'axes.linewidth': 2,
        'grid.linewidth': 1.5,
        'savefig.dpi': 300,
        'savefig.bbox': 'tight',
        'font.family': 'sans-serif',
        'font.sans-serif': ['Arial', 'Helvetica', 'Inter', 'DejaVu Sans']
    })
except ImportError:
    HAS_PLOTTING = False

def read_cluster_files(cluster_dir):
    """Scan *cluster_dir* for ``*.txt`` cluster files and build lookup tables.

    Each file is named ``<cluster_id>.txt`` and lists one instance hash per
    line (blank lines are ignored). Returns a pair
    ``(instance_to_cluster, cluster_to_instances)`` where the first maps
    hash -> cluster id and the second maps cluster id -> list of hashes.
    """
    instance_to_cluster = {}
    cluster_to_instances = defaultdict(list)
    for entry in os.listdir(cluster_dir):
        if not entry.endswith('.txt'):
            continue  # skip anything that is not a cluster listing
        cluster_id, _ext = os.path.splitext(entry)
        with open(os.path.join(cluster_dir, entry), 'r') as handle:
            for raw_line in handle:
                instance_hash = raw_line.strip()
                if not instance_hash:
                    continue
                instance_to_cluster[instance_hash] = cluster_id
                cluster_to_instances[cluster_id].append(instance_hash)
    return instance_to_cluster, cluster_to_instances

def filter_by_track(instance_hashes, db_path, track_name):
    """Intersect *instance_hashes* with the hashes registered for *track_name*.

    Opens the instance database at *db_path* just long enough to fetch the
    track's UUIDs and returns the intersection as a set.
    """
    with InstanceDatabase(db_path) as db:
        track_uuids = set(db.get_track_uuids(track_name))
    return track_uuids.intersection(instance_hashes)

def cross_validate_cluster_solver(df, instance_to_cluster, solver_cols, n_folds=5, out_dir=None):
    """Cross-validate cluster-based solver selection and plot solved-fraction curves.

    For each fold, the best (lowest mean runtime) solver per cluster is chosen
    on the training split and applied to the test split. The resulting
    portfolio curve is averaged over folds and compared against the virtual
    best solver (VBS, per-instance minimum) and the single best solver
    (SBS, lowest overall mean on the test split).

    Parameters
    ----------
    df : pd.DataFrame
        Runtime table with a 'hash' column plus one column per solver; cells
        may hold the string 'timeout', which is treated as unsolved (NaN).
    instance_to_cluster : dict
        Maps instance hash -> cluster id; unmapped instances are dropped.
    solver_cols : list[str]
        Names of the runtime columns in *df*.
    n_folds : int
        Number of KFold splits (shuffled, fixed seed for reproducibility).
    out_dir : str or None
        If given, the figure is saved there as 'performance_curves_cv.png'.
    """
    # Local import kept deliberately: module-level `plt` only exists when
    # HAS_PLOTTING is True, and this guarantees a clear ImportError otherwise.
    import matplotlib.pyplot as plt
    # (BUGFIX: the redundant local `import numpy as np` was removed — numpy is
    # already imported unconditionally at module level.)

    # Keep only instances that belong to a known cluster.
    df = df.copy()
    df['cluster'] = df['hash'].map(instance_to_cluster)
    df = df[~df['cluster'].isnull()]
    df = df.reset_index(drop=True)

    kf = KFold(n_splits=n_folds, shuffle=True, random_state=42)
    cutoff_times = np.logspace(0, 4, 100)  # 1 to 10000 s, log scale

    all_portfolio_curves = []
    all_vbs_curves = []
    all_sbs_curves = []

    for fold_idx, (train_idx, test_idx) in enumerate(kf.split(df)):
        train_df = df.iloc[train_idx]
        test_df = df.iloc[test_idx]

        # Best solver per cluster on the training split. 'timeout' -> NaN so
        # unsolved runs do not contribute to the mean; clusters where every
        # solver timed out everywhere get None (treated as unsolved below).
        cluster_to_best_solver = {}
        for cluster, group in train_df.groupby('cluster'):
            solver_means = group[solver_cols].replace('timeout', np.nan).astype(float).mean()
            if solver_means.isnull().all():
                best_solver = None
            else:
                best_solver = solver_means.idxmin()
            cluster_to_best_solver[cluster] = best_solver

        # Apply the per-cluster choice to the held-out instances.
        test_solver_data = test_df[solver_cols].replace('timeout', np.nan).astype(float)
        test_df = test_df.copy()
        test_df['predicted_best_solver'] = test_df['cluster'].map(cluster_to_best_solver)
        test_df['portfolio_time'] = test_df.apply(
            lambda row: test_solver_data.loc[row.name, row['predicted_best_solver']]
            if row['predicted_best_solver'] in test_solver_data.columns else np.nan,
            axis=1
        )
        # VBS = per-instance best; SBS = solver with the lowest overall mean.
        test_df['vbs_time'] = test_solver_data.min(axis=1)
        solver_means = test_solver_data.mean()
        sbs_solver = solver_means.idxmin()
        test_df['sbs_time'] = test_solver_data[sbs_solver]

        # Fraction of test instances solved within each cutoff time.
        total_instances = len(test_df)
        vbs_curve = []
        sbs_curve = []
        portfolio_curve = []
        for cutoff in cutoff_times:
            vbs_solved = (~test_df['vbs_time'].isna() & (test_df['vbs_time'] <= cutoff)).sum() / total_instances
            sbs_solved = (~test_df['sbs_time'].isna() & (test_df['sbs_time'] <= cutoff)).sum() / total_instances
            portfolio_solved = (~test_df['portfolio_time'].isna() & (test_df['portfolio_time'] <= cutoff)).sum() / total_instances
            vbs_curve.append(vbs_solved)
            sbs_curve.append(sbs_solved)
            portfolio_curve.append(portfolio_solved)

        all_vbs_curves.append(vbs_curve)
        all_sbs_curves.append(sbs_curve)
        all_portfolio_curves.append(portfolio_curve)

    # Average the per-fold curves.
    mean_vbs_curve = np.mean(all_vbs_curves, axis=0)
    mean_sbs_curve = np.mean(all_sbs_curves, axis=0)
    mean_portfolio_curve = np.mean(all_portfolio_curves, axis=0)

    fig, ax = plt.subplots(figsize=(12, 8))

    # Colors for different methods
    colors = {
        'VBS': '#2E8B57',  # Sea Green
        'SBS': '#4169E1',  # Royal Blue
        'Portfolio': '#8A2BE2',  # Blue Violet
    }

    # markersize=0 keeps the marker kind for the legend without cluttering lines.
    ax.plot(cutoff_times, mean_vbs_curve, label='VBS', color=colors['VBS'], linewidth=3, marker='o', markersize=0)
    ax.plot(cutoff_times, mean_sbs_curve, label='SBS', color=colors['SBS'], linewidth=3, marker='s', markersize=0)
    ax.plot(cutoff_times, mean_portfolio_curve, label='Portfolio (CV)', color=colors['Portfolio'], linewidth=3, marker='^', markersize=0)

    ax.set_xscale('log')
    ax.grid(True, alpha=0.3, linewidth=1.5)
    ax.set_xlabel('Cutoff Time (seconds)', fontsize=16, fontweight='bold')
    ax.set_ylabel('Fraction Instances Solved', fontsize=16, fontweight='bold')
    ax.set_title('5-Fold Cross-Validated Solver Performance Comparison', fontsize=18, fontweight='bold')

    # Trim the x-axis at the first cutoff where any curve reaches 99% of the
    # best final solved fraction.
    max_solved = max(max(mean_vbs_curve), max(mean_sbs_curve), max(mean_portfolio_curve))
    max_cutoff_idx = 0
    for i, (vbs_val, sbs_val, portfolio_val) in enumerate(zip(mean_vbs_curve, mean_sbs_curve, mean_portfolio_curve)):
        if max(vbs_val, sbs_val, portfolio_val) >= max_solved * 0.99:
            max_cutoff_idx = i
            break
    if max_cutoff_idx == 0:
        # BUGFIX: flat/degenerate curves (or everything solved at the first
        # cutoff) previously produced ax.set_xlim(1, cutoff_times[0]) == (1, 1),
        # a collapsed axis. Fall back to the full cutoff range instead.
        max_cutoff_idx = len(cutoff_times) - 1

    ax.set_xlim(1, cutoff_times[max_cutoff_idx])
    ax.set_ylim(0, 1.0)

    # Improve tick formatting
    ax.tick_params(axis='both', which='major', labelsize=14, width=2, length=6)
    ax.tick_params(axis='both', which='minor', width=1, length=3)

    # Legend outside the axes on the right.
    fig.legend(loc='center right', bbox_to_anchor=(1.05, 0.5), frameon=True, fancybox=True, shadow=True, fontsize=14)

    # Adjust layout to make room for legend
    plt.subplots_adjust(right=0.85)

    if out_dir:
        plt.savefig(os.path.join(out_dir, 'performance_curves_cv.png'), dpi=300, bbox_inches='tight')
    plt.close()

def analyze_solver_performance(solver_csv, instance_to_cluster, cluster_to_instances, valid_hashes, out_dir, feature_dir):
    """Analyze per-cluster solver performance for one results CSV.

    Reads solver runtimes from *solver_csv*, restricts them to *valid_hashes*,
    aggregates per-cluster statistics with PAR-2 scoring, writes several CSV
    summaries into *out_dir* and, when plotting is available, produces the
    performance-curve figure.

    NOTE(review): the original implementation contained ~130 lines of code
    after the final ``return`` (raw heatmap rendering and a call to
    ``cross_validate_cluster_solver``) that could never execute. That dead
    code has been removed; behavior is unchanged. To re-enable cross-
    validation, call ``cross_validate_cluster_solver(df, instance_to_cluster,
    solver_cols, n_folds=5, out_dir=out_dir)`` before the final return.

    Parameters
    ----------
    solver_csv : str
        CSV with a 'hash' column plus one runtime column per solver; runtime
        cells may contain the string 'timeout'.
    instance_to_cluster : dict
        Maps instance hash -> cluster id.
    cluster_to_instances : dict
        Maps cluster id -> list of instance hashes.
    valid_hashes : set
        Hashes to include in the analysis.
    out_dir : str
        Directory that receives the CSV/plot outputs.
    feature_dir : str
        Directory with per-instance .npz feature files.

    Returns
    -------
    dict or None
        Curve data from plot_performance_curves_improved when plotting is
        enabled, otherwise None.
    """
    # Read CSV and drop metadata columns that are not solver runtimes.
    df = pd.read_csv(solver_csv)
    columns_to_drop = ['benchmark', 'verified-result', 'claimed-result']
    df = df.drop(columns=[col for col in columns_to_drop if col in df.columns])

    df = df[df['hash'].isin(valid_hashes)]
    # Map each instance to its cluster; keep only rows with a valid cluster.
    df['cluster'] = df['hash'].map(instance_to_cluster)
    df = df[~df['cluster'].isnull()]

    # Load features for all valid instances.
    features_dict = load_instance_features(feature_dir, df['hash'])

    # Compute cluster centers and diameters in feature space.
    # NOTE(review): currently informational only — nothing below reads these.
    cluster_centers = {}
    cluster_diameters = {}
    for cluster, hashes in cluster_to_instances.items():
        hashes = set(hashes) & set(valid_hashes)
        cluster_features = [features_dict[h] for h in hashes if h in features_dict]
        if not cluster_features:
            continue
        cluster_features = np.stack(cluster_features)
        center = np.mean(cluster_features, axis=0)
        dists = np.linalg.norm(cluster_features - center, axis=1)
        diameter = np.max(dists)
        cluster_centers[cluster] = center
        cluster_diameters[cluster] = diameter

    # Identify solver columns: every column (besides 'hash'/'cluster') whose
    # values are numeric once 'timeout' is substituted.
    solver_cols = []
    for col in df.columns:
        if col not in ('hash', 'cluster'):
            try:
                # Replace 'timeout' with the PAR-2 penalty score, then probe
                # whether the remainder of the column converts to float.
                test_series = df[col].replace('timeout', '10000.0')
                test_series.astype(float)
                solver_cols.append(col)
            except ValueError:
                print(f"Warning: Column {col} contains non-numeric data and will be excluded")

    print(f"Found {len(solver_cols)} solver columns")

    # Constants for timeout handling under PAR-2 scoring: any runtime at or
    # above TIMEOUT_THRESHOLD is replaced by the TIMEOUT_PENALTY (2x) score.
    TIMEOUT_PENALTY = 10000.0  # PAR-2 penalty score
    TIMEOUT_THRESHOLD = 5000.0  # Original timeout threshold

    def normalize_performance(data, metric_cols, timeout_threshold=TIMEOUT_THRESHOLD, timeout_value=TIMEOUT_PENALTY):
        """Normalize each row of *data* by its row-minimum (1.0 = best solver).

        Values at/above *timeout_threshold* are first clamped to
        *timeout_value* (PAR-2); a zero row-minimum is replaced by 1.0 to
        avoid division by zero.
        """
        norm_vals = data[metric_cols].values
        norm_vals = np.where(norm_vals >= timeout_threshold, timeout_value, norm_vals)
        min_vals = np.min(norm_vals, axis=1, keepdims=True)
        min_vals = np.where(min_vals == 0, 1.0, min_vals)  # Avoid division by zero
        normed = norm_vals / min_vals
        return pd.DataFrame(normed, columns=metric_cols, index=data.index)

    def add_white_columns(ax, n_rows, col_idx, values, fmt, label):
        """Overlay a white annotation column (e.g. instance counts) onto a heatmap axis."""
        for i in range(n_rows):
            ax.add_patch(plt.Rectangle((col_idx, i), 1, 1, color='#ffffff', lw=0))
            ax.text(col_idx + 0.5, i + 0.5, fmt.format(values.iloc[i]),
                    ha='center', va='center', color='black', fontsize=10, fontweight='bold')

    def plot_heatmap(data, title, output_path, timeout_threshold=TIMEOUT_THRESHOLD, timeout_value=TIMEOUT_PENALTY, cluster_vbs=None):
        """Render a per-cluster solver heatmap with Instances/Mean-VBS side columns.

        NOTE(review): currently unused (heatmap generation is disabled due to
        memory constraints) but kept so it can be re-enabled. Reads `stats_df`
        from the enclosing scope, so it must only be called after `stats_df`
        is built below.
        """
        # Get instance counts per cluster and add as a new column
        cluster_counts = stats_df.set_index('cluster')['num_instances']
        cluster_vbs = stats_df.set_index('cluster')['mean_vbs'] if cluster_vbs is None else cluster_vbs
        data_with_counts = data.copy()
        data_with_counts['Instances'] = cluster_counts
        data_with_counts['Mean VBS'] = cluster_vbs

        # Separate solver columns from the trailing 'Instances'/'Mean VBS' ones.
        solver_cols = data_with_counts.columns[:-2]
        instances_col = data_with_counts.columns[-2]
        vbs_col = data_with_counts.columns[-1]
        solver_data = data_with_counts[solver_cols]
        instances_data = data_with_counts[instances_col]
        vbs_data = data_with_counts[vbs_col]

        fig, ax = plt.subplots(figsize=(16, max(6, len(data)//2)), gridspec_kw={'right': 0.85})

        # Mask for timeouts
        mask_timeouts = solver_data >= timeout_value/2

        # Plot the heatmap for solver columns only
        sns.heatmap(solver_data.mask(mask_timeouts),
                    annot=True,
                    fmt='.2f',
                    cmap='YlOrRd',
                    vmin=1.0,
                    vmax=10.0,
                    cbar_kws={'label': 'Relative to Best (1.0=Best)'},
                    ax=ax)

        # Overlay timeouts as gray
        sns.heatmap(solver_data.where(mask_timeouts),
                    annot=True,
                    fmt='.0f',
                    cmap=['#808080'],
                    cbar=False,
                    alpha=0.5,
                    ax=ax)

        n_rows = data_with_counts.shape[0]
        n_solver_cols = len(solver_cols)
        # Add the 'Instances' column
        add_white_columns(ax, n_rows, n_solver_cols, instances_data, "{:d}", 'Instances')
        # Add the 'Mean VBS' column
        add_white_columns(ax, n_rows, n_solver_cols + 1, vbs_data, "{:.2f}", 'Mean VBS')

        # Label solver names plus the two extra annotation columns.
        ax.set_xticks(list(range(n_solver_cols + 2)))
        ax.set_xticklabels(list(solver_cols) + ['Instances', 'Mean VBS'], rotation=45, ha='right')

        plt.title(title)
        plt.ylabel('Cluster')
        plt.xlabel('Solver')
        plt.subplots_adjust(right=0.80)
        plt.savefig(output_path)
        plt.close()

    # Per-cluster, per-solver stats (mean/median/min/max/std/solved counts)
    # plus the mean VBS (per-instance best time) of each cluster.
    stats = []
    vbs_scores = {}  # cluster -> mean VBS
    for cluster, hashes in cluster_to_instances.items():
        hashes = set(hashes) & set(valid_hashes)
        if not hashes:
            continue
        subdf = df[df['hash'].isin(hashes)]
        row = {'cluster': cluster, 'num_instances': len(subdf)}

        # Mean VBS for this cluster under PAR-2 scoring.
        if not subdf.empty:
            solver_values = subdf[solver_cols].replace('timeout', TIMEOUT_PENALTY).astype(float)
            solver_values = solver_values.where(solver_values < TIMEOUT_THRESHOLD, TIMEOUT_PENALTY)
            vbs_per_instance = solver_values.min(axis=1)
            mean_vbs = vbs_per_instance.mean()
        else:
            mean_vbs = np.nan
        vbs_scores[cluster] = mean_vbs

        for solver in solver_cols:
            vals = subdf[solver].replace('timeout', TIMEOUT_PENALTY).astype(float)
            # Apply PAR-2 scoring
            vals = vals.where(vals < TIMEOUT_THRESHOLD, TIMEOUT_PENALTY)
            row[f'{solver}_mean'] = vals.mean()
            row[f'{solver}_median'] = vals.median()
            row[f'{solver}_min'] = vals.min()
            row[f'{solver}_max'] = vals.max()
            row[f'{solver}_std'] = vals.std()
            row[f'{solver}_solved'] = (vals < TIMEOUT_THRESHOLD).sum()
        stats.append(row)

    stats_df = pd.DataFrame(stats)
    stats_df['mean_vbs'] = stats_df['cluster'].map(vbs_scores)
    stats_df.to_csv(os.path.join(out_dir, 'cluster_solver_stats.csv'), index=False)

    # --- Mean-based Analysis ---
    mean_cols = [f'{solver}_mean' for solver in solver_cols]
    stats_mean_df = stats_df.set_index('cluster')[mean_cols]
    norm_mean_df = normalize_performance(stats_mean_df, mean_cols)
    norm_mean_df.to_csv(os.path.join(out_dir, 'cluster_solver_normalized_mean.csv'))
    # Heatmap generation is disabled due to memory constraints; re-enable with
    # plot_heatmap(norm_mean_df, ..., cluster_vbs=stats_df.set_index('cluster')['mean_vbs']).

    # --- Median-based Analysis ---
    median_cols = [f'{solver}_median' for solver in solver_cols]
    stats_median_df = stats_df.set_index('cluster')[median_cols]
    norm_median_df = normalize_performance(stats_median_df, median_cols)
    norm_median_df.to_csv(os.path.join(out_dir, 'cluster_solver_normalized_median.csv'))
    # Heatmap generation is disabled here as well (memory constraints).

    # Best solver per cluster (both mean- and median-based, PAR-2 scored so
    # timeouts are included in the aggregates) plus a full solver ranking.
    best_solvers = []
    solver_ranking_rows = []
    for _, row in stats_df.iterrows():
        cluster_data = {'cluster': row['cluster'], 'num_instances': row['num_instances']}
        # Mean-based best (including timeouts in the calculation)
        means = {solver: row[f'{solver}_mean'] for solver in solver_cols}
        best_mean = min(means.items(), key=lambda x: x[1])
        cluster_data.update({
            'best_solver_mean': best_mean[0],
            'best_mean': best_mean[1]
        })
        # Median-based best (including timeouts in the calculation)
        medians = {solver: row[f'{solver}_median'] for solver in solver_cols}
        best_median = min(medians.items(), key=lambda x: x[1])
        cluster_data.update({
            'best_solver_median': best_median[0],
            'best_median': best_median[1]
        })
        best_solvers.append(cluster_data)

        # Rank all solvers for this cluster by mean score (1 = best).
        sorted_means = sorted(means.items(), key=lambda x: x[1])
        for rank, (solver, mean_score) in enumerate(sorted_means, 1):
            solver_ranking_rows.append({
                'cluster': row['cluster'],
                'solver_rank': rank,
                'solver_name': solver,
                'mean_score': mean_score,
                'num_instances': row['num_instances']
            })

    solver_ranking_df = pd.DataFrame(solver_ranking_rows)
    solver_ranking_df.to_csv(os.path.join(out_dir, 'solver_ranking_per_cluster.csv'), index=False)

    best_df = pd.DataFrame(best_solvers)
    best_df.to_csv(os.path.join(out_dir, 'best_solver_per_cluster.csv'), index=False)

    # Performance-curve plot; returns the curve data for combined plotting.
    if HAS_PLOTTING:
        result_data = plot_performance_curves_improved(df, instance_to_cluster, best_df, out_dir)
        return result_data
    return None


def load_instance_features(feature_dir, instance_hashes):
    """Load 512-dim 'sorted' feature vectors for the given instance hashes.

    Feature files live in *feature_dir* and are named ``<hash>-*.npz``; the
    part before the first '-' in the filename identifies the instance.
    Files that are unreadable, lack a 'sorted' array, or whose array is not
    shape (512,) are silently skipped (best-effort loading).
    Returns a dict mapping hash -> np.ndarray.
    """
    npz_paths = glob.glob(os.path.join(feature_dir, "*.npz"))
    hash_to_path = {os.path.basename(p).split('-')[0]: p for p in npz_paths}
    features = {}
    for instance_hash in instance_hashes:
        path = hash_to_path.get(instance_hash)
        if path is None:
            continue
        try:
            archive = np.load(path)
            vector = archive['sorted']
            if vector.shape == (512,):
                features[instance_hash] = vector
        except Exception:
            # Best-effort: skip corrupt or incompatible files.
            continue
    return features

def analyze_multiple_csvs(csv_dir, instance_to_cluster, cluster_to_instances, db_path, out_dir, feature_dir):
    """Run the solver-performance analysis for every CSV in *csv_dir*.

    Each CSV's basename (without extension) doubles as the track name used to
    filter valid instance hashes and as the name of its output subdirectory
    under *out_dir*. If at least one analysis returns curve data and plotting
    is available, a combined comparison figure is written to *out_dir*.
    """
    csv_files = glob.glob(os.path.join(csv_dir, "*.csv"))
    if not csv_files:
        print(f"No CSV files found in {csv_dir}")
        return

    print(f"Found {len(csv_files)} CSV files to analyze")

    all_results = []
    for csv_path in csv_files:
        csv_name = os.path.basename(csv_path)
        track_name = os.path.splitext(csv_name)[0]  # Use CSV filename as track name
        print(f"Processing {csv_name} with track name: {track_name}")

        # Restrict the analysis to instances belonging to this track.
        valid_hashes = filter_by_track(instance_to_cluster.keys(), db_path, track_name)
        print(f"Found {len(valid_hashes)} valid instances for track {track_name}")

        # Each CSV gets its own output subdirectory.
        track_out_dir = os.path.join(out_dir, track_name)
        os.makedirs(track_out_dir, exist_ok=True)

        outcome = analyze_solver_performance(csv_path, instance_to_cluster, cluster_to_instances,
                                             valid_hashes, track_out_dir, feature_dir)
        if outcome:
            all_results.append((csv_name, outcome))

    if all_results and HAS_PLOTTING:
        create_combined_performance_plots(all_results, out_dir)

def create_combined_performance_plots(all_results, out_dir):
    """Draw a 2x2 grid of per-dataset performance curves with one shared legend.

    *all_results* is a list of ``(csv_name, result)`` pairs where each result
    dict holds 'cutoff_times', 'vbs_curve', 'sbs_curve' and 'portfolio_curve'.
    At most four datasets are shown; surplus axes are hidden. The figure is
    saved as 'combined_performance_plots.png' in *out_dir*.
    """
    fig, axes = plt.subplots(2, 2, figsize=(18, 12))
    fig.suptitle('Solver Performance Comparison Across Datasets', fontsize=20, fontweight='bold')

    # One fixed color per selection method, shared across subplots.
    method_colors = {
        'VBS': '#2E8B57',  # Sea Green
        'SBS': '#4169E1',  # Royal Blue
        'Portfolio': '#8A2BE2',  # Blue Violet
    }

    for plot_idx, (csv_name, result) in enumerate(all_results[:4]):
        grid_row, grid_col = divmod(plot_idx, 2)
        ax = axes[grid_row, grid_col]

        cutoff_times = result['cutoff_times']
        ax.plot(cutoff_times, result['vbs_curve'], label='VBS', color=method_colors['VBS'], linewidth=3, marker='o', markersize=0)
        ax.plot(cutoff_times, result['sbs_curve'], label='SBS', color=method_colors['SBS'], linewidth=3, marker='s', markersize=0)
        ax.plot(cutoff_times, result['portfolio_curve'], label='Portfolio', color=method_colors['Portfolio'], linewidth=3, marker='^', markersize=0)

        ax.set_xscale('log')
        ax.grid(True, alpha=0.3, linewidth=1.5)
        ax.set_xlabel('Cutoff Time (seconds)', fontsize=14, fontweight='bold')
        ax.set_ylabel('Fraction Instances Solved', fontsize=14, fontweight='bold')
        ax.set_title(f'Dataset: {os.path.splitext(csv_name)[0]}', fontsize=16, fontweight='bold')

        # Trim the x-axis where the VBS curve reaches 99% of the best final
        # solved fraction (curve values are fractions in [0, 1]).
        best_fraction = max([max(result['vbs_curve']), max(result['sbs_curve']), max(result['portfolio_curve'])])
        trim_idx = 0
        for i, vbs_val in enumerate(result['vbs_curve']):
            if vbs_val >= best_fraction * 0.99:
                trim_idx = i
                break
        if trim_idx > 0:
            ax.set_xlim(1, cutoff_times[trim_idx])

        ax.set_ylim(0, 1.0)

        ax.tick_params(axis='both', which='major', labelsize=16, width=2, length=6)
        ax.tick_params(axis='both', which='minor', width=1, length=3)

    # Hide any axes that did not receive a dataset.
    for empty_idx in range(len(all_results), 4):
        grid_row, grid_col = divmod(empty_idx, 2)
        axes[grid_row, grid_col].set_visible(False)

    # Single shared legend, pulled from the first subplot, placed to the right.
    handles, labels = axes[0, 0].get_legend_handles_labels()
    fig.legend(handles, labels, loc='center right', bbox_to_anchor=(0.95, 0.75),
               frameon=True, fancybox=True, shadow=True, fontsize=14)

    plt.subplots_adjust(right=0.85)
    plt.savefig(os.path.join(out_dir, 'combined_performance_plots.png'), dpi=300, bbox_inches='tight')
    plt.close()

def plot_performance_curves_improved(df, instance_to_cluster, best_solvers_df, out_dir):
    """Plot VBS / SBS / cluster-portfolio solved-fraction curves and save a PNG.

    Parameters
    ----------
    df : pandas.DataFrame
        Solver results with a 'hash' column plus one runtime column per
        solver; runtime cells may hold the string 'timeout'.
        NOTE: a 'cluster' column is added to this frame IN PLACE.
    instance_to_cluster : dict
        Maps instance hash -> cluster id.
    best_solvers_df : pandas.DataFrame
        Must contain 'cluster' and 'best_solver_mean' columns naming the
        portfolio solver chosen for each cluster (or 'all_timeout').
    out_dir : str
        Directory where 'performance_curves.png' is written.

    Returns
    -------
    dict
        Keys 'cutoff_times', 'vbs_curve', 'sbs_curve', 'portfolio_curve',
        'sbs_solver' — reused by the combined plotting routine.

    Raises
    ------
    ValueError
        If no instance finishes below the timeout threshold (previously
        this silently divided by zero and produced inf/nan curves).
    """
    # Solver columns are everything except the bookkeeping columns.
    solver_cols = [col for col in df.columns if col not in ('hash', 'vresult', 'cluster')]

    # Attach cluster labels (in place, as downstream code expects).
    df['cluster'] = df['hash'].map(instance_to_cluster)

    # Runtimes arrive as strings; 'timeout' becomes NaN so it never wins min().
    solver_data = df[solver_cols].replace('timeout', np.nan).astype(float)

    # Keep only instances at least one solver finished below the threshold.
    timeout_threshold = 5000.0  # cutoff used when the runs were produced
    valid_mask = (solver_data < timeout_threshold).any(axis=1)
    df_filtered = df[valid_mask].copy()
    solver_data_filtered = solver_data[valid_mask]

    print(f"Original instances: {len(df)}")
    print(f"Instances after filtering timeouts: {len(df_filtered)}")

    total_instances = len(df_filtered)
    if total_instances == 0:
        # Fail loudly instead of dividing by zero below.
        raise ValueError("No instances remain after timeout filtering; cannot compute performance curves.")

    # VBS (Virtual Best Solver): per-instance minimum over all solvers.
    df_filtered['vbs_time'] = solver_data_filtered.min(axis=1)

    # SBS (Single Best Solver): the one solver with the lowest mean runtime.
    solver_means = solver_data_filtered.mean()
    sbs_solver = solver_means.idxmin()
    df_filtered['sbs_time'] = solver_data_filtered[sbs_solver]

    # Portfolio: per-cluster best solver chosen during cross-validation.
    cluster_to_best_solver = best_solvers_df.set_index('cluster')['best_solver_mean'].to_dict()

    def get_portfolio_time(row):
        """Runtime of the cluster's chosen solver, or NaN when unsolvable."""
        best_solver = cluster_to_best_solver.get(row['cluster'])
        # Treat an unknown cluster (None) the same as an 'all_timeout'
        # cluster: the instance counts as unsolved. Previously a missing
        # cluster raised a KeyError inside .loc.
        if best_solver is None or best_solver == 'all_timeout' or best_solver not in solver_data_filtered.columns:
            return np.nan
        return solver_data_filtered.loc[row.name, best_solver]

    df_filtered['portfolio_time'] = df_filtered.apply(get_portfolio_time, axis=1)

    # Evaluate solved fractions on a log-spaced cutoff grid: 1 .. 10000 s.
    cutoff_times = np.logspace(0, 4, 100)
    vbs_curve = []
    sbs_curve = []
    portfolio_curve = []

    print(f"Total instances after filtering: {total_instances}")

    for cutoff in cutoff_times:
        # NaN times compare False against the cutoff, so unsolved instances
        # never count; the explicit isna() guard makes that intent visible.
        vbs_solved = (~df_filtered['vbs_time'].isna() & (df_filtered['vbs_time'] <= cutoff)).sum() / total_instances
        sbs_solved = (~df_filtered['sbs_time'].isna() & (df_filtered['sbs_time'] <= cutoff)).sum() / total_instances
        portfolio_solved = (~df_filtered['portfolio_time'].isna() & (df_filtered['portfolio_time'] <= cutoff)).sum() / total_instances

        vbs_curve.append(vbs_solved)
        sbs_curve.append(sbs_solved)
        portfolio_curve.append(portfolio_solved)

    # Trim the x-axis at the first cutoff where any curve reaches 99% of the
    # best final solved fraction, so the flat tail doesn't dominate the plot.
    max_solved = max(max(vbs_curve), max(sbs_curve), max(portfolio_curve))
    max_cutoff_idx = 0
    for i, (vbs_val, sbs_val, portfolio_val) in enumerate(zip(vbs_curve, sbs_curve, portfolio_curve)):
        if max(vbs_val, sbs_val, portfolio_val) >= max_solved * 0.99:
            max_cutoff_idx = i
            break

    # ----- plotting -----
    fig, ax = plt.subplots(figsize=(12, 8))

    # Fixed palette shared with the combined plot.
    colors = {
        'VBS': '#2E8B57',  # Sea Green
        'SBS': '#4169E1',  # Royal Blue
        'Portfolio': '#8A2BE2',  # Blue Violet
        'Hybrid': '#FF8C00'  # Dark Orange
    }

    ax.plot(cutoff_times, vbs_curve, label='VBS', color=colors['VBS'], linewidth=3, marker='o', markersize=0)
    ax.plot(cutoff_times, sbs_curve, label=f'SBS ({sbs_solver})', color=colors['SBS'], linewidth=3, marker='s', markersize=0)
    ax.plot(cutoff_times, portfolio_curve, label='Portfolio', color=colors['Portfolio'], linewidth=3, marker='^', markersize=0)

    ax.set_xscale('log')
    ax.grid(True, alpha=0.3, linewidth=1.5)
    ax.set_xlabel('Cutoff Time (seconds)', fontsize=16, fontweight='bold')
    ax.set_ylabel('Fraction Instances Solved', fontsize=16, fontweight='bold')
    ax.set_title('Solver Performance Comparison', fontsize=18, fontweight='bold')

    ax.set_xlim(1, cutoff_times[max_cutoff_idx])
    ax.set_ylim(0, 1.0)

    ax.tick_params(axis='both', which='major', labelsize=14, width=2, length=6)
    ax.tick_params(axis='both', which='minor', width=1, length=3)

    # Legend outside the axes on the right; handles are gathered from ax.
    fig.legend(loc='center right', bbox_to_anchor=(1.05, 0.5), frameon=True, fancybox=True, shadow=True, fontsize=14)
    plt.subplots_adjust(right=0.85)

    output_path = os.path.join(out_dir, 'performance_curves.png')
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()

    # Console summary of per-solver mean runtimes.
    print(f"\nSolver Statistics:")
    print(f"Single Best Solver (SBS): {sbs_solver}")
    print(f"Mean solve times for each solver:")
    for solver, mean_time in solver_means.items():
        print(f"  {solver}: {mean_time:.2f}")

    # Curve data is returned so the caller can build the combined figure.
    return {
        'cutoff_times': cutoff_times,
        'vbs_curve': vbs_curve,
        'sbs_curve': sbs_curve,
        'portfolio_curve': portfolio_curve,
        'sbs_solver': sbs_solver
    }

def main():
    """CLI entry point: parse arguments, validate mode, and dispatch the analysis."""
    parser = argparse.ArgumentParser(description='Analyze correlation between clustering and solver performance')
    parser.add_argument('--cluster_dir', required=True, help='Directory with cluster .txt files')
    parser.add_argument('--solver_csv', help='Single CSV file with solver results (use --csv_dir for multiple files)')
    parser.add_argument('--csv_dir', help='Directory containing multiple CSV files with solver results')
    parser.add_argument('--db', required=True, help='Path to instance database')
    parser.add_argument('--track', help='Track name to filter instances (required for single CSV, ignored for multiple CSVs)')
    parser.add_argument('--out_dir', required=True, help='Directory to save analysis results')
    parser.add_argument('--feature_dir', required=True, help='Directory containing .npz feature files for all instances')
    args = parser.parse_args()

    # Exactly one of the two CSV sources must be supplied.
    single_mode = bool(args.solver_csv)
    batch_mode = bool(args.csv_dir)
    if not (single_mode or batch_mode):
        parser.error("Either --solver_csv or --csv_dir must be provided")
    if single_mode and batch_mode:
        parser.error("Only one of --solver_csv or --csv_dir should be provided")

    # --track pairs only with single-CSV mode.
    if single_mode and not args.track:
        parser.error("--track is required when using --solver_csv")
    if batch_mode and args.track:
        print("Warning: --track argument will be ignored when using --csv_dir (track names are derived from CSV filenames)")

    os.makedirs(args.out_dir, exist_ok=True)
    instance_to_cluster, cluster_to_instances = read_cluster_files(args.cluster_dir)

    if args.csv_dir:
        # Batch mode: each CSV's filename doubles as its track name.
        analyze_multiple_csvs(args.csv_dir, instance_to_cluster, cluster_to_instances, args.db, args.out_dir, args.feature_dir)
    else:
        # Single-CSV mode: restrict clustered instances to the requested track.
        valid_hashes = filter_by_track(instance_to_cluster.keys(), args.db, args.track)
        analyze_solver_performance(args.solver_csv, instance_to_cluster, cluster_to_instances, valid_hashes, args.out_dir, args.feature_dir)

    print(f"Analysis complete. Results saved to {args.out_dir}")

# Standard script-entry guard: run the CLI only when executed directly,
# not when this module is imported for its helper functions.
if __name__ == '__main__':
    main() 