import os
import argparse
import numpy as np
import pandas as pd
import pickle
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
import inspect

def load_clustering_results(results_dir):
    """Load clustering results from the output of analyze-hdbscan.py (supports HDBSCAN and KMeans)"""
    with open(os.path.join(results_dir, 'clustering_results.pkl'), 'rb') as f:
        results = pickle.load(f)
    return results

def read_solver_results(solver_csv):
    """Read and process solver results CSV."""
    frame = pd.read_csv(solver_csv)
    # Bookkeeping columns that are irrelevant to the timing analysis; drop only
    # the ones actually present so missing columns don't raise.
    unwanted = ('benchmark', 'verified-result', 'claimed-result')
    present = [name for name in frame.columns if name in unwanted]
    return frame.drop(columns=present)

def get_solver_columns(df):
    """Identify solver columns in the dataframe.

    A column counts as a solver column when every value parses as a float after
    mapping the literal string 'timeout' to a large penalty value.
    """
    non_solver = ('hash', 'cluster')

    def _parses_as_times(series):
        # 'timeout' entries stand in for a run that hit the time limit.
        try:
            series.replace('timeout', '10000.0').astype(float)
        except ValueError:
            return False
        return True

    return [name for name in df.columns
            if name not in non_solver and _parses_as_times(df[name])]

def plot_performance_curves(test_solver_df, cluster_best_solvers, solver_cols, out_dir, algorithm, cluster_results, full_solver_df=None):
    """Plot performance curves comparing VBS, SBS and cluster-based portfolio solver performance on test instances.

    Parameters
    ----------
    test_solver_df : pd.DataFrame
        Solver times for the test instances; must contain 'hash' and 'cluster' columns.
    cluster_best_solvers : dict
        Maps cluster id -> name of the best solver for that cluster (learned on training data).
    solver_cols : list[str]
        Names of the solver-time columns.
    out_dir : str
        Directory where the plot and the summary text file are written.
    algorithm : str
        'hdbscan' or 'kmeans'; controls noise handling and the diameter-based SBS fallback.
    cluster_results : dict
        Clustering artifacts (PCA features, labels, instance names, fitted clusterer).
    full_solver_df : pd.DataFrame, optional
        Full (train+test) solver results used to pick the SBS. When omitted, the
        caller's local `solver_df` is located via frame inspection for backward
        compatibility with existing call sites.

    Returns
    -------
    dict
        Summary of VBS / SBS / portfolio solved counts and fractions.

    Raises
    ------
    ValueError
        If no valid (non-noise) test instances remain to evaluate.
    """
    # Convert solver times to float, replacing 'timeout' with np.nan
    solver_data = test_solver_df[solver_cols].replace('timeout', np.nan).astype(float)

    # Calculate VBS (minimum time across all solvers for each instance)
    test_solver_df['vbs_time'] = solver_data.min(axis=1)

    # Calculate SBS (Single Best Solver): the solver with the lowest mean time.
    # SBS should be chosen over ALL instances, not just the test set, so prefer
    # the explicitly-passed full_solver_df.
    if full_solver_df is None:
        # Backward-compatibility fallback: locate the caller's `solver_df` local.
        # NOTE(review): frame inspection is fragile; new callers should pass
        # full_solver_df explicitly instead of relying on this.
        outer_frame = inspect.currentframe().f_back
        full_solver_df = outer_frame.f_locals.get('solver_df')
    if full_solver_df is not None:
        full_solver_data = full_solver_df[solver_cols].replace('timeout', np.nan).astype(float)
        solver_means = full_solver_data.mean()
    else:
        # fallback to current test set only
        solver_means = solver_data.mean()
    sbs_solver = solver_means.idxmin()
    test_solver_df['sbs_time'] = solver_data[sbs_solver]  # Get times for the SBS

    # --- KMeans: Compute cluster diameters and each test point's distance to its center ---
    kmeans_diameters = None
    uuid_to_center_dist = {}
    if algorithm == 'kmeans':
        train_features_pca = cluster_results['train_features_pca']
        test_features_pca = cluster_results['test_features_pca']
        train_labels = np.array(cluster_results['train_labels'])
        test_labels = np.array(cluster_results['test_labels'])
        clusterer = cluster_results['clusterer']
        kmeans_centers = clusterer.cluster_centers_
        # Diameter = max distance from the center to any TRAINING point of the cluster.
        kmeans_diameters = {}
        for cluster_id in range(kmeans_centers.shape[0]):
            cluster_mask = train_labels == cluster_id
            if not np.any(cluster_mask):
                kmeans_diameters[cluster_id] = 0.0
                continue
            dists = np.linalg.norm(train_features_pca[cluster_mask] - kmeans_centers[cluster_id], axis=1)
            kmeans_diameters[cluster_id] = np.max(dists)
        # Distance from each test point to its assigned center, in test_names order.
        test_to_center_dist = np.linalg.norm(test_features_pca - kmeans_centers[test_labels], axis=1)
        print("test_to_center_dist shape:", test_to_center_dist.shape)
        # BUG FIX: these distances are ordered like cluster_results['test_names'],
        # which is not necessarily the row order of test_solver_df (CSV order).
        # Key them by instance uuid (the part of the name before the first '-')
        # so lookups cannot be misaligned.
        uuid_to_center_dist = {
            name.split('-')[0]: dist
            for name, dist in zip(cluster_results['test_names'], test_to_center_dist)
        }

    # Get the time for the best solver in each cluster (portfolio approach)
    def get_portfolio_time(row):
        # For HDBSCAN, noise points (-1) fall back to the SBS.
        if row['cluster'] == -1 and algorithm == 'hdbscan':
            return solver_data.iloc[row.name][sbs_solver]
        # For KMeans, a test instance farther from its assigned center than the
        # cluster diameter falls back to the SBS.
        if algorithm == 'kmeans':
            assigned_cluster = row['cluster']
            dist = uuid_to_center_dist.get(row['hash'])
            if dist is not None and assigned_cluster in kmeans_diameters:
                if dist > kmeans_diameters[assigned_cluster]:
                    return solver_data.iloc[row.name][sbs_solver]
        best_solver = cluster_best_solvers.get(row['cluster'])
        if best_solver is None:
            return np.nan
        return solver_data.iloc[row.name][best_solver]

    # After the reset, row.name is the positional index, which matches
    # solver_data's positional order (same underlying row order).
    test_solver_df = test_solver_df.reset_index(drop=True)
    test_solver_df['portfolio_time'] = test_solver_df.apply(get_portfolio_time, axis=1)

    # Calculate performance curves
    cutoff_times = np.logspace(0, 4, 100)  # 1 to 10000, log scale
    vbs_curve = []
    sbs_curve = []
    portfolio_curve = []

    # Count total valid instances (HDBSCAN noise points are excluded from the totals)
    valid_mask = ~((test_solver_df['cluster'] == -1) & (algorithm == 'hdbscan'))
    total_instances = valid_mask.sum()
    print(f"Total test instances (excluding noise): {total_instances}")
    if total_instances == 0:
        # Guard: without this, every fraction below divides by zero.
        raise ValueError("No valid test instances to evaluate (all instances were noise)")

    # Calculate curves
    for cutoff in cutoff_times:
        vbs_solved = (~test_solver_df['vbs_time'].isna() & (test_solver_df['vbs_time'] <= cutoff) & valid_mask).sum() / total_instances
        sbs_solved = (~test_solver_df['sbs_time'].isna() & (test_solver_df['sbs_time'] <= cutoff) & valid_mask).sum() / total_instances
        portfolio_solved = (~test_solver_df['portfolio_time'].isna() & (test_solver_df['portfolio_time'] <= cutoff) & valid_mask).sum() / total_instances

        vbs_curve.append(vbs_solved)
        sbs_curve.append(sbs_solved)
        portfolio_curve.append(portfolio_solved)

    # Create the plot
    plt.figure(figsize=(10, 6))
    plt.plot(cutoff_times, vbs_curve, label='VBS', color='green', linewidth=2)
    plt.plot(cutoff_times, sbs_curve, label=f'SBS ({sbs_solver})', color='blue', linewidth=2)
    plt.plot(cutoff_times, portfolio_curve, label='Cluster Portfolio', color='purple', linewidth=2)

    plt.xscale('log')
    plt.grid(True, alpha=0.3)
    plt.xlabel('Cutoff Time (seconds)')
    plt.ylabel('Fraction Instances Solved')
    plt.title(f'Test Set Performance Comparison ({algorithm.upper()})')
    plt.legend()

    # Set axis limits
    plt.xlim(1, 10000)
    plt.ylim(0, 1.0)

    # Save the plot
    output_path = os.path.join(out_dir, 'test_performance_curves.png')
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()

    # Print some statistics
    print(f"\nTest Set Solver Statistics:")
    print(f"Single Best Solver (SBS): {sbs_solver}")
    print(f"Mean solve times for each solver:")
    for solver, mean_time in solver_means.items():
        print(f"  {solver}: {mean_time:.2f}")

    # Calculate and print portfolio performance summary
    portfolio_stats = {
        'vbs_solved': (~test_solver_df['vbs_time'].isna() & valid_mask).sum(),
        'sbs_solved': (~test_solver_df['sbs_time'].isna() & valid_mask).sum(),
        'portfolio_solved': (~test_solver_df['portfolio_time'].isna() & valid_mask).sum(),
        'total': total_instances
    }

    print(f"\nPortfolio Performance Summary:")
    print(f"VBS solved: {portfolio_stats['vbs_solved']}/{portfolio_stats['total']} ({portfolio_stats['vbs_solved']/portfolio_stats['total']:.3f})")
    print(f"SBS solved: {portfolio_stats['sbs_solved']}/{portfolio_stats['total']} ({portfolio_stats['sbs_solved']/portfolio_stats['total']:.3f})")
    print(f"Portfolio solved: {portfolio_stats['portfolio_solved']}/{portfolio_stats['total']} ({portfolio_stats['portfolio_solved']/portfolio_stats['total']:.3f})")

    # Save portfolio statistics
    portfolio_summary = {
        'algorithm': algorithm,
        'sbs_solver': sbs_solver,
        'vbs_solved': portfolio_stats['vbs_solved'],
        'sbs_solved': portfolio_stats['sbs_solved'],
        'portfolio_solved': portfolio_stats['portfolio_solved'],
        'total_instances': portfolio_stats['total'],
        'vbs_fraction': portfolio_stats['vbs_solved']/portfolio_stats['total'],
        'sbs_fraction': portfolio_stats['sbs_solved']/portfolio_stats['total'],
        'portfolio_fraction': portfolio_stats['portfolio_solved']/portfolio_stats['total']
    }

    with open(os.path.join(out_dir, 'portfolio_performance_summary.txt'), 'w') as f:
        f.write(f"Algorithm: {algorithm}\n")
        f.write(f"Single Best Solver: {sbs_solver}\n\n")
        f.write(f"VBS solved: {portfolio_stats['vbs_solved']}/{portfolio_stats['total']} ({portfolio_stats['vbs_solved']/portfolio_stats['total']:.3f})\n")
        f.write(f"SBS solved: {portfolio_stats['sbs_solved']}/{portfolio_stats['total']} ({portfolio_stats['sbs_solved']/portfolio_stats['total']:.3f})\n")
        f.write(f"Portfolio solved: {portfolio_stats['portfolio_solved']}/{portfolio_stats['total']} ({portfolio_stats['portfolio_solved']/portfolio_stats['total']:.3f})\n")

    return portfolio_summary

def analyze_cluster_solver_correlation(cluster_results, solver_df, out_dir):
    """Analyze correlation between clusters and solver performance (supports HDBSCAN and KMeans).

    Joins the clustering output with the per-instance solver times, learns the best
    solver per training cluster, validates those predictions on the test set, and
    delegates to save_analysis_results / plot_performance_curves for reporting.

    Parameters
    ----------
    cluster_results : dict
        Output of the clustering step (names, labels, strengths, params, ...).
    solver_df : pd.DataFrame
        Solver results with a 'hash' column plus one time column per solver.
    out_dir : str
        Directory where reports and plots are written.

    Returns
    -------
    tuple
        (cluster_best_solvers, cluster_solver_stats, prediction_results, portfolio_summary)
    """
    solver_cols = get_solver_columns(solver_df)
    TIMEOUT_PENALTY = 10000.0  # seconds; stands in for 'timeout' entries when averaging

    # Print which clustering algorithm was used
    algorithm = cluster_results.get('params', {}).get('algorithm', 'unknown')
    print(f"Clustering algorithm used: {algorithm}")

    # Analyze training instances; the uuid is the part of the name before the first '-'
    train_names = cluster_results['train_names']
    train_labels = cluster_results['train_labels']
    train_uuids = [name.split('-')[0] for name in train_names]

    # Create mapping from UUID to cluster
    train_uuid_to_cluster = dict(zip(train_uuids, train_labels))

    # Filter solver results for training instances
    train_solver_df = solver_df[solver_df['hash'].isin(train_uuids)].copy()
    train_solver_df['cluster'] = train_solver_df['hash'].map(train_uuid_to_cluster)

    # Calculate best solver per cluster
    cluster_best_solvers = {}
    cluster_solver_stats = defaultdict(lambda: defaultdict(list))

    for cluster in np.unique(train_labels):
        # For HDBSCAN, -1 is noise; for KMeans, there is no noise cluster
        if cluster == -1 and algorithm == 'hdbscan':  # Only skip -1 for HDBSCAN
            continue

        cluster_instances = train_solver_df[train_solver_df['cluster'] == cluster]
        if cluster_instances.empty:
            continue

        # Calculate mean performance for each solver (timeouts count as the penalty)
        solver_means = {}
        for solver in solver_cols:
            times = cluster_instances[solver].replace('timeout', str(TIMEOUT_PENALTY)).astype(float)
            solver_means[solver] = times.mean()

        # Find best solver (lowest mean time)
        best_solver = min(solver_means.items(), key=lambda x: x[1])
        cluster_best_solvers[cluster] = best_solver[0]

        # Collect detailed statistics
        for solver in solver_cols:
            times = cluster_instances[solver].replace('timeout', str(TIMEOUT_PENALTY)).astype(float)
            cluster_solver_stats[cluster][solver].extend(times)

    # Analyze test instances
    test_names = cluster_results['test_names']
    test_labels = cluster_results['test_labels']
    test_strengths = cluster_results['test_strengths']
    test_uuids = [name.split('-')[0] for name in test_names]

    # Create mapping from UUID to predicted cluster and strength
    test_uuid_to_info = {uuid: (label, strength)
                        for uuid, label, strength in zip(test_uuids, test_labels, test_strengths)}

    # Filter solver results for test instances
    test_solver_df = solver_df[solver_df['hash'].isin(test_uuids)].copy()
    test_solver_df['cluster'] = test_solver_df['hash'].map(lambda x: test_uuid_to_info[x][0])
    test_solver_df['prediction_strength'] = test_solver_df['hash'].map(lambda x: test_uuid_to_info[x][1])

    # Analyze prediction accuracy
    prediction_results = {
        'correct_predictions': 0,
        'total_predictions': 0,
        'cluster_accuracies': defaultdict(lambda: {'correct': 0, 'total': 0}),
        'strength_vs_accuracy': [],
        'detailed_predictions': []
    }

    for _, row in test_solver_df.iterrows():
        # For HDBSCAN, -1 is noise; for KMeans, there is no noise cluster
        if row['cluster'] == -1 and algorithm == 'hdbscan':
            continue

        # Get actual best solver. BUG FIX: test rows may still contain the literal
        # string 'timeout'; map it to TIMEOUT_PENALTY instead of crashing float().
        solver_times = {
            solver: TIMEOUT_PENALTY if row[solver] == 'timeout' else float(row[solver])
            for solver in solver_cols
        }
        actual_best = min(solver_times.items(), key=lambda x: x[1])[0]

        # Get predicted best solver
        predicted_best = cluster_best_solvers.get(row['cluster'])

        if predicted_best is not None:
            prediction_results['total_predictions'] += 1
            correct = (predicted_best == actual_best)
            if correct:
                prediction_results['correct_predictions'] += 1

            # Update cluster-specific accuracy
            prediction_results['cluster_accuracies'][row['cluster']]['total'] += 1
            if correct:
                prediction_results['cluster_accuracies'][row['cluster']]['correct'] += 1

            # Record strength vs accuracy
            prediction_results['strength_vs_accuracy'].append({
                'strength': row['prediction_strength'],
                'correct': int(correct)
            })

            # Record detailed prediction
            prediction_results['detailed_predictions'].append({
                'uuid': row['hash'],
                'cluster': row['cluster'],
                'strength': row['prediction_strength'],
                'predicted_solver': predicted_best,
                'actual_solver': actual_best,
                'correct': correct
            })

    # Save results
    save_analysis_results(out_dir, cluster_best_solvers, cluster_solver_stats,
                         prediction_results, solver_cols, algorithm)

    # Plot performance curves
    portfolio_summary = plot_performance_curves(test_solver_df, cluster_best_solvers, solver_cols, out_dir, algorithm, cluster_results)

    return cluster_best_solvers, cluster_solver_stats, prediction_results, portfolio_summary

def save_analysis_results(out_dir, cluster_best_solvers, cluster_solver_stats,
                         prediction_results, solver_cols, algorithm):
    """Save analysis results to files and create visualizations.

    Writes: cluster_best_solvers.txt, solver_performance_heatmap.png,
    prediction_summary.txt, strength_vs_accuracy.png, detailed_predictions.csv.
    Plots are skipped (instead of crashing) when their input data is empty.

    Parameters
    ----------
    out_dir : str
        Output directory; created if missing.
    cluster_best_solvers : dict
        cluster id -> best solver name.
    cluster_solver_stats : dict
        cluster id -> solver name -> list of solve times.
    prediction_results : dict
        Accuracy bookkeeping produced by analyze_cluster_solver_correlation.
    solver_cols : list[str]
        Solver column names (heatmap columns).
    algorithm : str
        'hdbscan' or 'kmeans'; controls noise-cluster (-1) filtering.
    """
    os.makedirs(out_dir, exist_ok=True)

    # Save cluster best solvers
    with open(os.path.join(out_dir, 'cluster_best_solvers.txt'), 'w') as f:
        for cluster, solver in sorted(cluster_best_solvers.items()):
            f.write(f"Cluster {cluster}: {solver}\n")

    # Build the cluster x solver mean-time matrix
    performance_matrix = []
    clusters = []
    for cluster in sorted(cluster_solver_stats.keys()):
        # For HDBSCAN, -1 is noise; for KMeans, there is no noise cluster
        if cluster == -1 and algorithm == 'hdbscan':
            continue
        clusters.append(cluster)
        cluster_perf = []
        for solver in solver_cols:
            times = cluster_solver_stats[cluster][solver]
            # np.mean([]) warns and yields nan; make the empty case explicit
            cluster_perf.append(np.mean(times) if len(times) > 0 else np.nan)
        performance_matrix.append(cluster_perf)

    # Only draw the heatmap when there is at least one non-noise cluster;
    # sns.heatmap raises on an empty matrix.
    if performance_matrix:
        plt.figure(figsize=(12, 8))
        performance_df = pd.DataFrame(performance_matrix,
                                    index=[f"Cluster {c}" for c in clusters],
                                    columns=solver_cols)

        # Normalize each row by its best (minimum) solver time
        normalized_df = performance_df.div(performance_df.min(axis=1), axis=0)

        sns.heatmap(normalized_df, annot=True, fmt='.2f', cmap='YlOrRd')
        plt.title('Normalized Solver Performance by Cluster')
        plt.tight_layout()
        plt.savefig(os.path.join(out_dir, 'solver_performance_heatmap.png'))
        plt.close()

    # Save prediction results (max(1, ...) avoids division by zero when no predictions)
    prediction_summary = {
        'overall_accuracy': prediction_results['correct_predictions'] / max(1, prediction_results['total_predictions']),
        'total_predictions': prediction_results['total_predictions'],
        'correct_predictions': prediction_results['correct_predictions']
    }

    # Calculate cluster-specific accuracies
    cluster_accuracies = {}
    for cluster, stats in prediction_results['cluster_accuracies'].items():
        if stats['total'] > 0:
            cluster_accuracies[cluster] = stats['correct'] / stats['total']

    # Save prediction summary
    with open(os.path.join(out_dir, 'prediction_summary.txt'), 'w') as f:
        f.write(f"Overall Accuracy: {prediction_summary['overall_accuracy']:.4f}\n")
        f.write(f"Total Predictions: {prediction_summary['total_predictions']}\n")
        f.write(f"Correct Predictions: {prediction_summary['correct_predictions']}\n\n")
        f.write("Cluster-specific Accuracies:\n")
        for cluster, acc in sorted(cluster_accuracies.items()):
            f.write(f"Cluster {cluster}: {acc:.4f}\n")

    # Plot strength vs accuracy (skipped when no predictions were recorded,
    # since sns.boxplot raises on missing columns)
    strength_df = pd.DataFrame(prediction_results['strength_vs_accuracy'])
    if not strength_df.empty:
        plt.figure(figsize=(10, 6))
        sns.boxplot(x='correct', y='strength', data=strength_df)
        plt.title('Prediction Strength vs Accuracy')
        plt.xlabel('Correct Prediction')
        plt.ylabel('Prediction Strength')
        plt.savefig(os.path.join(out_dir, 'strength_vs_accuracy.png'))
        plt.close()

    # Save detailed predictions; pin the columns so an empty run still writes a header
    detail_columns = ['uuid', 'cluster', 'strength', 'predicted_solver', 'actual_solver', 'correct']
    pd.DataFrame(prediction_results['detailed_predictions'], columns=detail_columns).to_csv(
        os.path.join(out_dir, 'detailed_predictions.csv'), index=False)

def main():
    """Command-line entry point: load clustering output, join with solver times, run the analysis."""
    arg_parser = argparse.ArgumentParser(
        description='Analyze cluster-solver correlation with prediction validation (supports HDBSCAN and KMeans)')
    arg_parser.add_argument('--results_dir', required=True, help='Directory containing clustering results')
    arg_parser.add_argument('--solver_csv', required=True, help='CSV file with solver results')
    arg_parser.add_argument('--out_dir', required=True, help='Directory to save analysis results')
    opts = arg_parser.parse_args()

    # Pickled artifacts from the clustering step
    cluster_results = load_clustering_results(opts.results_dir)
    # Per-instance solver run times
    solver_df = read_solver_results(opts.solver_csv)

    analyze_cluster_solver_correlation(cluster_results, solver_df, opts.out_dir)
    print(f"Analysis complete. Results saved to {opts.out_dir}")


if __name__ == '__main__':
    main()