import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.decomposition import IncrementalPCA

from sklearn.cluster import KMeans, DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
from sklearn.neighbors import NearestNeighbors
import matplotlib
matplotlib.use('Agg')  # Use non-interactive backend to avoid X11 errors
import matplotlib.pyplot as plt
import argparse
import seaborn as sns
from collections import Counter
import pickle
import joblib
from db_utils import InstanceDatabase

try:
    import hdbscan
    HAS_HDBSCAN = True
except ImportError:
    HAS_HDBSCAN = False
    print("Warning: hdbscan not installed. Install with: pip install hdbscan")


def extract_uuid_and_family(filename):
    """
    Extract UUID and family name from filename with pattern:
    uuid-family-scope.npz

    Args:
        filename: The filename to parse

    Returns:
        tuple: (uuid, family_name)
    """
    # Strip only a trailing '.npz'.  The previous str.replace('.npz', '')
    # would also delete an occurrence of '.npz' embedded in the middle of
    # the name, corrupting the uuid/family split.
    base_name = filename[:-len('.npz')] if filename.endswith('.npz') else filename

    # Split by dash and extract parts
    parts = base_name.split('-')

    if len(parts) >= 3 and parts[-1] == 'scope':
        # Pattern: uuid-family-scope; rejoin the middle parts in case the
        # family name itself contains dashes.
        return parts[0], '-'.join(parts[1:-1])

    if '~' in base_name:
        # Old pattern: uuid~family.npz (second '~'-separated token is the
        # family, matching the legacy behavior).
        legacy_parts = base_name.split('~')
        return legacy_parts[0], legacy_parts[1]

    # Unknown pattern
    return base_name, 'unknown'


def load_features_scope(feature_dir, sources=None, analysis_dir=None, track_uuids=None):
    """Load and concatenate 'scope' feature vectors from .npz files.

    Args:
        feature_dir: Directory containing uuid-family-scope.npz files.
        sources: Optional list of npz keys to concatenate (in the given
            order); when None, all keys stored in each archive are used.
        analysis_dir: Directory for skipped_files.log (cwd when None).
        track_uuids: Optional collection of UUIDs; files whose UUID is not
            in it are ignored.

    Returns:
        tuple: (features array of shape (n_samples, n_features),
                list of file base names, list of family names).
        Exits the process when no files are found or nothing loads.
    """
    all_features = []
    image_names = []
    family_names = []
    expected_length = None
    feature_files = [f for f in os.listdir(feature_dir) if f.endswith('.npz')]
    if not feature_files:
        print("No feature files found. Please run feature extraction first.")
        exit(1)
    print(f"Found {len(feature_files)} feature files")

    # Filter files by track if specified
    if track_uuids is not None:
        feature_files = [f for f in feature_files
                         if extract_uuid_and_family(f)[0] in track_uuids]
        print(f"Filtered to {len(feature_files)} files in specified track")

    skipped_log_path = os.path.join(analysis_dir, 'skipped_files.log') if analysis_dir else 'skipped_files.log'
    # Context manager guarantees the log is closed even if loading raises.
    with open(skipped_log_path, 'w') as skipped_log:
        for npz_file in tqdm(feature_files, desc="Loading features"):
            npz_path = os.path.join(feature_dir, npz_file)
            if not os.path.exists(npz_path):
                continue
            try:
                # np.load is inside the try so a corrupt/truncated archive
                # is logged and skipped instead of aborting the whole run;
                # the with-block also closes the underlying file handle.
                with np.load(npz_path) as data:
                    # New structure: just 'origin' and 'sorted' keys
                    required_keys = {'origin', 'sorted'}
                    data_keys = set(data.files)
                    if not required_keys.issubset(data_keys):
                        skipped_log.write(f"File {npz_file} does not have all required feature keys, skipping. Keys found: {sorted(data_keys)}\n")
                        continue

                    # Select keys based on the sources parameter
                    if sources:
                        # Keep only requested sources, in request order.
                        selected_keys = [s for s in sources if s in data_keys]
                    else:
                        # Use data.files (stable archive order).  Iterating
                        # the *set* here made the concatenation order depend
                        # on hash randomization across runs.
                        selected_keys = list(data.files)

                    if not selected_keys:
                        skipped_log.write(f"No matching features in {npz_file}, skipping.\n")
                        continue

                    # Combine selected features into one flat vector.
                    combined_feature = np.concatenate([data[key].flatten() for key in selected_keys])

                if expected_length is None:
                    expected_length = len(combined_feature)
                if len(combined_feature) != expected_length:
                    skipped_log.write(f"Warning: Feature length mismatch in {npz_file} (got {len(combined_feature)}, expected {expected_length}). Skipping this file.\n")
                    continue
                all_features.append(combined_feature)
                image_names.append(npz_file[:-len('.npz')])
                family_names.append(extract_uuid_and_family(npz_file)[1])
            except Exception as e:
                skipped_log.write(f"Error processing {npz_file}: {str(e)}\n")
                continue
    if not all_features:
        print("No features were loaded.")
        exit(1)
    return np.array(all_features), image_names, family_names


def load_features_ascii(feature_dir, analysis_dir=None, feature_types=None, track_uuids=None):
    """Load and concatenate ASCII feature vectors from .npz files.

    Args:
        feature_dir: Directory containing .npz ASCII feature files.
        analysis_dir: Directory for skipped_files.log (cwd when None).
        feature_types: Optional subset of {'min','max','mean','direct'} to
            concatenate, in the given order; defaults to all four.
        track_uuids: Optional collection of UUIDs; files whose UUID is not
            in it are ignored.

    Returns:
        tuple: (features array of shape (n_samples, n_features),
                list of file base names, list of family names).
        Exits the process when no files are found or nothing loads.
    """
    all_features = []
    image_names = []
    family_names = []
    expected_length = None
    feature_files = [f for f in os.listdir(feature_dir) if f.endswith('.npz')]
    if not feature_files:
        print("No ASCII feature files found.")
        exit(1)
    print(f"Found {len(feature_files)} ASCII feature files")

    # Filter files by track if specified
    if track_uuids is not None:
        feature_files = [f for f in feature_files
                         if extract_uuid_and_family(f)[0] in track_uuids]
        print(f"Filtered to {len(feature_files)} files in specified track")

    skipped_log_path = os.path.join(analysis_dir, 'skipped_files.log') if analysis_dir else 'skipped_files.log'
    # Context manager guarantees the log is closed even if loading raises.
    with open(skipped_log_path, 'w') as skipped_log:
        for npz_file in tqdm(feature_files, desc="Loading ASCII features"):
            npz_path = os.path.join(feature_dir, npz_file)
            if not os.path.exists(npz_path):
                continue
            try:
                # np.load is inside the try so a corrupt/truncated archive
                # is logged and skipped instead of aborting the whole run;
                # the with-block also closes the underlying file handle.
                with np.load(npz_path) as data:
                    required_keys = {'min', 'max', 'mean', 'direct'}
                    data_keys = set(data.files)
                    if not required_keys.issubset(data_keys):
                        skipped_log.write(f"File {npz_file} does not have all required ASCII feature keys, skipping. Keys found: {sorted(data_keys)}\n")
                        continue
                    # Select only the requested feature types if specified;
                    # the default order is fixed, keeping vectors comparable.
                    if feature_types:
                        selected_keys = [k for k in feature_types if k in data_keys]
                    else:
                        selected_keys = ['min', 'max', 'mean', 'direct']
                    if not selected_keys:
                        skipped_log.write(f"No matching ASCII features in {npz_file}, skipping.\n")
                        continue
                    combined_feature = np.concatenate([data[key].flatten() for key in selected_keys])

                if expected_length is None:
                    expected_length = len(combined_feature)
                if len(combined_feature) != expected_length:
                    skipped_log.write(f"Warning: Feature length mismatch in {npz_file} (got {len(combined_feature)}, expected {expected_length}). Skipping this file.\n")
                    continue
                all_features.append(combined_feature)
                image_names.append(npz_file[:-len('.npz')])
                family_names.append(extract_uuid_and_family(npz_file)[1])
            except Exception as e:
                skipped_log.write(f"Error processing {npz_file}: {str(e)}\n")
                continue
    if not all_features:
        print("No ASCII features were loaded.")
        exit(1)
    return np.array(all_features), image_names, family_names


class TrimmedKMeans:
    """K-means variant that repeatedly refits after discarding the
    trim_ratio fraction of points farthest from their nearest center.

    After fit(), exposes labels_ (for ALL points, including trimmed ones),
    cluster_centers_, inertia_ and n_trim, matching the wrapper classes'
    attribute API.
    """

    def __init__(self, n_clusters, trim_ratio=0.02, random_state=None, max_iter=100):
        self.n_clusters = n_clusters
        self.trim_ratio = trim_ratio
        self.random_state = random_state
        self.max_iter = max_iter
        self.kmeans = KMeans(n_clusters=n_clusters, random_state=random_state)
        self.labels_ = None
        self.cluster_centers_ = None
        self.inertia_ = None
        self.n_trim = None

    @staticmethod
    def _distances(X, centers):
        """Euclidean distance from every row of X to every center, shape (n, k).

        Vectorized with broadcasting; the previous implementation computed
        these one point at a time in a Python loop.
        """
        diffs = X[:, np.newaxis, :] - centers[np.newaxis, :, :]
        return np.sqrt(np.sum(diffs ** 2, axis=2))

    def fit(self, X):
        """Run the trim-and-refit loop, keeping the best (lowest) inertia."""
        n_samples = X.shape[0]
        self.n_trim = int(np.ceil(n_samples * self.trim_ratio))
        n_keep = n_samples - self.n_trim

        # Initial clustering on the full data set.
        self.kmeans.fit(X)
        best_inertia = float('inf')
        best_labels = None
        best_centers = None

        for _ in range(self.max_iter):
            # Distance from each point to its nearest current center.
            nearest = self._distances(X, self.kmeans.cluster_centers_).min(axis=1)

            # Keep only the n_keep closest points, drop the outliers.
            keep_indices = np.argsort(nearest)[:n_keep]
            X_trimmed = X[keep_indices]

            # Refit on the trimmed data.
            self.kmeans.fit(X_trimmed)

            # NOTE: inertia here is the sum of *unsquared* nearest-center
            # distances over kept points (not sklearn's squared-sum), kept
            # for backward compatibility with existing results.
            current_inertia = self._distances(
                X_trimmed, self.kmeans.cluster_centers_).min(axis=1).sum()

            if current_inertia < best_inertia:
                best_inertia = current_inertia
                best_centers = self.kmeans.cluster_centers_.copy()
                # Assign every point (including trimmed ones) to its
                # nearest of the new centers.
                best_labels = self._distances(X, best_centers).argmin(axis=1)
            else:
                # No improvement: stop iterating.
                break

        self.labels_ = best_labels
        self.cluster_centers_ = best_centers
        self.inertia_ = best_inertia
        return self

    def predict(self, X):
        """Assign each row of X to the nearest fitted cluster center."""
        return self._distances(X, self.cluster_centers_).argmin(axis=1)


class KMeansWrapper:
    """Thin adapter around sklearn's KMeans.

    Mirrors the attribute surface of TrimmedKMeans (labels_,
    cluster_centers_, inertia_, n_trim) so the analysis pipeline can treat
    both clustering backends uniformly.
    """

    def __init__(self, n_clusters, random_state=None):
        self.n_clusters = n_clusters
        self.random_state = random_state
        self.kmeans = KMeans(n_clusters=n_clusters, random_state=random_state)
        self.labels_ = None
        self.cluster_centers_ = None
        self.inertia_ = None
        self.n_trim = 0  # For API compatibility with TrimmedKMeans

    def fit(self, X):
        """Fit the underlying KMeans and copy its result attributes."""
        fitted = self.kmeans.fit(X)
        self.labels_ = fitted.labels_
        self.cluster_centers_ = fitted.cluster_centers_
        self.inertia_ = fitted.inertia_
        return self

    def predict(self, X):
        """Delegate cluster assignment to the fitted KMeans."""
        return self.kmeans.predict(X)


class DBSCANWrapper:
    """DBSCAN with the same attribute API as the k-means wrappers.

    Exposes labels_, cluster_centers_, inertia_ and n_trim so the pipeline
    can handle all clustering backends uniformly.  DBSCAN labels noise as
    -1; noise points are excluded from centers and inertia.
    """

    def __init__(self, eps=0.5, min_samples=5, random_state=None):
        self.eps = eps
        self.min_samples = min_samples
        self.random_state = random_state  # unused by DBSCAN; kept for API compatibility
        self.dbscan = DBSCAN(eps=eps, min_samples=min_samples)
        self.labels_ = None
        self.cluster_centers_ = None
        self.inertia_ = None
        self.n_trim = 0  # For API compatibility

    def fit(self, X):
        """Cluster X, then derive per-cluster mean centers and a
        k-means-style inertia (sum of squared distances to own center)."""
        self.dbscan.fit(X)
        self.labels_ = self.dbscan.labels_

        # Non-noise labels in sorted order; centers are stored in this order.
        cluster_labels = [lbl for lbl in np.unique(self.labels_) if lbl != -1]

        if cluster_labels:
            self.cluster_centers_ = np.array(
                [X[self.labels_ == lbl].mean(axis=0) for lbl in cluster_labels]
            )
            # Explicit label -> center-row map.  The previous code indexed
            # centers by the label's position in np.unique(labels_)
            # *including* the noise label, which shifted every center by
            # one whenever noise was present and silently dropped the last
            # cluster from the inertia sum.
            label_to_row = {lbl: i for i, lbl in enumerate(cluster_labels)}
            self.inertia_ = 0.0
            for point, lbl in zip(X, self.labels_):
                if lbl != -1:  # Only consider non-noise points
                    center = self.cluster_centers_[label_to_row[lbl]]
                    self.inertia_ += np.sum((point - center) ** 2)
        else:
            self.cluster_centers_ = np.array([])
            self.inertia_ = float('inf')

        return self

    def predict(self, X):
        """Re-run DBSCAN on X.  DBSCAN has no out-of-sample predict, so
        this refits on the given data rather than assigning to the
        clusters found by fit()."""
        return self.dbscan.fit_predict(X)


class HDBSCANWrapper:
    """HDBSCAN with the same attribute API as the other clustering wrappers.

    Exposes labels_, cluster_centers_, inertia_, n_trim and additionally
    probabilities_ (HDBSCAN's per-point membership strength).  Noise points
    (label -1) are excluded from centers and inertia.
    """

    def __init__(self, min_cluster_size=5, min_samples=None, cluster_selection_epsilon=0.0,
                 cluster_selection_method='eom', random_state=None):
        if not HAS_HDBSCAN:
            raise ImportError("hdbscan is required for HDBSCAN clustering. Install with: pip install hdbscan")

        self.min_cluster_size = min_cluster_size
        self.min_samples = min_samples
        self.cluster_selection_epsilon = cluster_selection_epsilon
        self.cluster_selection_method = cluster_selection_method
        self.random_state = random_state  # unused by hdbscan; kept for API compatibility
        self.hdbscan = hdbscan.HDBSCAN(
            min_cluster_size=min_cluster_size,
            min_samples=min_samples,
            cluster_selection_epsilon=cluster_selection_epsilon,
            cluster_selection_method=cluster_selection_method
        )
        self.labels_ = None
        self.cluster_centers_ = None
        self.inertia_ = None
        self.n_trim = 0  # For API compatibility
        self.probabilities_ = None

    def fit(self, X):
        """Cluster X, then derive per-cluster mean centers and a
        k-means-style inertia (sum of squared distances to own center)."""
        self.hdbscan.fit(X)
        self.labels_ = self.hdbscan.labels_
        self.probabilities_ = self.hdbscan.probabilities_

        # Non-noise labels in sorted order; centers are stored in this order.
        cluster_labels = [lbl for lbl in np.unique(self.labels_) if lbl != -1]

        if cluster_labels:
            self.cluster_centers_ = np.array(
                [X[self.labels_ == lbl].mean(axis=0) for lbl in cluster_labels]
            )
            # Explicit label -> center-row map.  The previous code indexed
            # centers by the label's position in np.unique(labels_)
            # *including* the noise label, which shifted every center by
            # one whenever noise was present and silently dropped the last
            # cluster from the inertia sum.
            label_to_row = {lbl: i for i, lbl in enumerate(cluster_labels)}
            self.inertia_ = 0.0
            for point, lbl in zip(X, self.labels_):
                if lbl != -1:  # Only consider non-noise points
                    center = self.cluster_centers_[label_to_row[lbl]]
                    self.inertia_ += np.sum((point - center) ** 2)
        else:
            self.cluster_centers_ = np.array([])
            self.inertia_ = float('inf')

        return self

    def predict(self, X):
        """Re-run HDBSCAN on X.  This refits on the given data rather than
        assigning points to the clusters found by fit()."""
        return self.hdbscan.fit_predict(X)


def run_analysis(all_features, image_names, family_names, analysis_dir, clustering_method='kmeans', use_trimmed=True, trim_ratio=0.02, eps=0.5, min_samples=5, analyze_eps=False, min_cluster_size=5, cluster_selection_epsilon=0.0, cluster_selection_method='eom', max_pca_components=None, variance_threshold=0.95, skip_pca_analysis=False, save_pca_model_path=None, load_pca_model_path=None, use_clustering_evaluation=False, n_clusters_eval=10):
    os.makedirs(analysis_dir, exist_ok=True)
    # Load or create PCA model
    if load_pca_model_path is not None:
        # Load existing PCA model
        print(f"Loading PCA model from {load_pca_model_path}...")
        pca, scaler, metadata = load_pca_model(load_pca_model_path)
        
        # Apply the loaded model
        pca_features = apply_pca_model(all_features, pca, scaler)
        print(f"Applied loaded PCA model: {pca.n_components_} components")
        
        if metadata:
            print(f"Model metadata: {metadata}")
    else:
        # Normalize features
        scaler = StandardScaler()
        normalized_features = scaler.fit_transform(all_features)
        
        # Dimensionality reduction with PCA for clustering
        if skip_pca_analysis:
            # Use the old approach with fixed 50 components
            print("Using IncrementalPCA for dimensionality reduction (skipping variance analysis)...")
            pca = IncrementalPCA(n_components=min(50, len(normalized_features)))
            pca_features = pca.fit_transform(normalized_features)
            print(f"Using 50 PCA components with {sum(pca.explained_variance_ratio_):.4f} explained variance")
        else:
            # Use variance analysis to determine optimal dimensions for clustering
            print("Analyzing PCA variance to determine optimal dimensions for clustering...")
            
            # Use the function parameters for clustering-aware evaluation
            
            if use_clustering_evaluation:
                print("Using clustering-aware PCA evaluation...")
                optimal_n_components, pca, explained_variance = analyze_pca_variance_with_clustering(
                    normalized_features, 
                    max_components=max_pca_components,
                    variance_threshold=variance_threshold,
                    analysis_dir=analysis_dir,
                    use_clustering_evaluation=True,
                    n_clusters=n_clusters_eval
                )
            else:
                optimal_n_components, pca, explained_variance = analyze_pca_variance(
                    normalized_features, 
                    max_components=max_pca_components,
                    variance_threshold=variance_threshold,
                    analysis_dir=analysis_dir
                )
            pca_features = pca.transform(normalized_features)
            print(f"Using {optimal_n_components} PCA components for clustering with {explained_variance:.4f} explained variance")
        
        # Save PCA model if requested
        if save_pca_model_path is not None:
            metadata = {
                'n_components': pca.n_components_,
                'explained_variance_ratio': pca.explained_variance_ratio_.tolist(),
                'cumulative_explained_variance': sum(pca.explained_variance_ratio_),
                'variance_threshold': variance_threshold,
                'max_pca_components': max_pca_components,
                'skip_pca_analysis': skip_pca_analysis,
                'feature_shape': all_features.shape
            }
            save_pca_model(pca, scaler, save_pca_model_path, metadata)
    
    # Run eps analysis if requested
    if analyze_eps:
        plot_eps_analysis(pca_features, analysis_dir)
        print("Eps analysis complete. Check the generated plots and CSV file for parameter suggestions.")
        return
    

    
    if clustering_method == 'hdbscan':
        # HDBSCAN clustering
        print(f"Running HDBSCAN clustering (min_cluster_size={min_cluster_size}, min_samples={min_samples}, cluster_selection_method={cluster_selection_method})...")
        hdbscan_clusterer = HDBSCANWrapper(
            min_cluster_size=min_cluster_size, 
            min_samples=min_samples,
            cluster_selection_epsilon=cluster_selection_epsilon,
            cluster_selection_method=cluster_selection_method
        )
        hdbscan_clusterer.fit(pca_features)
        clusters_final = hdbscan_clusterer.labels_
        
        # For HDBSCAN, we don't need K selection, but we can analyze the results
        n_clusters = len(set(clusters_final)) - (1 if -1 in clusters_final else 0)
        n_noise = list(clusters_final).count(-1)
        print(f"HDBSCAN found {n_clusters} clusters with {n_noise} noise points")
        
        # Calculate silhouette score (excluding noise points)
        non_noise_mask = clusters_final != -1
        if len(set(clusters_final[non_noise_mask])) > 1:
            silhouette_score_final = silhouette_score(pca_features[non_noise_mask], clusters_final[non_noise_mask])
            print(f"Silhouette score (excluding noise): {silhouette_score_final:.4f}")
        else:
            silhouette_score_final = -1
            print("Cannot calculate silhouette score (need at least 2 clusters)")
        
        # For HDBSCAN, noise points are like trimmed points
        noise_indices = np.where(clusters_final == -1)[0]
        trimmed_indices = noise_indices
        
        # Save HDBSCAN results
        silhouette_csv_path = os.path.join(analysis_dir, 'hdbscan_results.csv')
        with open(silhouette_csv_path, 'w') as f:
            f.write('metric,value\n')
            f.write(f'n_clusters,{n_clusters}\n')
            f.write(f'n_noise_points,{n_noise}\n')
            f.write(f'silhouette_score,{silhouette_score_final}\n')
            f.write(f'min_cluster_size,{min_cluster_size}\n')
            f.write(f'min_samples,{min_samples}\n')
            f.write(f'cluster_selection_epsilon,{cluster_selection_epsilon}\n')
            f.write(f'cluster_selection_method,{cluster_selection_method}\n')
        print(f"HDBSCAN results saved to {silhouette_csv_path}")
        
    elif clustering_method == 'dbscan':
        # DBSCAN clustering
        print(f"Running DBSCAN clustering (eps={eps}, min_samples={min_samples})...")
        dbscan = DBSCANWrapper(eps=eps, min_samples=min_samples, random_state=42)
        dbscan.fit(pca_features)
        clusters_final = dbscan.labels_
        
        # For DBSCAN, we don't need K selection, but we can analyze the results
        n_clusters = len(set(clusters_final)) - (1 if -1 in clusters_final else 0)
        n_noise = list(clusters_final).count(-1)
        print(f"DBSCAN found {n_clusters} clusters with {n_noise} noise points")
        
        # Calculate silhouette score (excluding noise points)
        non_noise_mask = clusters_final != -1
        if len(set(clusters_final[non_noise_mask])) > 1:
            silhouette_score_final = silhouette_score(pca_features[non_noise_mask], clusters_final[non_noise_mask])
            print(f"Silhouette score (excluding noise): {silhouette_score_final:.4f}")
        else:
            silhouette_score_final = -1
            print("Cannot calculate silhouette score (need at least 2 clusters)")
        
        # For DBSCAN, noise points are like trimmed points
        noise_indices = np.where(clusters_final == -1)[0]
        trimmed_indices = noise_indices
        
        # Save DBSCAN results
        silhouette_csv_path = os.path.join(analysis_dir, 'dbscan_results.csv')
        with open(silhouette_csv_path, 'w') as f:
            f.write('metric,value\n')
            f.write(f'n_clusters,{n_clusters}\n')
            f.write(f'n_noise_points,{n_noise}\n')
            f.write(f'silhouette_score,{silhouette_score_final}\n')
            f.write(f'eps,{eps}\n')
            f.write(f'min_samples,{min_samples}\n')
        print(f"DBSCAN results saved to {silhouette_csv_path}")
        
    else:
        # K-means clustering (normal or trimmed)
        clustering_type = "trimmed k-means" if use_trimmed else "normal k-means"
        print(f"Running {clustering_type} clustering...")
        if use_trimmed:
            print(f"Using trim ratio: {trim_ratio}")
        K_range = range(2, 512)
        wcss = []
        silhouette_scores = []
        
        for k in tqdm(K_range, desc=f"{clustering_type} for K selection"):
            if use_trimmed:
                kmeans = TrimmedKMeans(n_clusters=k, trim_ratio=trim_ratio, random_state=42)
            else:
                kmeans = KMeansWrapper(n_clusters=k, random_state=42)
            kmeans.fit(pca_features)
            wcss.append(kmeans.inertia_)
            
            # For silhouette score, we only consider non-trimmed points for trimmed k-means
            if use_trimmed:
                n_samples = len(pca_features)
                distances = np.zeros(n_samples)
                for i in range(n_samples):
                    point_distances = np.sqrt(np.sum((pca_features[i] - kmeans.cluster_centers_) ** 2, axis=1))
                    distances[i] = np.min(point_distances)
                
                n_keep = len(pca_features) - kmeans.n_trim
                keep_indices = np.argsort(distances)[:n_keep]
                
                if len(set(kmeans.labels_[keep_indices])) > 1:  # Need at least 2 clusters for silhouette score
                    silhouette_scores.append(silhouette_score(pca_features[keep_indices], 
                                                            kmeans.labels_[keep_indices]))
                else:
                    silhouette_scores.append(-1)  # Invalid score for single cluster
            else:
                if len(set(kmeans.labels_)) > 1:  # Need at least 2 clusters for silhouette score
                    silhouette_scores.append(silhouette_score(pca_features, kmeans.labels_))
                else:
                    silhouette_scores.append(-1)  # Invalid score for single cluster
        
        best_k = list(K_range)[np.argmax(silhouette_scores)]
        print(f"Best K by silhouette score: {best_k} (score={max(silhouette_scores):.4f})")
        
        # Final clustering with best K
        if use_trimmed:
            kmeans_final = TrimmedKMeans(n_clusters=best_k, trim_ratio=trim_ratio, random_state=42)
        else:
            kmeans_final = KMeansWrapper(n_clusters=best_k, random_state=42)
        kmeans_final.fit(pca_features)
        clusters_final = kmeans_final.labels_
        
        # Get trimmed points for visualization (empty for normal k-means)
        n_samples = len(pca_features)
        distances_final = np.zeros(n_samples)
        for i in range(n_samples):
            point_distances = np.sqrt(np.sum((pca_features[i] - kmeans_final.cluster_centers_) ** 2, axis=1))
            distances_final[i] = np.min(point_distances)
        
        if use_trimmed:
            trimmed_indices = np.argsort(distances_final)[len(pca_features) - kmeans_final.n_trim:]
        else:
            trimmed_indices = np.array([], dtype=int)  # Empty array for normal k-means
        
        # Save silhouette scores as CSV
        silhouette_csv_path = os.path.join(analysis_dir, 'silhouette_scores.csv')
        with open(silhouette_csv_path, 'w') as f:
            f.write('K,silhouette_score\n')
            for k, score in zip(K_range, silhouette_scores):
                f.write(f'{k},{score}\n')
        print(f"Silhouette scores saved to {silhouette_csv_path}")
    
    # Save results
    np.savez_compressed(os.path.join(analysis_dir, 'clustering_results.npz'),
                       pca_features=pca_features,
                       image_names=np.array(image_names),
                       clusters=clusters_final,
                       trimmed_indices=trimmed_indices)
    
    feature_df = pd.DataFrame(pca_features)
    feature_df['name'] = image_names
    feature_df['cluster'] = clusters_final
    feature_df['trimmed'] = False
    feature_df.loc[trimmed_indices, 'trimmed'] = True
    feature_df.to_csv(os.path.join(analysis_dir, 'feature_analysis.csv'), index=False)
    

    
    # Only plot Elbow Method and Silhouette Score for K-means
    if clustering_method == 'kmeans':
        # Plot Elbow Method
        plt.figure()
        plt.plot(list(K_range), wcss, 'bo-')
        plt.xlabel('Number of clusters (K)')
        plt.ylabel('Within-cluster sum of squares (WCSS)')
        plt.title('Elbow Method For Optimal K')
        plt.tight_layout()
        plt.savefig(os.path.join(analysis_dir, 'elbow_method.png'))
        plt.close()
        # Plot Silhouette Score
        plt.figure()
        plt.plot(list(K_range), silhouette_scores, 'ro-')
        plt.xlabel('Number of clusters (K)')
        plt.ylabel('Silhouette Score')
        plt.title('Silhouette Score For Optimal K')
        plt.tight_layout()
        plt.savefig(os.path.join(analysis_dir, 'silhouette_score.png'))
        plt.close()
    print(f"Analysis complete. Results saved to {analysis_dir}")
    
    # --- Confusion Matrix: Family vs Cluster ---
    print("Creating confusion matrix (family vs cluster)...")
    
    if clustering_method in ['dbscan', 'hdbscan']:
        # For DBSCAN/HDBSCAN, exclude noise points from family analysis
        valid_clusters = [cl for cl in clusters_final if cl != -1]
        valid_family_names = [fam for fam, cl in zip(family_names, clusters_final) if cl != -1]
        valid_image_names = [name for name, cl in zip(image_names, clusters_final) if cl != -1]
        
        families = sorted(set(valid_family_names))
        n_clusters = len(set(valid_clusters))
        family_to_idx = {fam: i for i, fam in enumerate(families)}
        cluster_to_idx = {cl: i for i, cl in enumerate(sorted(set(valid_clusters)))}
        matrix = np.zeros((len(families), n_clusters), dtype=float)
        family_counts = Counter(valid_family_names)
        
        for fam, cl in zip(valid_family_names, valid_clusters):
            i = family_to_idx[fam]
            j = cluster_to_idx[cl]
            matrix[i, j] += 1
        
        for i, fam in enumerate(families):
            if family_counts[fam] > 0:
                matrix[i, :] /= family_counts[fam]
        
        cm_df = pd.DataFrame(matrix, index=families, columns=[f"cluster_{c}" for c in sorted(set(valid_clusters))])
        cm_df.to_csv(os.path.join(analysis_dir, "family_cluster_confusion_matrix.csv"))
        
        method_name = 'HDBSCAN' if clustering_method == 'hdbscan' else 'DBSCAN'
        plt.figure(figsize=(max(8, n_clusters//0.9), max(6, len(families)//3.3)))
        sns.heatmap(cm_df, annot=True, fmt=".2f", cmap="Blues")
        plt.title(f"Normalized Confusion Matrix: Family vs Cluster ({method_name}, excluding noise)")
        plt.ylabel("Family")
        plt.xlabel("Cluster")
        plt.tight_layout()
        plt.savefig(os.path.join(analysis_dir, "family_cluster_confusion_matrix.png"))
        plt.close()
        
        # Dominant families analysis for DBSCAN/HDBSCAN
        family_cluster_counts = {fam: {cl: 0 for cl in sorted(set(valid_clusters))} for fam in families}
        family_total_counts = Counter(valid_family_names)
        
        for fam, cl in zip(valid_family_names, valid_clusters):
            family_cluster_counts[fam][cl] += 1
        
        dominant_dir = os.path.join(analysis_dir, 'dominant_families')
        os.makedirs(dominant_dir, exist_ok=True)
        non_dom_dir = os.path.join(analysis_dir, 'non_dominant_families')
        os.makedirs(non_dom_dir, exist_ok=True)
        
        dominant_families = set()
        dominant_family_to_clusters = dict()
        
        for fam in families:
            total = family_total_counts[fam]
            dom_clusters = []
            for cl in sorted(set(valid_clusters)):
                count = family_cluster_counts[fam][cl]
                if total > 0 and count / total >= 0.8:
                    dom_clusters.append(cl)
            if dom_clusters:
                dominant_families.add(fam)
                dominant_family_to_clusters[fam] = dom_clusters
                for cl in dom_clusters:
                    with open(os.path.join(dominant_dir, f'cluster_{cl}.txt'), 'a') as f:
                        f.write(f"{fam}\n")
            else:
                fam_file = os.path.join(non_dom_dir, f'family_{fam}.md')
                with open(fam_file, 'w') as f:
                    for cl in sorted(set(valid_clusters)):
                        uuids = [extract_uuid_and_family(name + '.npz')[0] for name, fam_name, c in zip(valid_image_names, valid_family_names, valid_clusters) if fam_name == fam and c == cl]
                        if uuids:
                            f.write(f"## Cluster {cl}:\n")
                            for uuid in uuids:
                                f.write(f"  {uuid}\n")
    else:
        # Original K-means logic
        families = sorted(set(family_names))
        n_clusters = len(set(clusters_final))
        family_to_idx = {fam: i for i, fam in enumerate(families)}
        cluster_to_idx = {cl: i for i, cl in enumerate(sorted(set(clusters_final)))}
        matrix = np.zeros((len(families), n_clusters), dtype=float)
        family_counts = Counter(family_names)
        for fam, cl in zip(family_names, clusters_final):
            i = family_to_idx[fam]
            j = cluster_to_idx[cl]
            matrix[i, j] += 1
        for i, fam in enumerate(families):
            if family_counts[fam] > 0:
                matrix[i, :] /= family_counts[fam]
        cm_df = pd.DataFrame(matrix, index=families, columns=[f"cluster_{c}" for c in sorted(set(clusters_final))])
        cm_df.to_csv(os.path.join(analysis_dir, "family_cluster_confusion_matrix.csv"))
        plt.figure(figsize=(max(8, n_clusters//0.9), max(6, len(families)//3.3)))
        sns.heatmap(cm_df, annot=True, fmt=".2f", cmap="Blues")
        plt.title("Normalized Confusion Matrix: Family vs Cluster")
        plt.ylabel("Family")
        plt.xlabel("Cluster")
        plt.tight_layout()
        plt.savefig(os.path.join(analysis_dir, "family_cluster_confusion_matrix.png"))
        plt.close()
        # --- Dominant and Non-dominant Family Log (REVISED) ---
        family_cluster_counts = {fam: [0]*n_clusters for fam in families}
        family_total_counts = Counter(family_names)
        for fam, cl in zip(family_names, clusters_final):
            family_cluster_counts[fam][cl] += 1
        dominant_dir = os.path.join(analysis_dir, 'dominant_families')
        os.makedirs(dominant_dir, exist_ok=True)
        non_dom_dir = os.path.join(analysis_dir, 'non_dominant_families')
        os.makedirs(non_dom_dir, exist_ok=True)
        dominant_families = set()
        dominant_family_to_clusters = dict()
        for fam in families:
            total = family_total_counts[fam]
            dom_clusters = []
            for cl in range(n_clusters):
                count = family_cluster_counts[fam][cl]
                if total > 0 and count / total >= 0.8:
                    dom_clusters.append(cl)
            if dom_clusters:
                dominant_families.add(fam)
                dominant_family_to_clusters[fam] = dom_clusters
                for cl in dom_clusters:
                    with open(os.path.join(dominant_dir, f'cluster_{cl}.txt'), 'a') as f:
                        f.write(f"{fam}\n")
            else:
                fam_file = os.path.join(non_dom_dir, f'family_{fam}.md')
                with open(fam_file, 'w') as f:
                    for cl in range(n_clusters):
                        uuids = [extract_uuid_and_family(name + '.npz')[0] for name, fam_name, c in zip(image_names, family_names, clusters_final) if fam_name == fam and c == cl]
                        if uuids:
                            f.write(f"## Cluster {cl}:\n")
                            for uuid in uuids:
                                f.write(f"  {uuid}\n")
    
    print(f"Confusion matrix saved to {analysis_dir}")
    
    # --- Clustered Instances Log ---
    clustered_dir = os.path.join(analysis_dir, 'clustered_instances')
    os.makedirs(clustered_dir, exist_ok=True)
    
    # Handle different clustering methods
    if clustering_method in ['dbscan', 'hdbscan']:
        # For DBSCAN/HDBSCAN, handle noise points (-1) separately
        unique_clusters = sorted(set(clusters_final))
        cluster_to_uuids = {}
        
        for cl in unique_clusters:
            if cl == -1:
                # Noise points
                cluster_to_uuids['noise'] = []
            else:
                cluster_to_uuids[cl] = []
        
        for name, cl in zip(image_names, clusters_final):
            uuid, _ = extract_uuid_and_family(name + '.npz')
            if cl == -1:
                cluster_to_uuids['noise'].append(uuid)
            else:
                cluster_to_uuids[cl].append(uuid)
        
        # Save cluster files
        for cl, uuids in cluster_to_uuids.items():
            if cl == 'noise':
                filename = 'noise_points.txt'
            else:
                filename = f'cluster_{cl}.txt'
            with open(os.path.join(clustered_dir, filename), 'w') as f:
                for uuid in uuids:
                    f.write(f"{uuid}\n")
    else:
        # For K-means (normal or trimmed)
        if use_trimmed and len(trimmed_indices) > 0:
            # For trimmed K-means, save trimmed points as noise
            cluster_to_uuids = {i: [] for i in range(n_clusters)}
            cluster_to_uuids['noise'] = []  # Add noise category for trimmed points
            
            for i, (name, cl) in enumerate(zip(image_names, clusters_final)):
                uuid, _ = extract_uuid_and_family(name + '.npz')
                if i in trimmed_indices:
                    cluster_to_uuids['noise'].append(uuid)
                else:
                    cluster_to_uuids[cl].append(uuid)
            
            # Save cluster files
            for cl, uuids in cluster_to_uuids.items():
                if cl == 'noise':
                    filename = 'noise_points.txt'
                else:
                    filename = f'cluster_{cl}.txt'
                with open(os.path.join(clustered_dir, filename), 'w') as f:
                    for uuid in uuids:
                        f.write(f"{uuid}\n")
        else:
            # For normal K-means, use numeric cluster labels
            cluster_to_uuids = {i: [] for i in range(n_clusters)}
            for name, cl in zip(image_names, clusters_final):
                uuid, _ = extract_uuid_and_family(name + '.npz')
                cluster_to_uuids[cl].append(uuid)
            for cl, uuids in cluster_to_uuids.items():
                with open(os.path.join(clustered_dir, f'cluster_{cl}.txt'), 'w') as f:
                    for uuid in uuids:
                        f.write(f"{uuid}\n")
    
    # Final report for both methods
    report_path = os.path.join(analysis_dir, 'dominant_families_report.txt')
    with open(report_path, 'w') as f:
        f.write("Dominant Families Report\n")
        f.write("-----------------------\n")
        f.write(f"Total families: {len(families)}\n")
        f.write(f"Dominant families: {len(dominant_families)}\n")
        ratio = len(dominant_families) / len(families) if len(families) > 0 else 0
        f.write(f"Ratio: {ratio:.4f}\n\n")
        f.write("Dominant families (family_name: dominant_cluster(s)):\n")
        for fam in sorted(dominant_families):
            clusters_str = ','.join(str(cl) for cl in dominant_family_to_clusters[fam])
            f.write(f"{fam}: {clusters_str}\n")
    print(f"Dominant families report saved to {report_path}")


def suggest_eps_values(X, k_range=range(5, 21)):
    """
    Suggest eps values for DBSCAN using k-nearest-neighbor distance percentiles.

    For each k, fits a k-NN model on X and looks at the distance to each
    point's k-th neighbor; the 50th/75th/90th percentiles of those distances
    are reasonable eps candidates (the classic k-distance heuristic).

    Args:
        X: Input data (n_samples, n_features)
        k_range: Range of k values to test

    Returns:
        dict: Mapping k -> {'eps_50', 'eps_75', 'eps_90',
                            'min_distance', 'max_distance'}
    """
    print("Analyzing data distribution to suggest eps values...")

    eps_suggestions = {}

    for k in k_range:
        # Fit k-nearest neighbors; neighbor indices are not needed here.
        nbrs = NearestNeighbors(n_neighbors=k).fit(X)
        distances, _ = nbrs.kneighbors(X)

        # Distance to the k-th nearest neighbor of each point.
        # NOTE(review): querying the training set itself means each point's
        # first neighbor is itself (distance 0), so column k-1 is really the
        # (k-1)-th true neighbor -- confirm this matches the intended heuristic.
        k_distances = distances[:, -1]

        # Percentiles (and min/max) do not require a pre-sorted array,
        # so the previous np.sort pass was redundant and has been dropped.
        eps_50, eps_75, eps_90 = np.percentile(k_distances, [50, 75, 90])

        eps_suggestions[k] = {
            'eps_50': eps_50,
            'eps_75': eps_75,
            'eps_90': eps_90,
            'min_distance': np.min(k_distances),
            'max_distance': np.max(k_distances)
        }

    return eps_suggestions


def plot_eps_analysis(X, analysis_dir, k_range=range(5, 21)):
    """
    Plot k-distance graphs to help visualize eps selection for DBSCAN.

    Saves one subplot per k showing the sorted k-th-neighbor distances with
    the 75th/90th-percentile eps suggestions marked, plus a CSV of all
    suggested values.

    Args:
        X: Input data (n_samples, n_features)
        analysis_dir: Directory to save plots and the suggestions CSV
        k_range: Range of k values to test

    Returns:
        dict: The eps suggestions from suggest_eps_values.
    """
    print("Generating eps analysis plots...")

    eps_suggestions = suggest_eps_values(X, k_range)

    # Size the subplot grid to the number of k values. The previous
    # hard-coded 4x4 grid raised ValueError whenever k_range had more
    # than 16 entries.
    k_values = list(k_range)
    n_cols = 4
    n_rows = max(1, (len(k_values) + n_cols - 1) // n_cols)

    # Grow the figure height with the row count (unchanged for the
    # default 16-value range: max(10, 2.5*4) == 10).
    plt.figure(figsize=(15, max(10, 2.5 * n_rows)))

    for i, k in enumerate(k_values):
        plt.subplot(n_rows, n_cols, i + 1)

        # Recompute the k-NN distances for this k.
        # NOTE(review): suggest_eps_values already fit the same models but
        # only returned percentiles, so the raw curves must be rebuilt here.
        nbrs = NearestNeighbors(n_neighbors=k).fit(X)
        distances, _ = nbrs.kneighbors(X)
        k_distances_sorted = np.sort(distances[:, -1])

        # Plot sorted distances (the "elbow" indicates a good eps).
        plt.plot(range(len(k_distances_sorted)), k_distances_sorted, 'b-', alpha=0.7)

        # Mark suggested eps values.
        eps_75 = eps_suggestions[k]['eps_75']
        eps_90 = eps_suggestions[k]['eps_90']

        plt.axhline(y=eps_75, color='orange', linestyle='--', alpha=0.7, label=f'75%: {eps_75:.3f}')
        plt.axhline(y=eps_90, color='red', linestyle='--', alpha=0.7, label=f'90%: {eps_90:.3f}')

        plt.title(f'k={k}')
        plt.xlabel('Points')
        plt.ylabel('Distance')
        plt.legend(fontsize=8)
        plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(os.path.join(analysis_dir, 'eps_analysis.png'), dpi=150, bbox_inches='tight')
    plt.close()

    # Save eps suggestions to CSV for offline inspection.
    eps_csv_path = os.path.join(analysis_dir, 'eps_suggestions.csv')
    with open(eps_csv_path, 'w') as f:
        f.write('k,eps_50,eps_75,eps_90,min_distance,max_distance\n')
        for k in k_values:
            suggestions = eps_suggestions[k]
            f.write(f'{k},{suggestions["eps_50"]:.6f},{suggestions["eps_75"]:.6f},'
                   f'{suggestions["eps_90"]:.6f},{suggestions["min_distance"]:.6f},'
                   f'{suggestions["max_distance"]:.6f}\n')

    print(f"Eps analysis saved to {analysis_dir}")
    print("\nSuggested eps values:")
    for k in k_values:
        suggestions = eps_suggestions[k]
        print(f"  k={k}: eps_75={suggestions['eps_75']:.4f}, eps_90={suggestions['eps_90']:.4f}")

    return eps_suggestions


def evaluate_pca_with_clustering(X, n_components, n_clusters=10, random_state=42):
    """
    Score a candidate PCA dimensionality by k-means clustering quality.

    Projects X onto `n_components` principal components, clusters the
    projection with k-means, and reports clustering/variance metrics.

    Args:
        X: Input data (n_samples, n_features)
        n_components: Number of PCA components to test
        n_clusters: Number of clusters for the k-means evaluation
        random_state: Random state for reproducibility

    Returns:
        dict: silhouette_score, inertia, explained_variance_ratio,
              n_components, normalized_inertia
    """
    # Project onto the candidate number of components.
    projector = IncrementalPCA(n_components=n_components)
    projected = projector.fit_transform(X)

    # Cluster the projected data.
    km = KMeans(n_clusters=n_clusters, random_state=random_state, n_init=10)
    cluster_labels = km.fit_predict(projected)

    # Silhouette is undefined when everything lands in one cluster;
    # use -1 as the "invalid clustering" sentinel in that case.
    if len(set(cluster_labels)) > 1:
        silhouette = silhouette_score(projected, cluster_labels)
    else:
        silhouette = -1

    return {
        # Higher is better.
        'silhouette_score': silhouette,
        # Within-cluster sum of squares; lower is better.
        'inertia': km.inertia_,
        # Total variance retained by the projection.
        'explained_variance_ratio': sum(projector.explained_variance_ratio_),
        'n_components': n_components,
        # Inertia per component, to penalize needlessly high dimensionality.
        'normalized_inertia': km.inertia_ / n_components,
    }


def analyze_pca_variance_with_clustering(X, max_components=None, variance_threshold=0.95, 
                                        analysis_dir=None, use_clustering_evaluation=False,
                                        n_clusters=10, random_state=42):
    """
    Analyze PCA variance with optional clustering-based evaluation.

    Two selection strategies:
      * variance-based (default): smallest component count whose cumulative
        explained variance reaches ``variance_threshold``;
      * clustering-aware: component count maximizing a weighted combination
        of k-means silhouette (weight 0.7) and inverted normalized inertia
        (weight 0.3), each rescaled to [0, 1].

    Args:
        X: Input data (n_samples, n_features)
        max_components: Maximum number of components to consider (default: auto-detect)
        variance_threshold: Target cumulative explained variance ratio (default: 0.95)
        analysis_dir: Directory to save analysis plots/CSVs (skipped when None)
        use_clustering_evaluation: Whether to use clustering performance for evaluation
        n_clusters: Number of clusters for k-means evaluation
        random_state: Random state for reproducibility
        
    Returns:
        tuple: (optimal_n_components, pca_object, explained_variance_ratio)
    """
    n_samples, n_features = X.shape
    
    # Auto-detect max_components if not specified
    if max_components is None:
        # For ResNet features (512-dim), allow more components
        if n_features >= 500:
            # Allow up to 80% of features for high-dimensional data
            max_components = min(n_samples, int(n_features * 0.8))
        else:
            # For lower dimensional data, use all features
            max_components = min(n_samples, n_features)
    else:
        # Never test more components than samples or features permit.
        max_components = min(max_components, n_samples, n_features)
    
    print(f"Analyzing PCA variance for {n_features} features...")
    print(f"Maximum components to test: {max_components}")
    
    if use_clustering_evaluation:
        print(f"Using clustering-aware evaluation with k-means (n_clusters={n_clusters})")
    
    # Test different numbers of components
    component_range = range(1, max_components + 1)
    explained_variances = []      # per-candidate explained_variance_ratio_ arrays
    cumulative_variances = []     # per-candidate total explained variance
    clustering_metrics = []       # per-candidate dicts from evaluate_pca_with_clustering
    
    # NOTE(review): this fits a fresh IncrementalPCA (and, optionally, a full
    # k-means) for EVERY candidate component count — max_components full passes
    # over X. A single fit at max_components plus a cumsum over
    # explained_variance_ratio_ would likely be far cheaper; confirm whether
    # IncrementalPCA truncation effects were the reason for refitting.
    for n_comp in tqdm(component_range, desc="Testing PCA components"):
        pca_temp = IncrementalPCA(n_components=n_comp)
        pca_temp.fit(X)
        explained_variances.append(pca_temp.explained_variance_ratio_)
        cumulative_variances.append(np.sum(pca_temp.explained_variance_ratio_))
        
        # Evaluate clustering performance if requested
        if use_clustering_evaluation:
            metrics = evaluate_pca_with_clustering(X, n_comp, n_clusters, random_state)
            clustering_metrics.append(metrics)
    
    # Find optimal number of components
    if use_clustering_evaluation:
        # Use clustering performance to find optimal components
        print("Finding optimal components based on clustering performance...")
        
        # Find best silhouette score (list index i corresponds to i+1 components)
        silhouette_scores = [m['silhouette_score'] for m in clustering_metrics]
        best_silhouette_idx = np.argmax(silhouette_scores)
        optimal_n_components_silhouette = best_silhouette_idx + 1
        
        # Find best normalized inertia (lower is better)
        normalized_inertias = [m['normalized_inertia'] for m in clustering_metrics]
        best_inertia_idx = np.argmin(normalized_inertias)
        optimal_n_components_inertia = best_inertia_idx + 1
        
        # Combine metrics (weighted approach)
        # Normalize scores to 0-1 range
        max_silhouette = max(silhouette_scores)
        min_inertia = min(normalized_inertias)
        
        combined_scores = []
        for i, metrics in enumerate(clustering_metrics):
            # Weight silhouette more heavily (0.7) than inertia (0.3).
            # Inertia is inverted (1 - rescaled) so higher is better for both
            # terms; degenerate spreads fall back to 0 to avoid division by zero.
            silhouette_norm = metrics['silhouette_score'] / max_silhouette if max_silhouette > 0 else 0
            inertia_norm = 1 - (metrics['normalized_inertia'] - min_inertia) / (max(normalized_inertias) - min_inertia) if max(normalized_inertias) > min_inertia else 0
            
            combined_score = 0.7 * silhouette_norm + 0.3 * inertia_norm
            combined_scores.append(combined_score)
        
        optimal_n_components = np.argmax(combined_scores) + 1
        
        print(f"Optimal components by silhouette: {optimal_n_components_silhouette}")
        print(f"Optimal components by inertia: {optimal_n_components_inertia}")
        print(f"Optimal components by combined score: {optimal_n_components}")
        print(f"Best silhouette score: {max(silhouette_scores):.4f}")
        print(f"Best normalized inertia: {min(normalized_inertias):.4f}")
        
    else:
        # Use traditional variance-based approach: first candidate whose
        # cumulative explained variance reaches the threshold.
        optimal_n_components = None
        for i, cum_var in enumerate(cumulative_variances):
            if cum_var >= variance_threshold:
                optimal_n_components = i + 1
                break
        
        if optimal_n_components is None:
            # Threshold unreachable: fall back to the largest tested count.
            optimal_n_components = max_components
            print(f"Warning: Could not reach {variance_threshold*100}% variance with {max_components} components")
    
    print(f"Optimal number of components: {optimal_n_components}")
    print(f"Explained variance: {cumulative_variances[optimal_n_components-1]:.4f}")
    
    # Create final PCA with optimal components (refit so the returned object
    # matches the chosen dimensionality exactly).
    final_pca = IncrementalPCA(n_components=optimal_n_components)
    final_pca.fit(X)
    
    # Save analysis plots if directory provided
    if analysis_dir:
        # Plot explained variance ratio
        plt.figure(figsize=(15, 5))
        
        # Plot 1: Individual explained variance (first component only)
        plt.subplot(1, 3, 1)
        plt.plot(component_range, [ev[0] if len(ev) > 0 else 0 for ev in explained_variances], 'bo-')
        plt.xlabel('Number of Components')
        plt.ylabel('Explained Variance Ratio (1st component)')
        plt.title('First Component Explained Variance')
        plt.grid(True, alpha=0.3)
        
        # Plot 2: Cumulative explained variance with threshold/optimum markers
        plt.subplot(1, 3, 2)
        plt.plot(component_range, cumulative_variances, 'ro-')
        if not use_clustering_evaluation:
            plt.axhline(y=variance_threshold, color='g', linestyle='--', 
                       label=f'{variance_threshold*100}% threshold')
        plt.axvline(x=optimal_n_components, color='g', linestyle=':', 
                   label=f'Optimal: {optimal_n_components}')
        plt.xlabel('Number of Components')
        plt.ylabel('Cumulative Explained Variance Ratio')
        plt.title('Cumulative Explained Variance')
        plt.legend()
        plt.grid(True, alpha=0.3)
        
        # Plot 3: Clustering metrics (if available) or scree plot
        plt.subplot(1, 3, 3)
        if use_clustering_evaluation and clustering_metrics:
            # Plot clustering metrics on twin y-axes (silhouette vs inertia)
            silhouette_scores = [m['silhouette_score'] for m in clustering_metrics]
            normalized_inertias = [m['normalized_inertia'] for m in clustering_metrics]
            
            ax1 = plt.gca()
            ax2 = ax1.twinx()
            
            line1 = ax1.plot(component_range, silhouette_scores, 'go-', label='Silhouette Score')
            line2 = ax2.plot(component_range, normalized_inertias, 'mo-', label='Normalized Inertia')
            
            ax1.set_xlabel('Number of Components')
            ax1.set_ylabel('Silhouette Score', color='g')
            ax2.set_ylabel('Normalized Inertia', color='m')
            ax1.tick_params(axis='y', labelcolor='g')
            ax2.tick_params(axis='y', labelcolor='m')
            
            plt.title('Clustering Performance Metrics')
            plt.grid(True, alpha=0.3)
            
            # Add a combined legend covering both axes
            lines = line1 + line2
            labels = [l.get_label() for l in lines]
            ax1.legend(lines, labels, loc='upper right')
        else:
            # Scree plot (log scale)
            first_component_variances = [ev[0] if len(ev) > 0 else 0 for ev in explained_variances]
            plt.semilogy(component_range, first_component_variances, 'go-')
            plt.xlabel('Number of Components')
            plt.ylabel('Explained Variance Ratio (log scale)')
            plt.title('Scree Plot (Log Scale)')
            plt.grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.savefig(os.path.join(analysis_dir, 'pca_variance_analysis.png'), dpi=150, bbox_inches='tight')
        plt.close()
        
        # Save variance data to CSV (schema depends on whether clustering
        # metrics were collected)
        variance_csv_path = os.path.join(analysis_dir, 'pca_variance_analysis.csv')
        with open(variance_csv_path, 'w') as f:
            if use_clustering_evaluation and clustering_metrics:
                f.write('n_components,first_component_variance,cumulative_variance,silhouette_score,normalized_inertia\n')
                for i, (ev, cum_var, metrics) in enumerate(zip(explained_variances, cumulative_variances, clustering_metrics)):
                    first_var = ev[0] if len(ev) > 0 else 0
                    f.write(f'{i+1},{first_var:.6f},{cum_var:.6f},{metrics["silhouette_score"]:.6f},{metrics["normalized_inertia"]:.6f}\n')
            else:
                f.write('n_components,first_component_variance,cumulative_variance\n')
                for i, (ev, cum_var) in enumerate(zip(explained_variances, cumulative_variances)):
                    first_var = ev[0] if len(ev) > 0 else 0
                    f.write(f'{i+1},{first_var:.6f},{cum_var:.6f}\n')
        
        print(f"PCA variance analysis saved to {analysis_dir}")
    
    return optimal_n_components, final_pca, cumulative_variances[optimal_n_components-1]


def analyze_pca_variance(X, max_components=None, variance_threshold=0.95, analysis_dir=None):
    """
    Analyze PCA variance to determine optimal number of components.

    Convenience wrapper around analyze_pca_variance_with_clustering with
    the clustering-based evaluation switched off (pure variance criterion).

    Args:
        X: Input data (n_samples, n_features)
        max_components: Maximum number of components to consider (default: auto-detect)
        variance_threshold: Target cumulative explained variance ratio (default: 0.95)
        analysis_dir: Directory to save analysis plots

    Returns:
        tuple: (optimal_n_components, pca_object, explained_variance_ratio)
    """
    return analyze_pca_variance_with_clustering(
        X,
        max_components=max_components,
        variance_threshold=variance_threshold,
        analysis_dir=analysis_dir,
        use_clustering_evaluation=False,
    )


def save_pca_model(pca, scaler, model_path, metadata=None):
    """
    Persist a fitted PCA model, its scaler, and optional metadata to disk.

    Writes `<model_path>_pca.pkl`, `<model_path>_scaler.pkl`, and — when
    metadata is given — `<model_path>_metadata.pkl`.

    Args:
        pca: Fitted PCA object
        scaler: Fitted StandardScaler object
        model_path: Path prefix for the output files (no extension)
        metadata: Optional metadata dictionary saved alongside the model
    """
    # Pair each artifact with its file suffix; metadata only when provided.
    artifacts = [(pca, 'pca'), (scaler, 'scaler')]
    if metadata is not None:
        artifacts.append((metadata, 'metadata'))

    for obj, suffix in artifacts:
        with open(f"{model_path}_{suffix}.pkl", 'wb') as fh:
            pickle.dump(obj, fh)

    print(f"PCA model saved to {model_path}_*.pkl")


def load_pca_model(model_path):
    """
    Load a PCA model, scaler, and optional metadata saved by save_pca_model.

    Args:
        model_path: Path prefix of the saved model files (no extension)

    Returns:
        tuple: (pca, scaler, metadata) — metadata is None when no
               `<model_path>_metadata.pkl` file exists.
    """
    def _read_pickle(path):
        # Small helper: unpickle a single artifact file.
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    pca = _read_pickle(f"{model_path}_pca.pkl")
    scaler = _read_pickle(f"{model_path}_scaler.pkl")

    # Metadata is optional; absence is not an error.
    metadata_path = f"{model_path}_metadata.pkl"
    metadata = _read_pickle(metadata_path) if os.path.exists(metadata_path) else None

    print(f"PCA model loaded from {model_path}_*.pkl")
    return pca, scaler, metadata


def apply_pca_model(X, pca, scaler):
    """
    Project new data with a previously fitted scaler + PCA pair.

    Args:
        X: Input data (n_samples, n_features)
        pca: Fitted PCA object
        scaler: Fitted StandardScaler object

    Returns:
        array: PCA-transformed data
    """
    # Standardize first, then project — same order as at training time.
    return pca.transform(scaler.transform(X))


def main():
    """
    CLI entry point.

    Parses arguments for the 'scope' or 'ascii' subcommand, handles the
    database track listing/filtering options, loads the matching feature
    set, and runs the clustering analysis.

    Fix: the two subcommand branches previously duplicated 16 identical
    keyword arguments in separate run_analysis(...) calls; the shared
    options are now built once so the branches cannot drift apart.
    """
    parser = argparse.ArgumentParser(description='Clustering and analysis of image features')
    subparsers = parser.add_subparsers(dest='command', required=True)

    # Common database arguments
    def add_db_args(parser):
        db_group = parser.add_argument_group('Database options')
        db_group.add_argument('--db', default='../meta.db', help='Path to the instance database')
        db_group.add_argument('--track', help='Track name for instances')
        db_group.add_argument('--list-tracks', action='store_true', help='List available tracks and exit')

    # Common arguments for both subcommands
    def add_common_args(parser):
        parser.add_argument('--clustering', choices=['kmeans', 'dbscan', 'hdbscan'], default='kmeans',
                           help='Clustering method to use (default: kmeans)')
        parser.add_argument('--use_trimmed', action='store_true', help='Use trimmed k-means instead of normal k-means')
        parser.add_argument('--trim_ratio', type=float, default=0.02, help='Trim ratio for trimmed k-means (default: 0.02)')
        parser.add_argument('--eps', type=float, default=0.5, help='Epsilon parameter for DBSCAN (default: 0.5)')
        parser.add_argument('--min_samples', type=int, default=5, help='Min samples parameter for DBSCAN/HDBSCAN (default: 5)')
        parser.add_argument('--min_cluster_size', type=int, default=5, help='Min cluster size for HDBSCAN (default: 5)')
        parser.add_argument('--cluster_selection_epsilon', type=float, default=0.0, help='Cluster selection epsilon for HDBSCAN (default: 0.0)')
        parser.add_argument('--cluster_selection_method', choices=['eom', 'leaf'], default='eom', help='Cluster selection method for HDBSCAN (default: eom)')
        parser.add_argument('--analyze_eps', action='store_true', help='Run eps analysis for DBSCAN parameter selection')

        # PCA parameters
        pca_group = parser.add_argument_group('PCA options')
        pca_group.add_argument('--max_pca_components', type=int, default=None,
                              help='Maximum number of PCA components to test (default: auto-detect based on data)')
        pca_group.add_argument('--variance_threshold', type=float, default=0.95,
                              help='Target cumulative explained variance ratio (default: 0.95)')
        pca_group.add_argument('--skip_pca_analysis', action='store_true',
                              help='Skip PCA variance analysis and use default 50 components')

        pca_group.add_argument('--save_pca_model', type=str, default=None,
                              help='Path to save PCA model (without extension)')
        pca_group.add_argument('--load_pca_model', type=str, default=None,
                              help='Path to load PCA model (without extension)')
        pca_group.add_argument('--use_clustering_evaluation', action='store_true',
                              help='Use k-means clustering performance to evaluate PCA quality')
        pca_group.add_argument('--n_clusters_eval', type=int, default=10,
                              help='Number of clusters for PCA evaluation (default: 10)')

        add_db_args(parser)

    # SCOPE subcommand (was SAT)
    parser_scope = subparsers.add_parser('scope', help='Analyze SCOPE (SAT) image features')
    parser_scope.add_argument('--feature_dir', type=str, default='features', help='Directory containing SCOPE feature files')
    parser_scope.add_argument('--analysis_dir', type=str, default='analysis_results', help='Directory to save SCOPE analysis results')
    parser_scope.add_argument('--sources', type=str, nargs='+', default=None,
                         help='Feature sources to include (origin, sorted). Default: all')
    add_common_args(parser_scope)

    # ASCII subcommand
    parser_ascii = subparsers.add_parser('ascii', help='Analyze ASCII-based image features')
    parser_ascii.add_argument('--feature_dir', type=str, default='ascii_features', help='Directory containing ASCII feature files')
    parser_ascii.add_argument('--analysis_dir', type=str, default='ascii_analysis_results', help='Directory to save ASCII analysis results')
    parser_ascii.add_argument('--feature_types', type=str, nargs='+', default=None,
                         help='ASCII feature types to include (min, max, mean, direct). Default: all')
    add_common_args(parser_ascii)

    args = parser.parse_args()

    # Handle database operations first
    if args.list_tracks:
        try:
            with InstanceDatabase(args.db) as db:
                tracks = db.get_available_tracks()
                if tracks:
                    print("\nAvailable tracks:")
                    for track in tracks:
                        print(f"  - {track}")
                else:
                    print("\nNo tracks found in the database.")
            return
        except Exception as e:
            print(f"Error during database operation: {e}")
            return

    # Get track UUIDs if track is specified
    track_uuids = None
    if args.track:
        try:
            with InstanceDatabase(args.db) as db:
                track_uuids = db.get_track_uuids(args.track)
                if not track_uuids:
                    print(f"No instances found for track '{args.track}'")
                    return
                print(f"Found {len(track_uuids)} instances in track '{args.track}'")
        except Exception as e:
            print(f"Error getting track UUIDs: {e}")
            return

    # Both subcommands forward the exact same analysis options; build the
    # keyword set once so the two branches cannot fall out of sync.
    analysis_kwargs = dict(
        clustering_method=args.clustering, use_trimmed=args.use_trimmed,
        trim_ratio=args.trim_ratio, eps=args.eps, min_samples=args.min_samples,
        analyze_eps=args.analyze_eps, min_cluster_size=args.min_cluster_size,
        cluster_selection_epsilon=args.cluster_selection_epsilon,
        cluster_selection_method=args.cluster_selection_method,
        max_pca_components=args.max_pca_components,
        variance_threshold=args.variance_threshold,
        skip_pca_analysis=args.skip_pca_analysis,
        save_pca_model_path=args.save_pca_model,
        load_pca_model_path=args.load_pca_model,
        use_clustering_evaluation=args.use_clustering_evaluation,
        n_clusters_eval=args.n_clusters_eval,
    )

    if args.command == 'scope':
        all_features, image_names, family_names = load_features_scope(
            args.feature_dir, args.sources, args.analysis_dir, track_uuids)
    elif args.command == 'ascii':
        all_features, image_names, family_names = load_features_ascii(
            args.feature_dir, args.analysis_dir, args.feature_types, track_uuids)
    else:
        # Unreachable because subparsers are required=True; kept as a safety net.
        parser.print_help()
        return

    run_analysis(all_features, image_names, family_names, args.analysis_dir,
                 **analysis_kwargs)

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main() 