import os
import numpy as np
import pandas as pd
import argparse
from tqdm import tqdm
import pickle
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import IncrementalPCA
import hdbscan
import random
from db_utils import InstanceDatabase
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def extract_uuid_and_family(filename):
    """Extract (uuid, family_name) from a feature filename.

    Two naming schemes are supported:
      * ``<uuid>-<family...>-scope.npz`` — dash-separated with a trailing
        ``scope`` token; the family itself may contain dashes.
      * ``<uuid>~<family>.npz`` — tilde-separated.
    Any other name falls back to ``(stem, 'unknown')``.

    Args:
        filename: Basename of the feature file (with or without ``.npz``).

    Returns:
        Tuple ``(uuid, family_name)`` of strings.
    """
    # Strip only a *trailing* '.npz' — str.replace would also mangle a
    # '.npz' that happens to appear mid-name.
    stem = filename[:-len('.npz')] if filename.endswith('.npz') else filename

    parts = stem.split('-')
    if len(parts) >= 3 and parts[-1] == 'scope':
        # '<uuid>-<family...>-scope': family is everything between the
        # leading uuid field and the trailing 'scope' marker.
        return parts[0], '-'.join(parts[1:-1])

    if '~' in stem:
        pieces = stem.split('~')
        return pieces[0], pieces[1]

    return stem, 'unknown'

def _load_split(feature_dir, files, desc):
    """Load one split (train or test) of .npz feature files.

    Each file must contain 'origin' and 'sorted' arrays; both are
    flattened and concatenated into one 1-D feature vector. Files
    missing either key, or failing to load, are skipped (load errors
    are reported but not fatal).

    Args:
        feature_dir: Directory containing the .npz files.
        files: Filenames (ending in '.npz') to load, in order.
        desc: Progress-bar label for tqdm.

    Returns:
        Tuple of parallel lists ``(features, names, families)``.
    """
    features, names, families = [], [], []
    for npz_file in tqdm(files, desc=desc):
        npz_path = os.path.join(feature_dir, npz_file)
        if not os.path.exists(npz_path):
            continue
        try:
            # NpzFile keeps the underlying file open; the context
            # manager guarantees it is closed promptly.
            with np.load(npz_path) as data:
                if 'origin' not in data.files or 'sorted' not in data.files:
                    continue
                feature = np.concatenate(
                    [data['origin'].flatten(), data['sorted'].flatten()])
        except Exception as e:
            print(f"Error processing {npz_file}: {e}")
            continue
        features.append(feature)
        names.append(npz_file[:-len('.npz')])
        _, family_name = extract_uuid_and_family(npz_file)
        families.append(family_name)
    return features, names, families


def load_features(feature_dir, exclude_uuids=None):
    """Load features from npz files, splitting into train/test by UUID.

    Files whose UUID appears in ``exclude_uuids`` form the test split;
    every other file is training data.

    Args:
        feature_dir: Directory to scan for '*.npz' feature files.
        exclude_uuids: Iterable of UUIDs to hold out (or None for none).

    Returns:
        ``(train_features, train_names, train_families,
           test_features, test_names, test_families)`` where the feature
        entries are numpy arrays stacked row-wise.
    """
    feature_files = [f for f in os.listdir(feature_dir) if f.endswith('.npz')]

    # Set membership is O(1) per lookup.
    exclude_set = set(exclude_uuids) if exclude_uuids is not None else set()

    train_files, test_files = [], []
    for npz_file in feature_files:
        uuid, _ = extract_uuid_and_family(npz_file)
        (test_files if uuid in exclude_set else train_files).append(npz_file)

    print(f"Found {len(train_files)} training files and {len(test_files)} test files")

    train_feats, train_names, train_families = _load_split(
        feature_dir, train_files, "Loading training features")
    test_feats, test_names, test_families = _load_split(
        feature_dir, test_files, "Loading test features")

    return (np.array(train_feats), train_names, train_families,
            np.array(test_feats), test_names, test_families)

def main():
    """Cluster image features with HDBSCAN or K-Means.

    Pipeline:
      1. Optionally hold out a random fraction of a DB track's UUIDs as
         a test set; everything else trains.
      2. StandardScaler normalization, then IncrementalPCA with the
         component count chosen to reach --pca_variance (capped at
         --max_pca_components).
      3. HDBSCAN (with approximate_predict for the test set) or a
         silhouette-score search over K-Means cluster counts.
      4. Persist scaler/PCA/clusterer/labels as a pickle, write
         per-cluster UUID text files, and print a summary.
    """
    parser = argparse.ArgumentParser(description='HDBSCAN or K-Means clustering with prediction capability')
    parser.add_argument('--feature_dir', required=True, help='Directory containing feature files')
    parser.add_argument('--db', required=True, help='Path to instance database')
    parser.add_argument('--track', help='Track name to specify instances pool')
    parser.add_argument('--exclude_ratio', type=float, default=0.2, help='Ratio of instances to exclude for testing from the track')
    parser.add_argument('--out_dir', required=True, help='Directory to save results')
    parser.add_argument('--min_cluster_size', type=int, default=5, help='Min cluster size for HDBSCAN')
    parser.add_argument('--min_samples', type=int, default=None, help='Min samples for HDBSCAN')
    parser.add_argument('--cluster_selection_epsilon', type=float, default=0.0, help='Cluster selection epsilon')
    parser.add_argument('--random_state', type=int, default=42, help='Random state for reproducibility')
    parser.add_argument('--algorithm', choices=['hdbscan', 'kmeans'], default='hdbscan', help='Clustering algorithm to use')
    parser.add_argument('--max_clusters', type=int, default=10, help='Maximum number of clusters for K-Means (searches for best k in [2, max_clusters])')
    parser.add_argument('--max_pca_components', type=int, default=128, help='Maximum number of PCA components')
    parser.add_argument('--pca_variance', type=float, default=0.95, help='Desired explained variance for PCA (0-1)')

    args = parser.parse_args()

    # Create output directory
    os.makedirs(args.out_dir, exist_ok=True)

    # Seed Python's RNG so the random held-out sample is reproducible.
    random.seed(args.random_state)

    # Decide which UUIDs to hold out as the test set.
    exclude_uuids = []
    if args.track:
        with InstanceDatabase(args.db) as db:
            track_uuids = list(db.get_track_uuids(args.track))  # Convert to list
            if not track_uuids:
                print(f"No instances found for track '{args.track}'")
                return

            # random.sample requires a sequence; truncation means the
            # actual ratio is floor(len * ratio) / len.
            num_exclude = int(len(track_uuids) * args.exclude_ratio)
            exclude_uuids = random.sample(track_uuids, num_exclude)
            print(f"Found {len(track_uuids)} instances in track '{args.track}'")
            print(f"Randomly excluding {len(exclude_uuids)} instances ({args.exclude_ratio * 100:.1f}%) from track for testing")
    else:
        print("No excluded track specified. No instance will be excluded.")

    # Load features, automatically splitting into train and test
    (train_features, train_names, train_families,
     test_features, test_names, test_families) = load_features(args.feature_dir, exclude_uuids)

    if len(train_features) == 0:
        print("No training features found!")
        return

    if len(test_features) == 0:
        print("No test features found!")
        # Only fatal when a hold-out was actually requested; with no
        # exclusions an empty test set is expected.
        if (len(exclude_uuids) != 0):
            return

    # Persist the split so downstream tools can reproduce it.
    split_info = {
        'train_names': train_names,
        'test_names': test_names,
        'track': args.track,
        'exclude_uuids': exclude_uuids
    }
    with open(os.path.join(args.out_dir, 'split_info.pkl'), 'wb') as f:
        pickle.dump(split_info, f)

    # Normalize features: fit on train only, apply the same transform to test.
    print("Normalizing features...")
    scaler = StandardScaler()
    train_features_scaled = scaler.fit_transform(train_features)
    if len(test_features) > 0:
        test_features_scaled = scaler.transform(test_features)
    else:
        test_features_scaled = None

    # Dimensionality reduction with PCA
    print("Applying PCA...")
    # Step 1: fit a probe PCA with up to max_pca_components (bounded by
    # both the sample count and the feature dimensionality).
    max_components = min(args.max_pca_components, train_features_scaled.shape[0], train_features_scaled.shape[1])
    pca_temp = IncrementalPCA(n_components=max_components)
    pca_temp.fit(train_features_scaled)
    cumulative_variance = np.cumsum(pca_temp.explained_variance_ratio_)

    # Step 2: smallest component count whose cumulative variance reaches
    # the target (clamped if the target is never reached).
    n_components = np.searchsorted(cumulative_variance, args.pca_variance) + 1
    n_components = min(n_components, max_components)
    print(f"Selected {n_components} PCA components to explain at least {args.pca_variance*100:.1f}% variance (max allowed: {max_components})")

    # Step 3: refit with the selected number of components.
    pca = IncrementalPCA(n_components=n_components)
    train_features_pca = pca.fit_transform(train_features_scaled)
    if test_features_scaled is not None:
        test_features_pca = pca.transform(test_features_scaled)
    else:
        test_features_pca = None
    explained_var = pca.explained_variance_ratio_
    print(f"PCA output dimensions: {train_features_pca.shape[1]}")
    print(f"Total explained variance ratio: {explained_var.sum():.4f}")
    print(f"Explained variance ratio per component (first 10): {explained_var[:10]}")

    # Clustering
    if args.algorithm == 'hdbscan':
        print("Performing HDBSCAN clustering...")
        clusterer = hdbscan.HDBSCAN(
            min_cluster_size=args.min_cluster_size,
            min_samples=args.min_samples,
            cluster_selection_epsilon=args.cluster_selection_epsilon,
            prediction_data=True  # Important for approximate_predict
        )
        clusterer.fit(train_features_pca)
        train_labels = clusterer.labels_
        if test_features_pca is not None:
            print("Predicting clusters for test set...")
            test_labels, strengths = hdbscan.approximate_predict(clusterer, test_features_pca)
        else:
            test_labels = np.array([])
            strengths = np.array([])
    elif args.algorithm == 'kmeans':
        print(f"Searching for best number of clusters (2 to {args.max_clusters}) using silhouette score...")
        best_score = -1
        best_k = 2
        best_kmeans = None
        n_train = len(train_features_pca)
        for k in range(2, args.max_clusters + 1):
            if k > n_train:
                break  # KMeans requires n_samples >= n_clusters
            kmeans = KMeans(n_clusters=k, random_state=args.random_state)
            labels = kmeans.fit_predict(train_features_pca)
            if len(set(labels)) == 1:
                continue  # skip if only one cluster
            score = silhouette_score(train_features_pca, labels)
            print(f"k={k}, silhouette score={score:.4f}")
            if score > best_score:
                best_score = score
                best_k = k
                best_kmeans = kmeans
        if best_kmeans is None:
            # max_clusters < 2, too few samples, or every fit degenerated;
            # fail with a clear message instead of an AttributeError below.
            raise ValueError(
                f"K-Means search produced no valid clustering "
                f"(max_clusters={args.max_clusters}, "
                f"training samples={n_train}); need max_clusters >= 2 and "
                f"at least 2 training samples")
        print(f"Best number of clusters: {best_k} (silhouette score={best_score:.4f})")
        clusterer = best_kmeans
        train_labels = clusterer.labels_
        if test_features_pca is not None:
            print("Predicting clusters for test set...")
            test_labels = clusterer.predict(test_features_pca)
            # For strengths, use negative distance to assigned cluster center as a proxy
            distances = clusterer.transform(test_features_pca)
            strengths = -np.min(distances, axis=1)
        else:
            test_labels = np.array([])
            strengths = np.array([])
    else:
        raise ValueError(f"Unknown algorithm: {args.algorithm}")

    # Persist everything needed to rerun prediction on new data.
    results = {
        'scaler': scaler,
        'pca': pca,
        'clusterer': clusterer,
        'train_features': train_features,
        'train_names': train_names,
        'train_families': train_families,
        'train_labels': train_labels,
        'test_features': test_features if len(test_features) > 0 else None,
        'test_names': test_names if len(test_names) > 0 else None,
        'test_families': test_families if len(test_families) > 0 else None,
        'test_labels': test_labels if len(test_labels) > 0 else None,
        'test_strengths': strengths if len(strengths) > 0 else None,
        'params': vars(args),
        'train_features_pca': train_features_pca,
        'test_features_pca': test_features_pca if test_features_pca is not None else None
    }

    with open(os.path.join(args.out_dir, 'clustering_results.pkl'), 'wb') as f:
        pickle.dump(results, f)

    # Save cluster assignments to text files (one UUID per line).
    cluster_dir = os.path.join(args.out_dir, 'clusters')
    os.makedirs(cluster_dir, exist_ok=True)

    # Training instances: label -1 is HDBSCAN's noise bucket.
    unique_clusters = np.unique(train_labels)
    for cluster in unique_clusters:
        if cluster == -1:
            filename = 'noise_points.txt'
        else:
            filename = f'cluster_{cluster}.txt'

        with open(os.path.join(cluster_dir, filename), 'w') as f:
            mask = train_labels == cluster
            for name in np.array(train_names)[mask]:
                uuid, _ = extract_uuid_and_family(name + '.npz')
                f.write(f"{uuid}\n")

    # Test instances (in separate directory), with membership strength.
    if test_features_pca is not None and len(test_labels) > 0:
        test_cluster_dir = os.path.join(args.out_dir, 'test_clusters')
        os.makedirs(test_cluster_dir, exist_ok=True)

        unique_test_clusters = np.unique(test_labels)
        for cluster in unique_test_clusters:
            if cluster == -1:
                filename = 'noise_points.txt'
            else:
                filename = f'cluster_{cluster}.txt'

            with open(os.path.join(test_cluster_dir, filename), 'w') as f:
                mask = test_labels == cluster
                for name, strength in zip(np.array(test_names)[mask], strengths[mask]):
                    uuid, _ = extract_uuid_and_family(name + '.npz')
                    f.write(f"{uuid}\t{strength:.4f}\n")

    print(f"Analysis complete. Results saved to {args.out_dir}")
    print(f"\nClustering Statistics:")
    print(f"Training instances: {len(train_labels)}")
    if test_features_pca is not None and len(test_labels) > 0:
        print(f"Test instances: {len(test_labels)}")
        print(f"Noise points in test: {np.sum(test_labels == -1)}")
    else:
        print("Test instances: 0 (no test set)")
    print(f"Number of clusters: {len(unique_clusters) - (1 if -1 in unique_clusters else 0)}")
    print(f"Noise points in training: {np.sum(train_labels == -1)}")

    # Detailed clustering summary
    print("\n=== Detailed Clustering Summary ===")

    # Count instances per cluster in training set
    cluster_counts = {}
    for label in train_labels:
        if label not in cluster_counts:
            cluster_counts[label] = 0
        cluster_counts[label] += 1

    # Sort clusters by size (excluding noise points)
    sorted_clusters = sorted(
        [(k, v) for k, v in cluster_counts.items() if k != -1],
        key=lambda x: x[1],
        reverse=True
    )

    print("\nCluster sizes (Training set):")
    for cluster_id, count in sorted_clusters:
        print(f"Cluster {cluster_id}: {count} instances")

        # Get family distribution for this cluster
        cluster_mask = train_labels == cluster_id
        cluster_families = np.array(train_families)[cluster_mask]
        family_counts = {}
        for family in cluster_families:
            if family not in family_counts:
                family_counts[family] = 0
            family_counts[family] += 1

        # Print top 3 families in this cluster
        print("Top families in cluster:")
        sorted_families = sorted(family_counts.items(), key=lambda x: x[1], reverse=True)[:3]
        for family, fam_count in sorted_families:
            percentage = (fam_count / count) * 100
            print(f"  - {family}: {fam_count} instances ({percentage:.1f}%)")
        print()

# Entry point when executed as a script (no side effects on import).
if __name__ == '__main__':
    main() 