import os
import pickle
import numpy as np
from collections import Counter

def summarize_clustering(results_path):
    """Print a human-readable summary of saved clustering results.

    Loads a pickled dict containing (at least) the keys
    ``'train_labels'`` (one cluster label per instance, ``-1`` marking
    noise points) and ``'train_families'`` (one family name per
    instance), then prints overall counts, per-cluster sizes (largest
    first), and the top-3 family breakdown within each cluster.

    Args:
        results_path: Path to the pickle file produced by the
            clustering step.

    Returns:
        None. All output goes to stdout.
    """
    # NOTE(review): pickle.load can execute arbitrary code — only open
    # result files from a trusted source.
    with open(results_path, 'rb') as f:
        results = pickle.load(f)

    # Coerce to numpy arrays so the boolean mask below works even when
    # the pickled values are plain Python lists (a list compared to an
    # int yields a single bool, not an element-wise mask).
    train_labels = np.asarray(results['train_labels'])
    train_families = np.asarray(results['train_families'])

    # Instances per cluster label; -1 denotes noise (unclustered) points.
    cluster_counts = Counter(train_labels.tolist())

    # Real clusters only (noise excluded), largest first.
    sorted_clusters = sorted(
        ((k, v) for k, v in cluster_counts.items() if k != -1),
        key=lambda kv: kv[1],
        reverse=True,
    )

    print("\n=== Clustering Summary ===")
    print(f"Total training instances: {len(train_labels)}")
    print(f"Number of clusters: {len(sorted_clusters)}")
    print(f"Noise points: {cluster_counts.get(-1, 0)}")

    print("\nCluster sizes:")
    for cluster_id, count in sorted_clusters:
        print(f"Cluster {cluster_id}: {count} instances")

        # Family distribution restricted to this cluster's members.
        cluster_families = train_families[train_labels == cluster_id]
        family_counts = Counter(cluster_families.tolist())

        # Counter.most_common replaces the manual sort-and-slice.
        print("Top families in cluster:")
        for family, fam_count in family_counts.most_common(3):
            percentage = (fam_count / count) * 100
            print(f"  - {family}: {fam_count} instances ({percentage:.1f}%)")
        print()

if __name__ == '__main__':
    import sys

    # Expect exactly one argument: the pickle produced by the clustering run.
    cli_args = sys.argv[1:]
    if len(cli_args) != 1:
        print("Usage: python analyze-clusters.py <path_to_clustering_results.pkl>")
        sys.exit(1)

    summarize_clustering(cli_args[0])