import os
import torch
import numpy as np
import argparse
from sklearn.cluster import AgglomerativeClustering
from data_preprocess import get_dataloader
from torchvision.models import resnet50
import torch.nn as nn  # needed for nn.Identity (this import was previously missing)


def extract_features(args):
    """Extract a feature vector for every image in the validation split.

    Loads a ResNet-50 backbone (classification head replaced by identity)
    from ``<save_dir>/best_backbone.pth`` and runs it over the 'val'
    dataloader in eval mode.

    Args:
        args: Namespace with ``save_dir`` (checkpoint directory),
            ``data_dir`` (dataset root) and ``batch_size``.

    Returns:
        tuple: ``(features, image_paths)`` — ``features`` is a float
        ndarray of shape [N, 2048]; ``image_paths`` is a list of file
        paths, empty when the dataset has no ``samples`` attribute.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the backbone and remove the classification head so the model
    # returns the 2048-d pooled features.
    model = resnet50(pretrained=False)
    model.fc = nn.Identity()
    # BUG FIX: map_location lets a checkpoint that was saved on GPU load
    # on a CPU-only machine; without it torch.load raises there.
    state_dict = torch.load(
        os.path.join(args.save_dir, 'best_backbone.pth'),
        map_location=device,
    )
    model.load_state_dict(state_dict)
    model = model.to(device)
    model.eval()

    # Validation dataloader (project-local helper).
    dataloader = get_dataloader(
        root_dir=args.data_dir,
        batch_size=args.batch_size,
        split='val'
    )

    features = []
    image_paths = []

    with torch.no_grad():
        for batch in dataloader:
            images = batch['image'].to(device)
            feat = model(images)  # [B, 2048]
            features.append(feat.cpu().numpy())

            # Recover the file paths for this batch by position.
            # NOTE(review): this assumes the 'val' dataloader iterates
            # dataset.samples in order without shuffling — verify that
            # get_dataloader(split='val') does not shuffle.
            if hasattr(dataloader.dataset, 'samples'):
                start_idx = len(image_paths)
                end_idx = start_idx + images.shape[0]
                batch_paths = [sample[0] for sample in dataloader.dataset.samples[start_idx:end_idx]]
                image_paths.extend(batch_paths)

    # BUG FIX: np.vstack([]) raises on an empty dataloader; return an
    # empty [0, 2048] matrix instead so callers can handle it gracefully.
    if not features:
        return np.empty((0, 2048), dtype=np.float32), image_paths
    return np.vstack(features), image_paths


def perform_clustering(features, image_paths=None, distance_threshold=0.3):
    """Hierarchically cluster L2-normalized features by cosine distance.

    Args:
        features: ndarray of shape [N, D].
        image_paths: optional list of image paths; when provided, the
            per-cluster image counts are printed.
        distance_threshold: cosine-distance cut that controls cluster
            granularity (smaller -> more, tighter clusters).

    Returns:
        ndarray of integer cluster labels, shape [N].
    """
    # L2-normalize so cosine distance is well defined; clamp the norm to
    # avoid dividing by zero for an all-zero feature row (BUG FIX: the
    # original produced NaNs in that case).
    norms = np.linalg.norm(features, axis=1, keepdims=True)
    features = features / np.maximum(norms, 1e-12)

    # Hierarchical clustering on cosine distance. BUG FIX: 'affinity' was
    # renamed to 'metric' in scikit-learn 1.2 and removed in 1.4; 'metric'
    # keeps the identical cosine behavior.
    clustering = AgglomerativeClustering(
        n_clusters=None,
        metric='cosine',
        linkage='average',
        distance_threshold=distance_threshold  # distance cut controls granularity
    )
    pred_labels = clustering.fit_predict(features)

    n_clusters = len(set(pred_labels))
    print(f'聚类完成，共得到 {n_clusters} 个簇')

    # When paths are available, report how many images landed in each cluster.
    if image_paths is not None:
        unique_labels, counts = np.unique(pred_labels, return_counts=True)
        print('每个簇的图像数量:')
        for cluster_id, count in zip(unique_labels, counts):
            print(f'簇 {cluster_id}: {count} 张图像')

    return pred_labels


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, required=True, help='数据集根目录')
    parser.add_argument('--save_dir', type=str, default='checkpoints', help='模型保存目录')
    parser.add_argument('--batch_size', type=int, default=32, help='批次大小')
    parser.add_argument('--distance_threshold', type=float, default=0.3, help='聚类距离阈值')
    args = parser.parse_args()

    # Extract features for the validation split and cluster them.
    features, image_paths = extract_features(args)
    pred_labels = perform_clustering(features, image_paths, args.distance_threshold)

    # Save the raw cluster labels.
    labels_path = os.path.join(args.save_dir, 'cluster_labels.npy')
    np.save(labels_path, pred_labels)
    print(f'聚类标签已保存到 {labels_path}')

    if len(image_paths) > 0:
        # Save the image-path -> cluster-id mapping as comma-separated lines.
        mapping_path = os.path.join(args.save_dir, 'image_cluster_mapping.txt')
        with open(mapping_path, 'w', encoding='utf-8') as f:
            for img_path, cluster_id in zip(image_paths, pred_labels):
                f.write(f'{img_path},{cluster_id}\n')
        # BUG FIX: the original nested single quotes inside a single-quoted
        # f-string here — a SyntaxError on Python < 3.12.
        print(f'图像-聚类映射已保存到 {mapping_path}')