import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.decomposition import PCA
from tqdm import tqdm
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import random


# 设置随机种子，保证结果可复现
def set_seed(seed=42):
    """Seed every RNG source (Python, NumPy, PyTorch) for reproducible runs.

    Args:
        seed: integer seed applied to all RNGs. Defaults to 42.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # FIX: seed every GPU, not just the current device (no-op on CPU-only hosts).
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # FIX: benchmark mode lets cuDNN auto-tune kernels nondeterministically;
    # disable it so the deterministic flag actually holds.
    torch.backends.cudnn.benchmark = False


set_seed()  # seed all RNGs once at import time so the whole run is reproducible


class UnsupervisedImageDataset(Dataset):
    """Unlabeled image dataset collected recursively from a root directory.

    Each item yields two views of the same image — one transformed for the
    CNN feature extractor, one for color-histogram extraction — plus the
    source file path.
    """

    def __init__(self, root_dir, model_transform=None, color_transform=None):
        """
        Args:
            root_dir: directory walked recursively for .png/.jpg/.jpeg/.bmp files.
            model_transform: transform applied to the copy fed to the CNN.
            color_transform: transform applied to the copy used for color features.
        """
        self.root_dir = root_dir
        self.model_transform = model_transform
        self.color_transform = color_transform
        self.image_files = []

        # Walk the directory tree and collect all image files
        for subdir, _, files in os.walk(root_dir):
            for file in files:
                if file.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp')):
                    self.image_files.append(os.path.join(subdir, file))
        # FIX: os.walk order is filesystem-dependent, so without sorting the
        # dataset (and therefore feature/cluster) order is not reproducible
        # across machines despite the global seed.
        self.image_files.sort()

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        image = Image.open(image_path).convert('RGB')

        model_image = self.model_transform(image) if self.model_transform else image
        color_image = self.color_transform(image) if self.color_transform else image

        return model_image, color_image, image_path


class MultiFeatureExtractor(nn.Module):
    """Captures features from multiple intermediate layers of a pretrained backbone.

    Forward hooks record each requested layer's output; forward() returns a
    dict mapping layer name to a globally pooled, flattened feature tensor.
    """

    def __init__(self, model_name='resnet50', layers=['layer3', 'layer4']):
        super().__init__()
        self.base_model = torchvision.models.__dict__[model_name](pretrained=True)
        self.layers = layers
        self.features = {}
        self.hooks = []

        # Register a forward hook on each requested layer so its output is
        # captured into self.features during the forward pass.
        modules_by_name = dict(self.base_model.named_modules())
        for layer_name in layers:
            target = modules_by_name[layer_name]
            self.hooks.append(target.register_forward_hook(self._make_hook(layer_name)))

        # Inference only: freeze all backbone weights and switch to eval mode.
        for param in self.base_model.parameters():
            param.requires_grad = False
        self.base_model.eval()

    def _make_hook(self, layer_name):
        """Return a hook that stores the layer's output under its name."""
        def hook(module, input, output):
            self.features[layer_name] = output
        return hook

    def forward(self, x):
        self.features.clear()
        _ = self.base_model(x)  # the forward pass triggers the registered hooks
        pooled = {}
        for layer_name in self.layers:
            feat = self.features[layer_name]
            # Spatial maps (B, C, H, W) are global-average-pooled and flattened
            # to (B, C); already-flat outputs pass through unchanged.
            if feat.dim() == 4:
                feat = nn.functional.adaptive_avg_pool2d(feat, (1, 1))
                feat = feat.view(feat.size(0), -1)
            pooled[layer_name] = feat
        return pooled

def extract_color_features(color_imgs):
    """Compute per-image RGB color-histogram features from a batch of tensors.

    Args:
        color_imgs: float tensor batch in [0, 1] with 3 channels per image
            (indexed as channels 0/1/2 = R/G/B below).

    Returns:
        ndarray of shape (batch, 48): three 16-bin per-channel histograms,
        concatenated and normalized to sum to 1.
    """
    pixel_values = (color_imgs * 255).byte().cpu().numpy()
    feature_rows = []
    for img in pixel_values:  # img: one image, channels first
        # One 16-bin histogram per channel over the 0-255 value range.
        hist = np.concatenate(
            [np.histogram(img[channel], bins=16, range=(0, 255))[0] for channel in range(3)]
        )
        # Normalize so the 48 bins form a distribution.
        feature_rows.append(hist / hist.sum())
    return np.array(feature_rows)


class FeatureExtractor(nn.Module):
    """Feature extractor built from a pretrained ResNet backbone.

    Truncates the network at the requested layer and returns flattened,
    globally pooled feature vectors.
    """

    def __init__(self, model_name='resnet50', layer_name='avgpool'):
        """
        Args:
            model_name: backbone to load, 'resnet50' or 'resnet18'.
            layer_name: truncation point, 'avgpool' or 'layer4'.

        Raises:
            ValueError: on an unsupported model or layer name.
        """
        super().__init__()

        # Load the pretrained backbone.
        # BUG FIX: the two branches previously had the constructors swapped —
        # 'resnet50' loaded torchvision.models.resnet18 and vice versa.
        if model_name == 'resnet50':
            base_model = torchvision.models.resnet50(pretrained=True)
        elif model_name == 'resnet18':
            base_model = torchvision.models.resnet18(pretrained=True)
        else:
            raise ValueError(f"不支持的模型: {model_name}")

        # Build the truncated feature-extraction model.
        if layer_name == 'avgpool':
            # Keep everything up to and including avgpool (drop only fc).
            self.feature_extractor = nn.Sequential(
                *list(base_model.children())[:-1]
            )
        elif layer_name == 'layer4':
            # Keep everything up to layer4 (drop avgpool and fc).
            self.feature_extractor = nn.Sequential(
                *list(base_model.children())[:-2]
            )
        else:
            raise ValueError(f"不支持的层: {layer_name}")

        # Inference only: eval mode and frozen weights.
        self.feature_extractor.eval()
        for param in self.feature_extractor.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Return a (batch, features) tensor for the input batch."""
        x = self.feature_extractor(x)
        # layer4 output is spatial (B, C, H, W): pool before flattening.
        if x.dim() == 4:
            x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = x.view(x.size(0), -1)
        return x


def extract_features(dataloader, feature_extractor, device='cpu',
                     color_weight=3.0, texture_weight=2.0, shape_weight=1.0):
    """Extract a weighted, standardized feature matrix for every image.

    Args:
        dataloader: yields (model_imgs, color_imgs, image_paths) batches.
        feature_extractor: module returning a dict with 'layer3' (texture)
            and 'layer4' (shape) feature tensors.
        device: device for the CNN forward pass.
        color_weight, texture_weight, shape_weight: scale factors applied to
            each standardized feature family.

    Returns:
        (combined_features, paths): features are hstacked in the fixed order
        color, texture, shape; paths align row-for-row with the features.
    """
    from sklearn.preprocessing import StandardScaler

    batches = {'texture': [], 'shape': [], 'color': []}
    paths = []

    feature_extractor.to(device)

    with torch.no_grad():
        for model_imgs, color_imgs, image_paths in tqdm(dataloader, desc="提取特征"):
            # Deep CNN features from the intermediate layers.
            deep = feature_extractor(model_imgs.to(device))
            batches['texture'].append(deep['layer3'].cpu().numpy())
            batches['shape'].append(deep['layer4'].cpu().numpy())
            # Histogram-based color features from the unnormalized view.
            batches['color'].append(extract_color_features(color_imgs))
            paths.extend(image_paths)

    # Stack per-batch arrays into full matrices.
    stacked = {name: np.vstack(parts) for name, parts in batches.items()}

    # Standardize each feature family independently, then apply its weight.
    weighted = {
        'texture': StandardScaler().fit_transform(stacked['texture']) * texture_weight,
        'shape': StandardScaler().fit_transform(stacked['shape']) * shape_weight,
        'color': StandardScaler().fit_transform(stacked['color']) * color_weight,
    }

    # Fixed concatenation order: color, texture, shape.
    combined_features = np.hstack([weighted['color'], weighted['texture'], weighted['shape']])
    return combined_features, paths

def find_best_k(features, min_k=2, max_k=10, method='silhouette'):
    """Search for the best number of clusters k.

    Args:
        features: (n_samples, n_features) array to cluster.
        min_k, max_k: inclusive range of k values to evaluate.
        method: 'silhouette' (higher is better) or 'inertia' (lower is better).

    Returns:
        The best k found, or None if no k in the range could be evaluated.

    Raises:
        ValueError: for an unsupported method.

    Side effect: saves a score-vs-k plot to 'k_selection.png'.
    """
    # FIX: validate the method up front instead of inside the loop, and decide
    # the comparison direction once rather than on every iteration.
    if method not in ('silhouette', 'inertia'):
        raise ValueError(f"不支持的方法: {method}")
    higher_better = method == 'silhouette'

    best_k = None
    best_score = -1 if higher_better else float('inf')

    scores = []
    evaluated_ks = []  # may be shorter than [min_k, max_k]: silhouette needs k >= 2

    print(f"正在寻找最佳聚类数量k ({min_k}到{max_k})...")

    for k in range(min_k, max_k + 1):
        if higher_better and k < 2:
            continue  # silhouette score requires at least two clusters

        kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
        cluster_labels = kmeans.fit_predict(features)

        if higher_better:
            score = silhouette_score(features, cluster_labels)
        else:
            score = kmeans.inertia_

        scores.append(score)
        evaluated_ks.append(k)

        if (higher_better and score > best_score) or (not higher_better and score < best_score):
            best_score = score
            best_k = k

    # Plot score vs k for the values actually evaluated.
    # BUG FIX: the plot previously used the full k range even when some k were
    # skipped, which made the x/y arrays different lengths and crashed plt.plot.
    plt.figure(figsize=(10, 6))
    plt.plot(evaluated_ks, scores, 'o-')
    plt.xlabel('聚类数量 (k)')

    if method == 'silhouette':
        plt.ylabel('轮廓系数')
        plt.title('轮廓系数与聚类数量的关系')
    else:
        plt.ylabel('惯性')
        plt.title('惯性与聚类数量的关系')

    plt.xticks(evaluated_ks)
    plt.grid(True)
    plt.savefig('k_selection.png')
    plt.close()

    print(f"最佳聚类数量k: {best_k}")
    return best_k


def perform_clustering(features, k, reduce_dim=None):
    """Run K-means on (optionally PCA-reduced) features.

    Args:
        features: (n_samples, n_features) feature matrix.
        k: number of clusters.
        reduce_dim: target PCA dimension; None or >= n_features skips reduction.

    Returns:
        (cluster_labels, cluster_centers, features_used_for_clustering).
    """
    # Optional dimensionality reduction before clustering.
    reduced_features = features
    if reduce_dim and reduce_dim < features.shape[1]:
        pca = PCA(n_components=reduce_dim, random_state=42)
        reduced_features = pca.fit_transform(features)
        print(f"特征维度从{features.shape[1]}降到{reduce_dim}")

    # K-means clustering on the (possibly reduced) features.
    model = KMeans(n_clusters=k, random_state=42, n_init=10)
    labels = model.fit_predict(reduced_features)

    return labels, model.cluster_centers_, reduced_features


def visualize_clusters(features, labels, k, output_dir='results'):
    """Project features to 2-D with PCA and save clustering scatter plots.

    Saves one overall plot colored by cluster plus one plot per cluster
    into output_dir.
    """
    os.makedirs(output_dir, exist_ok=True)

    # 2-D PCA projection purely for visualization.
    points_2d = PCA(n_components=2, random_state=42).fit_transform(features)
    xs, ys = points_2d[:, 0], points_2d[:, 1]

    # Overall scatter plot colored by cluster label.
    plt.figure(figsize=(10, 8))
    scatter = plt.scatter(xs, ys, c=labels, cmap='viridis', s=30, alpha=0.7)

    # Attach a legend built from the scatter's color mapping.
    legend = plt.legend(*scatter.legend_elements(), title="聚类", loc="best")
    plt.gca().add_artist(legend)

    plt.title(f'K-means聚类结果 (k={k})')
    plt.xlabel('PCA 1')
    plt.ylabel('PCA 2')
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.savefig(os.path.join(output_dir, 'clustering_result.png'))
    plt.close()

    # One scatter plot per individual cluster.
    for cluster in range(k):
        members = labels == cluster
        plt.figure(figsize=(8, 6))
        plt.scatter(xs[members], ys[members], c='blue', s=30, alpha=0.7)
        plt.title(f'聚类 {cluster}')
        plt.xlabel('PCA 1')
        plt.ylabel('PCA 2')
        plt.grid(True, linestyle='--', alpha=0.7)
        plt.savefig(os.path.join(output_dir, f'cluster_{cluster}.png'))
        plt.close()


def save_cluster_results(image_paths, cluster_labels, output_dir='results'):
    """Copy images into per-cluster folders and write an assignment table.

    Args:
        image_paths: iterable of source image paths.
        cluster_labels: iterable of cluster ids, aligned with image_paths.
        output_dir: destination root; cluster_<id> subfolders are created inside,
            plus a 'cluster_assignments.txt' table (path<TAB>cluster per line).
    """
    import shutil  # local import, matching the file's style for helper-only deps

    os.makedirs(output_dir, exist_ok=True)

    # One folder per cluster id.
    for cluster in set(cluster_labels):
        os.makedirs(os.path.join(output_dir, f'cluster_{cluster}'), exist_ok=True)

    # Copy each image into its cluster folder.
    # NOTE(review): files from different subdirectories that share a basename
    # overwrite each other here — confirm source filenames are unique.
    for path, cluster in zip(image_paths, cluster_labels):
        filename = os.path.basename(path)
        target_path = os.path.join(output_dir, f'cluster_{cluster}', filename)

        try:
            # FIX: shutil.copyfile streams in chunks instead of reading the
            # entire image into memory with src.read().
            shutil.copyfile(path, target_path)
        except Exception as e:
            print(f"无法复制文件 {path}: {str(e)}")

    # Save the assignment table.
    with open(os.path.join(output_dir, 'cluster_assignments.txt'), 'w') as f:
        f.write("图像路径\t聚类\n")
        for path, cluster in zip(image_paths, cluster_labels):
            f.write(f"{path}\t{cluster}\n")


def main():
    """Pipeline entry point: load images, extract features, choose k, cluster, save.

    NOTE(review): min_k == max_k == 100 pins the cluster count to exactly 100,
    so the k-search below is effectively disabled — confirm this is intended.
    """
    # Configuration parameters
    config = {
        'data_dir': './Data/第二批_备份_提取整理',  # image folder path
        'output_dir': 'results2',  # results output folder
        'batch_size': 64,  # batch size
        'image_size': 512,  # image size (square resize)
        'feature_model': 'resnet50',  # feature-extraction backbone
        'feature_layer': 'avgpool',  # feature-extraction layer (not read below; MultiFeatureExtractor hardcodes its layers)
        'min_k': 100,  # minimum number of clusters
        'max_k': 100,  # maximum number of clusters
        'k_selection_method': 'silhouette',  # k-selection method: 'silhouette' or 'inertia'
        'reduce_dim': 128,  # feature dimension after reduction; None disables reduction
        'device': 'cuda' if torch.cuda.is_available() else 'cpu',  # use GPU when available
         'color_weight': 2.0,  # color-feature weight
        'texture_weight': 1.0,  # texture-feature weight
        'shape_weight': 1.0  # shape-feature weight
    }

    print(f"使用设备: {config['device']}")

    # Preprocessing for the CNN branch: resize, to-tensor, ImageNet normalization
    model_transform = transforms.Compose([
        transforms.Resize((config['image_size'], config['image_size'])),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Preprocessing for the color branch: resize + to-tensor only (no
    # normalization, so histogram inputs stay in [0, 1])
    color_transform = transforms.Compose([
        transforms.Resize((config['image_size'], config['image_size'])),
        transforms.ToTensor(),

    ])


    # Build the dataset
    dataset = UnsupervisedImageDataset(
        root_dir=config['data_dir'],
        model_transform=model_transform,
        color_transform=color_transform
    )
    dataloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=False)

    print(f"找到 {len(dataset)} 张图像")

    # Feature extraction
    # Create the multi-layer feature extractor
    feature_extractor = MultiFeatureExtractor(
        model_name=config['feature_model'],
        layers=['layer3', 'layer4']  # layer3 -> texture, layer4 -> shape
    )

    # Extract features (passing the configured weights)
    features, image_paths = extract_features(
        dataloader,
        feature_extractor,
        device=config['device'],
        color_weight=config['color_weight'],
        texture_weight=config['texture_weight'],
        shape_weight=config['shape_weight']
    )

    print(f"特征提取完成，特征形状: {features.shape}")

    # Find the best number of clusters k
    best_k = find_best_k(features, config['min_k'], config['max_k'], config['k_selection_method'])

    # Run clustering
    cluster_labels, cluster_centers, reduced_features = perform_clustering(
        features, best_k, config['reduce_dim']
    )

    # Visualize the clustering result
    visualize_clusters(reduced_features, cluster_labels, best_k, config['output_dir'])

    # Save clustering results (per-cluster folders + assignment table)
    save_cluster_results(image_paths, cluster_labels, config['output_dir'])

    print(f"聚类完成，结果已保存到 {config['output_dir']}")


# Run the full pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()