import os
import sys
import torch
from torchvision import models, transforms
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (
    accuracy_score, confusion_matrix,
    adjusted_rand_score, silhouette_score,
    classification_report
)
from sklearn.cluster import AgglomerativeClustering

# Make the project root importable, then pull in shared project utilities
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.utils import cvtColor, seed_everything

# 1. Load images and extract features
class ImageDataset(Dataset):
    """Dataset yielding transformed RGB images loaded from disk.

    Items that fail to load (or fail during the transform) are returned
    as ``None`` so the consumer can filter them out; this requires a
    ``collate_fn`` that tolerates ``None`` entries.
    """

    def __init__(self, img_paths, transform=None):
        # img_paths: list of image file paths
        # transform: optional torchvision-style callable applied per image
        self.img_paths = img_paths
        self.transform = transform

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        path = self.img_paths[idx]
        try:
            # Force RGB so downstream tensors always have 3 channels.
            image = Image.open(path).convert('RGB')
            if self.transform:
                image = self.transform(image)
            return image
        except Exception as e:
            # Best-effort loading: report the failure and signal it with None.
            print(f"Error loading image {path}: {e}")
            return None

# Feature extraction with a pretrained ResNet18 model
def extract_features(img_paths, device, batch_size=32):
    """
    Extract image features with a pretrained ResNet18.

    Images that fail to load are skipped; the returned path list is filtered
    accordingly so that feature rows and paths stay aligned.

    Args:
        img_paths: list of image file paths
        device: torch device (cpu/cuda)
        batch_size: mini-batch size for inference

    Returns:
        features: np.ndarray of shape (N, 1000) — ResNet18 logits per image
        valid_img_paths: paths of the images whose features were extracted

    Raises:
        RuntimeError: if every image fails to load
    """
    # Use the `weights` argument instead of the deprecated `pretrained` flag.
    weights = models.ResNet18_Weights.DEFAULT
    model = models.resnet18(weights=weights)
    model.eval()  # inference mode: fixes batchnorm/dropout behavior
    model = model.to(device)

    # Standard ImageNet preprocessing expected by the pretrained weights.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    dataset = ImageDataset(img_paths, transform=transform)
    # BUG FIX: the default collate_fn raises TypeError on the `None` items
    # the dataset returns for unreadable images, crashing before the
    # per-item None check below could run. Keep each batch as a plain
    # Python list so None entries survive to be filtered out.
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False,
                            num_workers=0, collate_fn=lambda batch: batch)

    features = []
    valid_indices = []  # global indices of images that loaded successfully

    with torch.no_grad():
        for batch_idx, imgs in enumerate(tqdm(dataloader, desc="提取特征", unit="batch")):
            # Keep only the successfully loaded images of this batch,
            # remembering their positions in the original list.
            valid_imgs = []
            batch_valid_indices = []

            for i, img in enumerate(imgs):
                global_idx = batch_idx * batch_size + i
                if img is not None and global_idx < len(img_paths):
                    valid_imgs.append(img)
                    batch_valid_indices.append(global_idx)

            if len(valid_imgs) == 0:
                continue

            # Forward pass; move results back to CPU for numpy conversion.
            imgs_tensor = torch.stack(valid_imgs).to(device)
            feature = model(imgs_tensor)
            feature = feature.cpu().numpy()
            features.append(feature)
            valid_indices.extend(batch_valid_indices)

    if len(features) == 0:
        raise RuntimeError("所有图像加载失败，无法提取特征")

    features = np.vstack(features)
    valid_img_paths = [img_paths[i] for i in valid_indices]

    failed_count = len(img_paths) - len(valid_img_paths)
    if failed_count > 0:
        print(f"警告: {failed_count} 张图像加载失败，已跳过")

    print(f"成功提取 {len(valid_img_paths)} 张图像的特征")

    return features, valid_img_paths

# 2. Read the label file (earlier implementation kept below for reference)
# def load_labels(label_file, img_paths):
#     """
#     从CSV文件中加载图像标签

#     Args:
#         label_file: CSV文件路径
#         img_paths: 图像路径列表

#     Returns:
#         labels: 二分类标签数组 (0=健康, 1=病变)

#     Raises:
#         ValueError: 当找不到匹配的标签或数量不一致时
#     """
#     df = pd.read_csv(label_file)
#     file_names = [os.path.basename(path) for path in img_paths]

#     # 筛选出匹配的标签
#     matched_df = df[df['file_name'].isin(file_names)]

#     # 检查是否所有图像都有对应标签
#     if len(matched_df) == 0:
#         raise ValueError(f"在 {label_file} 中未找到任何匹配的图像标签")

#     if len(matched_df) != len(img_paths):
#         missing_count = len(img_paths) - len(matched_df)
#         print(f"警告: {missing_count} 张图像在CSV中未找到对应标签")

#     # 按照img_paths的顺序排列标签
#     file_to_label = dict(zip(matched_df['file_name'],
#                              matched_df['dr_grade'].apply(lambda x: 0 if x == 0 else 1)))

#     labels = []
#     for fname in file_names:
#         if fname in file_to_label:
#             labels.append(file_to_label[fname])
#         else:
#             raise ValueError(f"图像 {fname} 在CSV中未找到标签")

#     return np.array(labels)
def load_labels(label_file, img_paths):
    """
    Load binary labels from a CSV, keeping only images present in both
    the path list and the CSV.

    Args:
        label_file: path to a CSV with 'file_name' and 'dr_grade' columns
        img_paths: list of image file paths

    Returns:
        filtered_img_paths: image paths that have a matching CSV row
        labels: np.ndarray of binary labels (0 = healthy, 1 = diseased)

    Raises:
        ValueError: if a required column is missing or nothing matches
    """
    df = pd.read_csv(label_file)

    # Validate the required CSV schema up front.
    for column in ('file_name', 'dr_grade'):
        if column not in df.columns:
            raise ValueError(f"CSV 文件 {label_file} 中缺少 '{column}' 列")

    basenames = [os.path.basename(p) for p in img_paths]

    # Only keep filenames that appear both on disk and in the CSV.
    common_files = set(df['file_name']).intersection(basenames)
    if not common_files:
        raise ValueError("没有找到图像文件与标签文件的交集")

    matched = df[df['file_name'].isin(common_files)]

    # Binarize the DR grade: grade 0 -> healthy (0), anything else -> diseased (1).
    file_to_label = {
        name: (0 if grade == 0 else 1)
        for name, grade in zip(matched['file_name'], matched['dr_grade'])
    }

    # Filter paths in their original order and look up each label.
    filtered_img_paths = [p for p in img_paths if os.path.basename(p) in common_files]
    labels = [file_to_label[os.path.basename(p)] for p in filtered_img_paths]

    print(f"成功匹配 {len(filtered_img_paths)} 张图像和标签")
    print(f"丢弃了 {len(img_paths) - len(filtered_img_paths)} 张未匹配的图像")

    return filtered_img_paths, np.array(labels)


# ==================== Configuration ====================
CONFIG = {
    'seed': 42,  # random seed for reproducibility
    'img_dir': 'datasets/ddr/images_segmented',  # directory of input .jpg images
    'label_file': 'datasets/ddr/DR_grading_sampled.csv',  # CSV with file_name/dr_grade columns
    'output_dir': 'Clustering/results',  # where metrics/details/features are written
    'batch_size': 32,  # batch size for feature extraction
    'n_clusters': 2,  # binary clustering: healthy vs diseased
    'linkage': 'ward',  # agglomerative linkage criterion
    'device': 'cuda:0' if torch.cuda.is_available() else 'cpu'  # prefer GPU when available
}

def main():
    """Run the clustering analysis pipeline end to end.

    Steps: seed RNGs, gather image paths, extract ResNet18 features,
    align labels from the CSV, cluster with AgglomerativeClustering,
    evaluate against ground truth, and persist metrics/details/features.
    """
    # 3. Seed all RNGs for reproducibility.
    seed_everything(seed=CONFIG['seed'])

    # 4. Gather image paths.
    if not os.path.exists(CONFIG['img_dir']):
        raise FileNotFoundError(f"图像目录不存在: {CONFIG['img_dir']}")

    # BUG FIX: os.listdir() order is platform/filesystem dependent; sort the
    # names so repeated runs process images in the same order given the seed.
    img_paths = [os.path.join(CONFIG['img_dir'], fname)
                 for fname in sorted(os.listdir(CONFIG['img_dir'])) if fname.endswith('.jpg')]
    if len(img_paths) == 0:
        raise ValueError(f"在 {CONFIG['img_dir']} 中未找到任何.jpg图像")

    print(f"找到 {len(img_paths)} 张图像")

    # 5. The label file must exist before doing any heavy work.
    if not os.path.exists(CONFIG['label_file']):
        raise FileNotFoundError(f"标签文件不存在: {CONFIG['label_file']}")

    # 6. Extract features; images that failed to load are dropped and
    #    img_paths is replaced with the filtered list.
    device = torch.device(CONFIG['device'])
    print(f"使用设备: {device}")
    features, img_paths = extract_features(img_paths, device, batch_size=CONFIG['batch_size'])

    # 7. Align labels with the successfully featurized images.
    filtered_img_paths, labels_true = load_labels(CONFIG['label_file'], img_paths)

    # BUG FIX: load_labels may drop images that have no CSV entry, but the
    # original code never filtered `features` in lockstep, leaving feature
    # rows misaligned with labels (and crashing accuracy_score on a length
    # mismatch). Keep only the rows of surviving paths; load_labels preserves
    # the original ordering, so index filtering is sufficient.
    if len(filtered_img_paths) != len(img_paths):
        kept = set(filtered_img_paths)
        keep_idx = [i for i, p in enumerate(img_paths) if p in kept]
        features = features[keep_idx]
    img_paths = filtered_img_paths

    # 8. Replace NaN feature values with zeros before scaling.
    if np.any(np.isnan(features)):
        print("发现NaN值，替换为0。")
        features = np.nan_to_num(features)

    # 9. Standardize features (zero mean, unit variance per dimension).
    scaler = StandardScaler()
    features = scaler.fit_transform(features)

    # 10. Agglomerative clustering into the configured number of clusters.
    model = AgglomerativeClustering(n_clusters=CONFIG['n_clusters'], linkage=CONFIG['linkage'])
    labels_pred = model.fit_predict(features)

    # 11. Evaluate the clustering.
    # 11.1 Cluster ids are arbitrary: try both assignments, keep the better one.
    acc_direct = accuracy_score(labels_true, labels_pred)
    acc_flipped = accuracy_score(labels_true, 1 - labels_pred)
    best_acc = max(acc_direct, acc_flipped)
    aligned_labels = labels_pred if acc_direct > acc_flipped else 1 - labels_pred

    print(f"\n===== 聚类评估结果 =====")
    print(f"对齐后准确率: {best_acc:.4f}")
    # Pin the label order so cm.ravel() always yields (tn, fp, fn, tp),
    # even if one class happens to be absent from the data.
    cm = confusion_matrix(labels_true, aligned_labels, labels=[0, 1])
    print(f"混淆矩阵:\n{cm}")

    # 11.2 ARI is invariant to label permutation.
    ari = adjusted_rand_score(labels_true, labels_pred)
    print(f"ARI: {ari:.4f}")

    # 11.3 Silhouette measures cohesion/separation without ground truth.
    silhouette = silhouette_score(features, labels_pred)
    print(f"轮廓系数: {silhouette:.4f}")

    # 11.4 Detailed per-class report.
    print(f"\n分类报告:")
    print(classification_report(labels_true, aligned_labels, target_names=['健康', '病变']))

    # 12. Save evaluation metrics to CSV.
    # Row layout of cm: [[tn, fp], [fn, tp]] (rows = true, cols = predicted).
    tn, fp, fn, tp = cm.ravel()

    # BUG FIX: precision and recall for class 0 were swapped. Precision of
    # class 0 uses the predicted-0 column (tn + fn); recall of class 0 uses
    # the true-0 row (tn + fp). F1 values were unaffected (harmonic mean is
    # symmetric), but the Precision_0/Recall_0 CSV columns were wrong.
    precision_0 = tn / (tn + fn) if (tn + fn) > 0 else 0
    recall_0 = tn / (tn + fp) if (tn + fp) > 0 else 0
    f1_0 = 2 * (precision_0 * recall_0) / (precision_0 + recall_0) if (precision_0 + recall_0) > 0 else 0

    precision_1 = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall_1 = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1_1 = 2 * (precision_1 * recall_1) / (precision_1 + recall_1) if (precision_1 + recall_1) > 0 else 0

    # Macro-averaged F1 across the two classes.
    f1_macro = (f1_0 + f1_1) / 2

    results_metrics = pd.DataFrame({
        'Accuracy': [best_acc],
        'ARI': [ari],
        'Silhouette': [silhouette],
        'F1_Macro': [f1_macro],
        'F1_Class0': [f1_0],
        'F1_Class1': [f1_1],
        'TN': [tn],
        'FP': [fp],
        'FN': [fn],
        'TP': [tp],
        'Precision_0': [precision_0],
        'Recall_0': [recall_0],
        'Precision_1': [precision_1],
        'Recall_1': [recall_1],
    })

    # Ensure the output directory exists.
    os.makedirs(CONFIG['output_dir'], exist_ok=True)

    # Persist the summary metrics.
    metrics_file = os.path.join(CONFIG['output_dir'], 'external_validation_ddr.csv')
    results_metrics.to_csv(metrics_file, index=False)
    print(f"\n评估指标已保存到: {metrics_file}")

    # 13. Persist per-image clustering details.
    results_detail = pd.DataFrame({
        'image_path': img_paths,
        'image_name': [os.path.basename(p) for p in img_paths],
        'true_label': labels_true,
        'cluster_label': labels_pred,
        'aligned_label': aligned_labels
    })
    detail_file = os.path.join(CONFIG['output_dir'], 'clustering_details.csv')
    results_detail.to_csv(detail_file, index=False)
    print(f"聚类详细结果已保存到: {detail_file}")

    # 14. Persist features and labels for later visualization.
    features_file = os.path.join(CONFIG['output_dir'], 'features_and_labels.npz')
    np.savez(features_file,
             features=features,
             labels_true=labels_true,
             labels_pred=labels_pred,
             aligned_labels=aligned_labels,
             img_paths=np.array(img_paths))
    print(f"特征向量已保存到: {features_file}")
    print(f"\n所有结果已保存完成！")

if __name__ == '__main__':
    main()
