import os
# Must be set BEFORE albumentations is imported: it suppresses the library's
# online version/update check at import time.
os.environ["NO_ALBUMENTATIONS_UPDATE"] = "1"

import shutil
import argparse
from pathlib import Path
from PIL import Image
import cv2
from pathlib import Path
import numpy as np
from sklearn.cluster import KMeans
import torch
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import open_clip
import albumentations as A

# Device selection: the CUDA line is kept for reference, but CPU is forced here.
# device = "cuda" if torch.cuda.is_available() else "cpu"
device = "cpu"

def is_valid_jpg(file_path):
    """Return True iff *file_path* has a .jpg/.jpeg suffix and really is a JPEG.

    Any error (unreadable file, non-image content, bad path object) yields False.
    """
    try:
        has_jpg_suffix = file_path.suffix.lower() in ('.jpg', '.jpeg')
        if not has_jpg_suffix:
            return False
        # Verify the actual container format, not just the extension.
        with Image.open(file_path) as img:
            return img.format == 'JPEG'
    except Exception:
        return False

def is_low_quality_image(image_path, min_size=32, max_aspect=10.0):
    """Return True when the image is unreadable, too small, or too elongated.

    Args:
        image_path: path to the image file.
        min_size: minimum allowed length of the shorter side, in pixels.
        max_aspect: maximum allowed width/height (or height/width) ratio.

    Returns:
        True if the image should be discarded, False if it passes all checks.
    """
    try:
        img = cv2.imread(str(image_path))
        if img is None:  # unreadable / corrupt file
            return True
        h, w = img.shape[:2]
        if min(h, w) < min_size:
            return True
        if max(w / h, h / w) > max_aspect:
            return True
        return False
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        return True

def load_clip_model():
    """Instantiate the ViT-B/32 open_clip model (laion2b weights) in eval mode on `device`.

    Returns:
        (model, preprocess) — the CLIP model and its matching image transform.
    """
    model, _, preprocess = open_clip.create_model_and_transforms(
        'ViT-B-32',
        pretrained='laion2b_s34b_b79k',
    )
    return model.eval().to(device), preprocess

def extract_image_features(model, preprocess, image_paths):
    """Encode each image with CLIP and L2-normalize the embeddings.

    Images that fail to load or encode are skipped (with a warning printed).

    Returns:
        (features, valid_paths): an (N x D) numpy array and the list of paths
        that were successfully encoded, in matching order.
    """
    feats = []
    ok_paths = []
    for img_path in image_paths:
        try:
            rgb = Image.open(img_path).convert("RGB")
            batch = preprocess(rgb).unsqueeze(0).to(device)
            with torch.no_grad():
                emb = model.encode_image(batch)
                emb /= emb.norm(dim=-1, keepdim=True)
            vec = emb.cpu().numpy().flatten()
        except Exception as exc:
            print(f"⚠️ 特征提取失败: {img_path} - {exc}")
            continue
        feats.append(vec)
        ok_paths.append(img_path)
    return np.array(feats), ok_paths

def detect_theme_vector(features, n_clusters=5):
    """Estimate a single unit-norm "theme" vector for a set of CLIP features.

    With few samples (len(features) <= n_clusters) the theme is the normalized
    mean feature; otherwise it is the (normalized) centroid of the largest
    KMeans cluster.

    Args:
        features: (N x D) array of image feature vectors.
        n_clusters: number of clusters to use when enough samples exist.

    Returns:
        A unit-norm 1-D numpy vector, or None when *features* is empty.
    """
    if len(features) == 0:
        return None
    if len(features) <= n_clusters:
        # Fix: normalize the mean as well, so both branches consistently
        # return a unit vector (the KMeans branch already did).
        mean_vec = np.mean(features, axis=0)
        norm = np.linalg.norm(mean_vec)
        return mean_vec if norm == 0 else mean_vec / norm
    kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    labels = kmeans.fit_predict(features)
    dominant_cluster = np.argmax(np.bincount(labels))
    theme_vector = kmeans.cluster_centers_[dominant_cluster]
    return theme_vector / np.linalg.norm(theme_vector)

# === Data-augmentation pipeline ===
def get_augmentation_pipeline():
    """Build the albumentations pipeline used to synthesize extra training images."""
    blur_variants = A.OneOf(
        [
            A.MotionBlur(p=0.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ],
        p=0.2,
    )
    transforms = [
        A.HorizontalFlip(p=0.5),
        A.Rotate(limit=15, p=0.7),
        A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
        A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
        # std_range avoids the deprecated GaussNoise var_limit warning (~variance 9-49).
        A.GaussNoise(std_range=(0.012, 0.028), p=0.3),
        blur_variants,
        # Affine is the modern replacement for ShiftScaleRotate.
        A.Affine(
            scale=(0.9, 1.1),
            translate_percent=(0.0, 0.1),
            rotate=(-10, 10),
            p=0.5,
        ),
    ]
    return A.Compose(transforms)

def augment_images_and_save(image_paths, target_dir, aug_per_image=5):
    """Write *aug_per_image* augmented copies of every readable image into *target_dir*.

    Unreadable images are skipped; per-image failures are reported but do not
    abort the run.

    Returns:
        List of the paths that were written.
    """
    pipeline = get_augmentation_pipeline()
    written = []
    for src in image_paths:
        try:
            bgr = cv2.imread(str(src))
            if bgr is None:
                continue
            # albumentations works in RGB; cv2 reads BGR.
            rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
            for idx in range(aug_per_image):
                out_rgb = pipeline(image=rgb)['image']
                out_path = target_dir / f"{src.stem}_aug_{idx:03d}{src.suffix}"
                cv2.imwrite(
                    str(out_path),
                    cv2.cvtColor(out_rgb, cv2.COLOR_RGB2BGR),
                    [int(cv2.IMWRITE_JPEG_QUALITY), 95],
                )
                written.append(out_path)
        except Exception as exc:
            print(f"⚠️ 增强失败: {src} - {exc}")
    return written

# === 主清洗函数 ===
def split_directory_by_topics(src_dir, removed_base_dir, image_paths, features, 
                            min_cluster_ratio=0.1):
    """
    将单个目录按视觉主题拆分为多个子目录
    
    Args:
        src_dir: 原始目录 (Path)
        removed_base_dir: removed 根目录 (Path)
        image_paths: 图像路径列表
        features: CLIP 特征
        min_cluster_ratio: 小簇阈值（低于则移入 removed）
    """
    n_images = len(features)
    rel_name = src_dir.name
    removed_dir = removed_base_dir / rel_name
    removed_dir.mkdir(parents=True, exist_ok=True)

    if n_images <= 2:
        print(f"  ⚠️ 图像太少（{n_images}），跳过拆分，保留在原目录。")
        return

    # 自动设置 K
    if n_images < 20:
        k = min(2, n_images)
    elif n_images < 100:
        k = min(4, n_images)
    else:
        k = min(6, n_images)

    # 聚类
    kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
    labels = kmeans.fit_predict(features)
    
    # 统计簇大小
    unique_labels, counts = np.unique(labels, return_counts=True)
    total = len(labels)
    cluster_info = [(label, count, count / total) for label, count in zip(unique_labels, counts)]
    cluster_info.sort(key=lambda x: x[1], reverse=True)  # 按数量降序

    print(f"  📊 聚类结果 (K={k}):")
    for label, count, ratio in cluster_info:
        status = "📁 拆分" if ratio >= min_cluster_ratio else "🗑️ 移除"
        print(f"    - 簇 {label}: {count} 张 ({ratio:.1%}) → {status}")

    # 分离保留簇 vs 小簇
    keep_labels = []
    minor_labels = set()
    for label, _, ratio in cluster_info:
        if ratio >= min_cluster_ratio:
            keep_labels.append(label)
        else:
            minor_labels.add(label)

    # 1. 移除小簇图像到 removed
    for img_path, label in zip(image_paths, labels):
        if label in minor_labels:
            target = removed_dir / img_path.name
            shutil.move(str(img_path), str(target))

    # 2. 为每个保留簇创建新目录并移动图像
    parent_dir = src_dir.parent
    new_dirs = []
    for idx, label in enumerate(keep_labels, 1):
        new_dir_name = f"{rel_name}-{idx:03d}"
        new_dir = parent_dir / new_dir_name
        new_dir.mkdir(exist_ok=True)
        new_dirs.append(new_dir)

        # 移动该簇所有图像
        for img_path, lbl in zip(image_paths, labels):
            if lbl == label and lbl not in minor_labels:
                target = new_dir / img_path.name
                # 避免重复移动（原文件可能已被移走）
                if img_path.exists():
                    shutil.move(str(img_path), str(target))

    # 3. 删除原始空目录（可选）
    if src_dir.exists() and not any(src_dir.iterdir()):
        src_dir.rmdir()
        print(f"  🧹 原始目录 {rel_name} 已拆分，删除空目录。")

    print(f"  ✅ 拆分为 {len(new_dirs)} 个新目录: {[d.name for d in new_dirs]}")

def remove_minor_clusters(image_paths, features, removed_dir, min_cluster_ratio=0.1):
    """Drop clusters whose share is below *min_cluster_ratio*, keeping every major theme.

    Args:
        image_paths: image paths aligned with *features*.
        features: CLIP feature matrix (N x D).
        removed_dir: directory that receives the discarded files.
        min_cluster_ratio: minimum cluster share to survive (e.g. 0.1 == 10%).
    """
    n_images = len(features)
    if n_images <= 3:
        print(f"  ⚠️ 图像太少（{n_images}），跳过聚类，全部保留。")
        return

    # Choose K from the sample count.
    if n_images < 20:
        k = min(2, n_images)
    elif n_images < 100:
        k = min(4, n_images)
    else:
        k = min(6, n_images)

    # Cluster and tally per-cluster shares.
    labels = KMeans(n_clusters=k, random_state=42, n_init=10).fit_predict(features)
    uniq, counts = np.unique(labels, return_counts=True)
    total = len(labels)
    cluster_info = sorted(
        ((lbl, cnt, cnt / total) for lbl, cnt in zip(uniq, counts)),
        key=lambda item: item[2],
        reverse=True,  # descending by share
    )

    print(f"  📊 聚类结果 (K={k}):")
    for lbl, cnt, ratio in cluster_info:
        status = "✅ 保留" if ratio >= min_cluster_ratio else "🗑️ 移除"
        print(f"    - 簇 {lbl}: {cnt} 张 ({ratio:.1%}) → {status}")

    minor = {lbl for lbl, _, ratio in cluster_info if ratio < min_cluster_ratio}
    if not minor:
        print(f"  ✅ 无小簇，全部保留。")
        return

    # Move every image belonging to a minor cluster.
    removed_count = 0
    for img_path, lbl in zip(image_paths, labels):
        if lbl in minor:
            shutil.move(str(img_path), str(removed_dir / img_path.name))
            removed_count += 1

    print(f"  🧹 共移除 {removed_count} 张小簇图像。")

def keep_only_dominant_cluster(image_paths, features, removed_dir, min_cluster_size_ratio=0.4):
    """Keep only the largest KMeans cluster; move every other image to *removed_dir*.

    If the dominant cluster covers less than *min_cluster_size_ratio* of the
    images, nothing is moved — the data is considered too heterogeneous to
    trust a single theme.

    Args:
        image_paths: image paths aligned one-to-one with *features*.
        features: CLIP feature matrix (N x D).
        removed_dir: directory that receives the discarded files.
        min_cluster_size_ratio: minimum dominant-cluster share required to prune.
    """
    n_images = len(features)
    if n_images <= 2:
        print(f"  ⚠️ 图像太少（{n_images}），跳过聚类，全部保留。")
        return

    # Choose K based on how many samples are available.
    if n_images < 20:
        k = min(2, n_images)
    elif n_images < 100:
        k = min(3, n_images)
    else:
        k = min(5, n_images)

    labels = KMeans(n_clusters=k, random_state=42, n_init=10).fit_predict(features)

    # Find the largest cluster and its share.
    uniq, counts = np.unique(labels, return_counts=True)
    sizes = dict(zip(uniq, counts))
    dominant_label = max(sizes, key=sizes.get)
    dominant_size = sizes[dominant_label]
    dominant_ratio = dominant_size / n_images

    print(f"  📊 聚类结果: K={k}, 主簇占比 = {dominant_ratio:.1%} ({dominant_size}/{n_images})")

    # Too scattered -> keep everything rather than risk deleting good data.
    if dominant_ratio < min_cluster_size_ratio:
        print(f"  ⚠️ 主簇占比低于 {min_cluster_size_ratio:.0%}，保留所有图像（可能数据太杂）")
        return

    # Move everything outside the dominant cluster.
    removed_count = 0
    for img_path, lbl in zip(image_paths, labels):
        if lbl == dominant_label:
            continue
        shutil.move(str(img_path), str(removed_dir / img_path.name))
        removed_count += 1
        print(f"  🗑️ [非主主题] {img_path.name}")

    print(f"  ✅ 保留主簇 {dominant_size} 张，移除 {removed_count} 张")

def clean_single_category(src_dir, removed_base_dir, model, preprocess,
                         theme_sim_threshold=0.25, min_cluster_ratio=0.4, min_images=10, aug_per_image=5):
    """Clean one category directory.

    Pipeline: move non-JPG and low-quality files to the removed tree, then
    either augment the category (when it has fewer than *min_images* images)
    or remove images whose CLIP embedding deviates from the category theme.

    Args:
        src_dir: category directory to clean.
        removed_base_dir: root directory collecting removed files (a
            same-named subdirectory is created under it).
        model: loaded CLIP model.
        preprocess: CLIP image transform matching *model*.
        theme_sim_threshold: minimum cosine similarity to the theme vector
            for an image to be kept.
        min_cluster_ratio: reserved for the (currently disabled) cluster-based
            strategies; kept for interface compatibility.
        min_images: below this count the category is augmented instead of cleaned.
        aug_per_image: augmented copies generated per source image when augmenting.
    """
    src_path = Path(src_dir)
    rel_path = src_path.name
    removed_dir = removed_base_dir / rel_path
    removed_dir.mkdir(parents=True, exist_ok=True)

    if not src_path.is_dir():
        print(f"⚠️ 跳过非目录: {src_path}")
        return

    all_files = [f for f in src_path.iterdir() if f.is_file()]
    if not all_files:
        print(f"📁 目录 {rel_path} 为空，跳过。")
        return

    # Step 1: keep valid JPGs, move everything else to removed.
    jpg_files = [f for f in all_files if is_valid_jpg(f)]
    jpg_set = set(jpg_files)  # fix: O(1) membership instead of O(n) list scans
    non_jpg = [f for f in all_files if f not in jpg_set]
    for f in non_jpg:
        shutil.move(str(f), str(removed_dir / f.name))
        print(f"🗑️ [{rel_path}] [非JPG] {f.name}")

    # Step 2: drop low-quality images (tiny, extreme aspect ratio, unreadable).
    clean_files = []
    for f in jpg_files:
        if is_low_quality_image(f):
            shutil.move(str(f), str(removed_dir / f.name))
            print(f"🗑️ [{rel_path}] [低质量] {f.name}")
        else:
            clean_files.append(f)

    current_count = len(clean_files)
    print(f"📊 [{rel_path}] 初筛后有效图像: {current_count}")

    if current_count == 0:
        print(f"❌ [{rel_path}] 无有效图像，跳过。")
        return

    # Branch: too few images -> augment; otherwise clean by theme similarity.
    if current_count < min_images:
        print(f"📈 [{rel_path}] 图像不足 {min_images}，启动数据增强扩增...")
        new_files = augment_images_and_save(clean_files, src_path, aug_per_image=aug_per_image)
        print(f"✨ [{rel_path}] 生成 {len(new_files)} 张增强图像，保存至原目录。")
        # All original clean_files are kept when augmenting.
        return

    print(f"🎯 [{rel_path}] 检测多主题并保留主簇...")
    # NOTE: alternative strategies exist in this module
    # (split_directory_by_topics, remove_minor_clusters,
    # keep_only_dominant_cluster); the theme-vector filter below is the
    # active one.
    # Fix: features were previously extracted TWICE (two full CLIP passes
    # over the same files); extract once.
    features, valid_files = extract_image_features(model, preprocess, clean_files)
    if len(features) == 0:
        print(f"❌ [{rel_path}] 特征提取失败，跳过清洗。")
        return

    theme_vector = detect_theme_vector(features)
    if theme_vector is None:
        print(f"❌ [{rel_path}] 无法生成主题向量。")
        return

    # Keep images whose cosine similarity to the theme meets the threshold.
    similarities = cosine_similarity(features, theme_vector.reshape(1, -1)).flatten()
    kept = 0
    for img_path, sim in zip(valid_files, similarities):
        if sim < theme_sim_threshold:
            shutil.move(str(img_path), str(removed_dir / img_path.name))
            print(f"🗑️ [{rel_path}] [偏离主题] {img_path.name} (sim={sim:.3f})")
        else:
            kept += 1
    print(f"✅ [{rel_path}] 主题清洗完成，保留 {kept} 张。")

def main(data_dir, removed_dir, theme_sim_threshold=0.25, min_cluster_ratio=0.4, min_images=10, aug_per_image=5):
    """Clean every category subdirectory under *data_dir*.

    Rejected files are collected under *removed_dir* (created if missing).

    Raises:
        ValueError: when *data_dir* does not exist or contains no subdirectories.
    """
    data_path = Path(data_dir)
    removed_path = Path(removed_dir)
    removed_path.mkdir(parents=True, exist_ok=True)

    if not data_path.is_dir():
        raise ValueError(f"数据目录不存在: {data_dir}")

    subdirs = [entry for entry in data_path.iterdir() if entry.is_dir()]
    if not subdirs:
        raise ValueError(f"数据目录 {data_dir} 下没有子目录！")

    print(f"🔍 发现 {len(subdirs)} 个类别目录，加载 CLIP 模型...")
    model, preprocess = load_clip_model()
    print("✅ CLIP 模型加载完成。")

    banner = '=' * 60
    for subdir in sorted(subdirs):
        print(f"\n{banner}")
        print(f"处理类别: {subdir.name}")
        print(f"{banner}")
        clean_single_category(
            src_dir=subdir,
            removed_base_dir=removed_path,
            model=model,
            preprocess=preprocess,
            theme_sim_threshold=theme_sim_threshold,
            min_cluster_ratio=min_cluster_ratio,
            min_images=min_images,
            aug_per_image=aug_per_image,
        )

    print(f"\n🎉 所有类别处理完成！")
    print(f"  - 移除文件路径: {removed_dir}")
    print(f"  - 少量类别已通过增强扩增")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="批量清洗 + 少量类别自动增强扩增"
    )
    parser.add_argument("data_dir", type=str, help="根数据目录（含多个子类别）")
    parser.add_argument("removed_dir", type=str, help="移除文件根目录")
    parser.add_argument("--threshold", type=float, default=0.25,
                        help="主题相似度阈值（默认 0.25）")
    parser.add_argument("--min-cluster-ratio", type=float, default=0.4,
                        help="主簇最小占比（低于则保留全部），默认 0.4（40%）")    
    parser.add_argument("--min-images", type=int, default=10,
                        help="触发主题清洗的最小图像数（默认 10）")
    parser.add_argument("--aug-per-image", type=int, default=5,
                        help="每张图生成的增强图数量（默认 5）")

    args = parser.parse_args()
    main(
        args.data_dir,
        args.removed_dir,
        args.threshold,
        args.min_cluster_ratio,
        args.min_images,
        args.aug_per_image
    )