#!/usr/bin/env python

"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import numpy as np
import json
from tqdm import tqdm
from sklearn.cluster import MiniBatchKMeans
import rasterio
import torchvision.transforms as transforms
import psutil, time, gc
import kagglehub

class CloudDetectionDataset(Dataset):
    """
    Dataset for the 38-Cloud benchmark: loads the four bands (R/G/B/NIR)
    plus the ground-truth mask, aligning files across band folders even
    when each folder uses a different filename prefix.

    Expected directory layout:
      CloudDetection/
        38-Cloud_training/
          train_red/   *.tif
          train_green/ *.tif
          train_blue/  *.tif
          train_nir/   *.tif
          train_gt/    *.tif
        38-Cloud_test/
          test_red/    *.tif
          test_green/  *.tif
          test_blue/   *.tif
          test_nir/    *.tif
          test_gt/     *.tif

    Each item is a tuple ``(image, mask)`` where ``image`` is a float32
    tensor of shape (4, H, W) scaled to [0, 1] and ``mask`` is an int64
    tensor of shape (H, W) with values in {0, 1}.
    """
    def __init__(self, root_dir, mode='train', transform=None):
        super().__init__()
        assert mode in ['train', 'test']
        sub = '38-Cloud_training' if mode == 'train' else '38-Cloud_test'
        base = os.path.join(root_dir, sub)

        self.mode = mode
        self.transform = transform
        self.ext = '.tif'

        # One folder per spectral band; filenames differ only by prefix.
        self.band_dirs = {
            'red':   os.path.join(base, f'{mode}_red'),
            'green': os.path.join(base, f'{mode}_green'),
            'blue':  os.path.join(base, f'{mode}_blue'),
            'nir':   os.path.join(base, f'{mode}_nir'),
        }
        self.gt_dir = os.path.join(base, f'{mode}_gt')

        # Candidate prefixes to strip so the remaining "tail" (the patch id)
        # can be used to match the same scene across band folders.
        self.prefix_candidates = {
            'red':   ['red_', 'B4_', 'band4_', 'R_'],
            'green': ['green_', 'B3_', 'band3_', 'G_'],
            'blue':  ['blue_', 'B2_', 'band2_', 'B_'],
            'nir':   ['nir_', 'B5_', 'band5_', 'NIR_'],
            'gt':    ['gt_', 'mask_', 'label_', 'cloud_', 'gtmask_'],
        }

        # tail -> original filename stem, per band and for the GT masks.
        self.index = {b: self._build_index(self.band_dirs[b], b) for b in ['red','green','blue','nir']}
        self.index_gt = self._build_index(self.gt_dir, 'gt')

        # Keep only patch ids present in all four bands AND the GT folder.
        ids = set(self.index['red'].keys())
        ids &= set(self.index['green'].keys())
        ids &= set(self.index['blue'].keys())
        ids &= set(self.index['nir'].keys())
        ids &= set(self.index_gt.keys())

        self.ids = sorted(list(ids))
        if not self.ids:
            raise RuntimeError("没有找到可用样本，请检查数据根目录与命名规则。")
        print(f"[{mode}] 可用样本数: {len(self.ids)}")

    def _strip_prefix(self, stem, band_key):
        # Remove the first matching band prefix; unchanged if none matches.
        for p in self.prefix_candidates.get(band_key, []):
            if stem.startswith(p):
                return stem[len(p):]
        return stem

    def _build_index(self, folder, band_key):
        # Map "tail" (prefix-stripped stem) -> full filename stem for every
        # .tif in `folder`. A missing GT folder yields an empty mapping
        # (tolerated for the test split); a missing band folder is fatal.
        mapping = {}
        if not os.path.isdir(folder):
            if band_key != 'gt':
                raise FileNotFoundError(f"目录不存在: {folder}")
            return mapping
        for f in os.listdir(folder):
            if not f.lower().endswith(self.ext):
                continue
            stem = os.path.splitext(f)[0]
            tail = self._strip_prefix(stem, band_key)
            mapping[tail] = stem
        return mapping

    def __len__(self):
        return len(self.ids)

    def _resolve(self, folder, stem):
        # Rebuild the full path from folder + stem + extension.
        return os.path.join(folder, stem + self.ext)

    def __getitem__(self, idx):
        pid = self.ids[idx]

        # Read the four spectral bands for this patch id.
        bands = []
        for b in ['red','green','blue','nir']:
            stem = self.index[b][pid]
            path = self._resolve(self.band_dirs[b], stem)
            with rasterio.open(path) as src:
                arr = src.read(1).astype(np.float32)
            arr = np.nan_to_num(arr, nan=0.0)
            arr = arr / 10000.0               # assumes raw range 0..10000 — scale to [0,1]
            arr = np.clip(arr, 0.0, 1.0)
            bands.append(arr)
        img = np.stack(bands, axis=0)          # (4,H,W)

        # Ground-truth mask -> binary {0,1}.
        gt_stem = self.index_gt[pid]
        gt_path = self._resolve(self.gt_dir, gt_stem)
        with rasterio.open(gt_path) as src:
            m = src.read(1)
        m = (m > 0).astype(np.int64)

        img_t  = torch.from_numpy(img)
        mask_t = torch.from_numpy(m)
        if self.transform is not None:
            # Transform (e.g. normalization) applies to the image only.
            img_t = self.transform(img_t)
        return img_t, mask_t

def weight_sharing(model, num_clusters=16, device='cpu',
                   sample_threshold=100_000, sample_size=50_000):
    """
    Compress the model in place via k-means weight sharing (MiniBatchKMeans).

    Only multi-dimensional trainable tensors (conv / linear kernels) are
    clustered; 1-D parameters (biases, BatchNorm scales/shifts) are skipped.

    Args:
        model: nn.Module to compress (modified in place and returned).
        num_clusters: requested shared values per layer; clamped to the
            layer's actual weight count so tiny layers don't crash KMeans.
        device: device the rewritten parameters are moved to.
        sample_threshold: if a layer has more weights than this, fit on a
            random subsample instead of all weights.
        sample_size: subsample size used in that case.

    Returns:
        The same model instance with clustered parameters.
    """
    for name, param in model.named_parameters():
        # Skip frozen tensors and 1-D parameters (biases, norm params).
        if not param.requires_grad or param.dim() <= 1:
            continue

        # Flatten to an (n, 1) array for sklearn.
        w_flat = param.data.cpu().numpy().reshape(-1, 1)
        n = w_flat.shape[0]

        # Fix: a layer may hold fewer weights than requested clusters;
        # KMeans raises if n_clusters > n_samples.
        k = min(num_clusters, n)

        # Subsample very large layers so the fit stays fast.
        if n > sample_threshold:
            idx = np.random.choice(n, size=min(sample_size, n), replace=False)
            w_sample = w_flat[idx]
        else:
            w_sample = w_flat

        # Fix: the old unconditional `sample_size // 10` could exceed the
        # sample (or be 0 for small sample_size). Clamp to a valid range.
        batch = max(k, min(w_sample.shape[0], max(1, sample_size // 10)))
        kmeans = MiniBatchKMeans(n_clusters=k,
                                 batch_size=batch,
                                 random_state=42)
        kmeans.fit(w_sample)

        # Snap every weight to its nearest cluster centroid.
        centers = kmeans.cluster_centers_.flatten()
        labels_full = kmeans.predict(w_flat)
        new_w = centers[labels_full].reshape(param.data.shape)
        param.data = torch.from_numpy(new_w).to(device).type(param.data.dtype)

        print(f"✓ layer `{name}`: {n} → {k} shared weights")

    return model


class CloudNet(nn.Module):
    """
    Cloud-Net model (U-Net-style encoder/decoder) for cloud detection.

    Input:  (B, 4, H, W) — R/G/B/NIR bands; H and W must be divisible by 16
            (four 2x poolings) for the skip-connection concatenations to align.
    Output: (B, num_classes, H, W) raw logits (no softmax applied here).
    """

    def __init__(self, num_classes=2, dropout_rate=0.1):
        super(CloudNet, self).__init__()

        # Encoder: four double-conv stages, channels 4→64→128→256→512.
        self.enc1 = self._make_layer(4, 64, dropout_rate)
        self.enc2 = self._make_layer(64, 128, dropout_rate)
        self.enc3 = self._make_layer(128, 256, dropout_rate)
        self.enc4 = self._make_layer(256, 512, dropout_rate)

        # Bottleneck at 1/16 resolution.
        self.center = self._make_layer(512, 1024, dropout_rate)

        # Decoder: each stage upsamples then is concatenated with the
        # matching encoder output, doubling its channel count.
        self.dec4 = self._make_decoder_layer(1024, 512)
        self.dec3 = self._make_decoder_layer(1024, 256)  # 1024 = 512 + 512 (skip connection)
        self.dec2 = self._make_decoder_layer(512, 128)  # 512 = 256 + 256
        self.dec1 = self._make_decoder_layer(256, 64)  # 256 = 128 + 128

        # Final 1x1 classification conv on the last concatenated features.
        self.final = nn.Conv2d(128, num_classes, 1)  # 128 = 64 + 64

        # Shared 2x2 max-pool used between encoder stages.
        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    def _make_layer(self, in_channels, out_channels, dropout_rate=0.1):
        # Double 3x3 conv block: conv-BN-ReLU-dropout-conv-BN-ReLU.
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def _make_decoder_layer(self, in_channels, out_channels):
        # 2x transposed-conv upsample followed by a 3x3 refinement conv.
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def _initialize_weights(self):
        # Kaiming init for conv weights (ReLU fan-out), unit BN scale.
        # NOTE(review): ConvTranspose2d biases are left at their default
        # init (only weights are re-initialized) — presumably intentional.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        # Encoder path: pool before each deeper stage.
        enc1 = self.enc1(x)
        enc2 = self.enc2(self.pool(enc1))
        enc3 = self.enc3(self.pool(enc2))
        enc4 = self.enc4(self.pool(enc3))

        # Bottleneck.
        center = self.center(self.pool(enc4))

        # Decoder path with skip connections (channel-wise concatenation).
        dec4 = self.dec4(center)
        dec4 = torch.cat([dec4, enc4], dim=1)

        dec3 = self.dec3(dec4)
        dec3 = torch.cat([dec3, enc3], dim=1)

        dec2 = self.dec2(dec3)
        dec2 = torch.cat([dec2, enc2], dim=1)

        dec1 = self.dec1(dec2)
        dec1 = torch.cat([dec1, enc1], dim=1)

        # Per-pixel class logits.
        out = self.final(dec1)

        return out

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the cloud channel (class 1).

    Args:
        pred: (B, 2, H, W) raw logits.
        target: (B, H, W) integer mask with 1 = cloud.
        smooth: numerical smoothing term.

    Returns:
        Scalar tensor: 1 - mean per-sample Dice coefficient.
    """
    probs = torch.softmax(pred, dim=1)
    cloud_prob = probs[:, 1, :, :]
    cloud_gt = target.float()

    overlap = (cloud_prob * cloud_gt).sum(dim=(1, 2))
    denom = cloud_prob.sum(dim=(1, 2)) + cloud_gt.sum(dim=(1, 2))

    per_sample_dice = (2.0 * overlap + smooth) / (denom + smooth)
    return 1 - per_sample_dice.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal loss (Lin et al.): down-weights easy, well-classified pixels.

    Args:
        pred: (B, C, ...) raw logits.
        target: integer class indices matching F.cross_entropy's contract.
        alpha: global scaling factor.
        gamma: focusing exponent; larger = stronger down-weighting.

    Returns:
        Scalar mean focal loss.
    """
    per_elem_ce = F.cross_entropy(pred, target, reduction='none')
    prob_of_true = torch.exp(-per_elem_ce)
    modulated = alpha * torch.pow(1 - prob_of_true, gamma) * per_elem_ce
    return modulated.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """Segmentation metrics at a fixed probability threshold.

    Args:
        pred: (B, 2, H, W) raw logits; channel 1 is the cloud class.
        target: (B, H, W) binary ground truth.
        threshold: cutoff applied to the cloud probability.

    Returns:
        Dict with 'iou', 'f1', 'precision', 'recall', 'accuracy' as floats.
    """
    eps = 1e-8
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    p = (cloud_prob > threshold).float()
    t = target.float()

    # Confusion-matrix counts over the whole batch.
    tp = (p * t).sum()
    fp = (p * (1 - t)).sum()
    fn = ((1 - p) * t).sum()
    tn = ((1 - p) * (1 - t)).sum()

    # IoU from the same counts (intersection == tp).
    union = p.sum() + t.sum() - tp
    iou = tp / (union + eps)

    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item()
    }


def get_model_size_mb(model):
    """
    Return the serialized size of the model's state_dict in megabytes.

    Serializes to an in-memory buffer (io.BytesIO) instead of a temp file,
    which removes the filesystem-permission failure mode the original code
    worked around, and measures the real artifact — including buffers and
    non-trainable parameters that the old fallback estimate missed.
    """
    import io
    try:
        buffer = io.BytesIO()
        torch.save(model.state_dict(), buffer)
        return buffer.getbuffer().nbytes / (1024 * 1024)
    except Exception as e:
        # Fallback: rough estimate from parameter/buffer counts
        # (assumes 4 bytes per element, i.e. float32/int32 tensors).
        print(f"序列化失败: {e}")
        total = sum(p.numel() for p in model.parameters())
        total += sum(b.numel() for b in model.buffers())
        estimated_size_mb = total * 4 / (1024 * 1024)
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

def train_model(model, train_loader, val_loader, device, args):
    """Train the model with a combined CE + Dice + Focal loss.

    Uses AdamW + cosine LR schedule, gradient clipping, IoU-based model
    selection on the validation set, and early stopping (patience 10).

    Args:
        model: nn.Module to train (moved to `device` by the caller).
        train_loader / val_loader: DataLoaders yielding (images, masks).
        device: torch.device for tensors.
        args: namespace with epochs, learning_rate, weight_decay.

    Returns:
        (model with best-IoU weights loaded, best validation IoU, history dict).
    """
    print(f"开始训练Cloud-Net模型...")

    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay,
                            betas=(0.9, 0.999))

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    # Plain (unweighted) cross-entropy; class imbalance is handled by the
    # Dice and Focal terms of the combined loss instead.
    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = 0.0
    best_model_state = None
    train_history = {'loss': [], 'iou': [], 'val_iou': [], 'val_f1': []}

    # Early stopping: stop after `patience` epochs without val-IoU improvement.
    patience = 10
    patience_counter = 0

    for epoch in range(args.epochs):
        # --- Training phase ---
        model.train()
        running_loss = 0.0
        running_metrics = {'iou': 0.0, 'f1': 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Combined loss: 0.3*CE + 0.4*Dice + 0.3*Focal.
            ce_loss = ce_loss_fn(outputs, masks)
            d_loss = dice_loss(outputs, masks)
            f_loss = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)

            total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # Batch metrics at a fixed 0.5 threshold (for logging only).
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += total_loss.item()
            running_metrics['iou'] += metrics['iou']
            running_metrics['f1'] += metrics['f1']
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'IoU': f'{metrics["iou"]:.4f}',
                'F1': f'{metrics["f1"]:.4f}'
            })

        # Epoch-average training metrics.
        avg_train_loss = running_loss / num_batches
        avg_train_iou = running_metrics['iou'] / num_batches

        # --- Validation phase ---
        val_metrics = evaluate_model(model, val_loader, device)

        scheduler.step()

        # Keep a deep copy of the best-IoU weights for final restoration.
        if val_metrics['iou'] > best_iou:
            best_iou = val_metrics['iou']
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # Record per-epoch history.
        train_history['loss'].append(avg_train_loss)
        train_history['iou'].append(avg_train_iou)
        train_history['val_iou'].append(val_metrics['iou'])
        train_history['val_f1'].append(val_metrics['f1'])

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {avg_train_iou:.4f}, '
              f'Val IoU: {val_metrics["iou"]:.4f}, '
              f'Val F1: {val_metrics["f1"]:.4f}')

        # Early stopping.
        if patience_counter >= patience:
            print(f"早停在epoch {epoch + 1}, 最佳IoU: {best_iou:.4f}")
            break

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, best_iou, train_history


def evaluate_model(model, dataloader, device, threshold=0.5):
    """Evaluate segmentation metrics over a dataloader at a fixed threshold.

    Returns a dict of batch-averaged 'iou', 'f1', 'precision', 'recall',
    'accuracy' values.
    """
    model.eval()
    keys = ('iou', 'f1', 'precision', 'recall', 'accuracy')
    sums = dict.fromkeys(keys, 0.0)
    batches = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)
            batch_metrics = calculate_metrics_fixed(model(images), masks, threshold)
            for key in keys:
                sums[key] += batch_metrics[key]
            batches += 1

    # Average each metric over the number of batches.
    return {key: total / batches for key, total in sums.items()}


def count_parameters(model):
    """Return the total number of trainable parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

# ====== Spec-sheet ("photo version") evaluation helpers ======

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """Spec-sheet performance metrics: overall accuracy and precision.

    Accumulates pixel-level confusion counts over the whole dataloader and
    returns {"accuracy": ..., "precision": ...} as plain floats.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)
            cloud_prob = torch.softmax(model(images), dim=1)[:, 1, :, :]
            hard_pred = (cloud_prob > threshold).to(masks.dtype)

            pred_flat = hard_pred.view(-1).float()
            true_flat = masks.view(-1).float()

            tp += float((pred_flat * true_flat).sum().item())
            fp += float((pred_flat * (1 - true_flat)).sum().item())
            fn += float(((1 - pred_flat) * true_flat).sum().item())
            tn += float(((1 - pred_flat) * (1 - true_flat)).sum().item())

    total = tp + tn + fp + fn + eps
    return {"accuracy": float((tp + tn) / total), "precision": float(tp / (tp + fp + eps))}

def _measure_inference_memory_mb(model, sample_images, device):
    """Incremental memory (MB) consumed by a single forward pass.

    CUDA path: peak-allocated delta reported by the CUDA caching allocator.
    CPU path: process RSS delta via psutil — NOTE(review): RSS deltas are
    noisy (allocator reuse, GC timing), so treat the CPU number as a rough
    indication rather than an exact measurement.
    """
    model.eval()
    with torch.no_grad():
        _ = model(sample_images)  # warm-up pass so lazy allocations don't skew the measurement

    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        start = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_images)
        peak = torch.cuda.max_memory_allocated(device)
        delta = max(peak - start, 0)
        torch.cuda.empty_cache()
        return float(delta / (1024 * 1024))

    # CPU path: resident-set-size delta around one forward pass.
    proc = psutil.Process(os.getpid())
    gc.collect(); time.sleep(0.02)
    rss_before = proc.memory_info().rss
    with torch.no_grad():
        _ = model(sample_images)
    gc.collect(); time.sleep(0.02)
    rss_after = proc.memory_info().rss
    delta = max(rss_after - rss_before, 0)
    return float(delta / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """Spec-sheet efficiency metrics: parameter count, serialized model
    size (MB), and single-forward memory footprint (MB)."""
    # Grab the first batch as the probe input for the memory measurement.
    sample = None
    for batch_images, _ in dataloader:
        sample = batch_images.to(device)
        break
    if sample is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")

    return {
        "params": int(count_parameters(model)),
        "model_size_mb": float(get_model_size_mb(model)),
        "memory_mb": float(_measure_inference_memory_mb(model, sample, device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Combine performance and efficiency metrics into the spec-sheet report."""
    performance = evaluate_performance_spec(model, dataloader, device, threshold=threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    report = {
        "模型性能": {
            "准确率": performance["accuracy"],
            "精度": performance["precision"],
        },
        "模型效率": {
            "参数数量": efficiency["params"],
            "模型大小(MB)": efficiency["model_size_mb"],
            "内存占用(MB)": efficiency["memory_mb"],
        },
    }
    return report

def create_dataloaders(args):
    """Build train/val DataLoaders from the 38-Cloud training split (80/20)."""
    print("准备 38-Cloud 数据集...")

    # Per-channel normalization for R, G, B, NIR.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406, 0.5],
        std=[0.229, 0.224, 0.225, 0.2],
    )

    dataset = CloudDetectionDataset(
        root_dir=args.data_root,
        mode='train',
        transform=normalize,
    )

    # 80/20 random split of the training data.
    n_train = int(0.8 * len(dataset))
    train_dataset, val_dataset = random_split(dataset, [n_train, len(dataset) - n_train])

    # num_workers=0 / pin_memory=False: safest defaults on Windows & CPU.
    common = dict(batch_size=args.batch_size, num_workers=0, pin_memory=False)
    train_loader = DataLoader(train_dataset, shuffle=True, **common)
    val_loader = DataLoader(val_dataset, shuffle=False, **common)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")
    return train_loader, val_loader

def save_results(results, output_dir):
    """Serialize `results` to <output_dir>/compression_results.json.

    Recursively converts numpy containers and scalars to JSON-safe builtins.
    Fixes vs. the original: `np.generic` also covers `np.bool_` (which the
    old `(np.integer, np.floating)` check missed, making json.dump raise),
    tuples are now converted like lists, and the output directory is
    created if it does not exist.
    """
    os.makedirs(output_dir, exist_ok=True)
    results_path = os.path.join(output_dir, 'compression_results.json')

    def _to_builtin(obj):
        # numpy arrays -> nested lists.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.generic covers np.integer, np.floating AND np.bool_.
        if isinstance(obj, np.generic):
            return obj.item()
        if isinstance(obj, dict):
            return {key: _to_builtin(value) for key, value in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [_to_builtin(item) for item in obj]
        return obj

    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(_to_builtin(results), f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def download_dataset_if_missing(root_dir, dataset_name="sorour/38cloud-cloud-segmentation-in-satellite-images"):
    """
    Ensure the 38-Cloud dataset exists under `root_dir`, downloading it
    from KaggleHub if either split directory is missing.

    Fixes vs. the original: `os.rename` fails across filesystems (the
    kagglehub cache is commonly on a different volume) and when `root_dir`
    already exists; `shutil.move` handles cross-device moves, and an
    existing root is merged entry-by-entry instead of crashing.

    :param root_dir: dataset root directory
    :param dataset_name: KaggleHub dataset identifier
    :return: the dataset root path (== root_dir)
    """
    train_path = os.path.join(root_dir, '38-Cloud_training')
    test_path = os.path.join(root_dir, '38-Cloud_test')

    # Download only when one of the expected split directories is missing.
    if not os.path.isdir(train_path) or not os.path.isdir(test_path):
        import shutil
        print(f"数据集不存在，开始自动下载：{dataset_name}")
        path = kagglehub.dataset_download(dataset_name)
        print(f"数据集下载路径: {path}")
        if os.path.isdir(root_dir):
            # Root already exists (possibly partial): merge downloaded
            # entries into it one by one.
            for entry in os.listdir(path):
                shutil.move(os.path.join(path, entry), os.path.join(root_dir, entry))
        else:
            parent = os.path.dirname(root_dir)
            if parent:
                os.makedirs(parent, exist_ok=True)
            shutil.move(path, root_dir)
        print(f"数据集已成功下载并移动到: {root_dir}")

    return root_dir


def get_args():
    """Parse command-line arguments for the train + compress + fine-tune run.

    NOTE(review): this function also triggers the dataset download as a
    side effect (via download_dataset_if_missing) before parsing — runs
    that only want `--help` will still hit the download path; consider
    moving that call into main().

    Returns:
        argparse.Namespace with data paths, training, sharing, fine-tune
        and evaluation-threshold settings.
    """
    parser = argparse.ArgumentParser(description="Cloud-Net 模型训练 + 权重共享压缩 + 微调（38-Cloud）")

    # Default data root, resolved relative to this script: ../CloudDetection
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CloudDetection")
    )

    # Optional environment-variable override, e.g.
    #   set CLOUD_DATA_ROOT=E:\USM\year2sem3\实习数据集\CloudDetection
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    # Download the dataset if it is not already present.
    default_data_root = download_dataset_if_missing(default_data_root)

    # Base directories / output
    parser.add_argument("--data-root",       default=default_data_root, help="38-Cloud 数据集根目录（支持相对路径）")
    parser.add_argument("--output-dir",      default="./output",        help="输出目录")

    # Training hyperparameters
    parser.add_argument("--epochs",          type=int, default=25,      help="训练轮数")
    parser.add_argument("--batch-size",      type=int, default=8,       help="批次大小")
    parser.add_argument("--learning-rate",   type=float, default=1e-3,  help="学习率")
    parser.add_argument("--weight-decay",    type=float, default=1e-4,  help="权重衰减")

    # Weight-sharing (k-means) parameters
    parser.add_argument("--sharing-clusters", type=int, default=16,     help="权重共享聚类中心数量")
    parser.add_argument("--share-threshold",  type=int, default=100000, help="聚类前采样阈值（权重数大于该值才采样）")
    parser.add_argument("--share-samples",    type=int, default=50000,  help="聚类采样数量")

    # Fine-tuning parameters (after weight sharing)
    parser.add_argument("--ft-epochs",        type=int, default=3,      help="共享后微调轮数")
    parser.add_argument("--ft-lr",            type=float, default=1e-4, help="共享后微调学习率")

    # Evaluation threshold (used by both spec-sheet and regular evaluation)
    parser.add_argument("--threshold",        type=float, default=0.5,  help="云像素判定阈值")

    args = parser.parse_args()

    # Friendly sanity check: only verify the five training subdirectories,
    # so a missing test_* split doesn't raise a false alarm.
    expected_train_dirs = [
        os.path.join(args.data_root, "38-Cloud_training", "train_red"),
        os.path.join(args.data_root, "38-Cloud_training", "train_green"),
        os.path.join(args.data_root, "38-Cloud_training", "train_blue"),
        os.path.join(args.data_root, "38-Cloud_training", "train_nir"),
        os.path.join(args.data_root, "38-Cloud_training", "train_gt"),
    ]
    missing = [p for p in expected_train_dirs if not os.path.isdir(p)]
    if missing:
        print("[提示] 数据目录可能不完整或路径不对：")
        for p in missing:
            print("  - 缺少目录：", p)
        print("你可以通过命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "..\..\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")

    return args

def main():
    """Pipeline entry point: train -> weight-share -> fine-tune -> report.

    Stage 1 trains CloudNet from scratch, stage 2 applies k-means weight
    sharing, stage 3 fine-tunes the shared model; metrics for each stage
    are printed and saved to <output-dir>/compression_results.json.
    """
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Fix random seeds for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)

    # Data loading.
    train_loader, val_loader = create_dataloaders(args)

    # ===== STAGE 1: base training =====
    print("=== STAGE: TRAIN ===")
    model = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    model, orig_iou, _ = train_model(model, train_loader, val_loader, device, args)
    orig_size   = get_model_size_mb(model)
    orig_params = count_parameters(model)
    torch.save(model.state_dict(), os.path.join(args.output_dir, "cloudnet_trained.pth"))
    print(f"训练完成，原始模型 IoU={orig_iou:.4f}, 大小={orig_size:.2f}MB, 参数={orig_params:,}")

    # Spec-sheet evaluation (original model).
    spec_orig = build_eval_report_spec(model, val_loader, device, threshold=getattr(args, "threshold", 0.5))
    print("【原始模型-照片版评估】", spec_orig)

    # ===== STAGE 2: weight sharing =====
    print("=== STAGE: WEIGHT SHARING ===")
    model.load_state_dict(torch.load(os.path.join(args.output_dir, "cloudnet_trained.pth"), map_location=device))
    model.eval()
    shared_model = weight_sharing(
        model,
        num_clusters=args.sharing_clusters,
        device=device,
        sample_threshold=args.share_threshold,
        sample_size=args.share_samples
    )
    torch.save(shared_model.state_dict(), os.path.join(args.output_dir, "cloudnet_shared.pth"))
    shared_iou    = evaluate_model(shared_model, val_loader, device)["iou"]
    shared_size   = get_model_size_mb(shared_model)
    shared_params = count_parameters(shared_model)
    print(f"共享模型 IoU={shared_iou:.4f}, 大小={shared_size:.2f}MB, 参数={shared_params:,}")

    # Spec-sheet evaluation (after sharing).
    spec_shared = build_eval_report_spec(shared_model, val_loader, device, threshold=getattr(args, "threshold", 0.5))
    print("【共享后模型-照片版评估】", spec_shared)

    # ===== STAGE 3: fine-tuning =====
    print("=== STAGE: FINE-TUNE AFTER SHARING ===")
    # Reuse train_model with a reduced-LR, short-schedule namespace.
    ft_args = argparse.Namespace(
        epochs        = args.ft_epochs,
        learning_rate = args.ft_lr,
        weight_decay  = args.weight_decay
    )
    shared_model = shared_model.to(device)
    shared_model, ft_iou, _ = train_model(shared_model, train_loader, val_loader, device, ft_args)
    torch.save(shared_model.state_dict(), os.path.join(args.output_dir, "cloudnet_shared_ft.pth"))
    ft_size   = get_model_size_mb(shared_model)
    ft_params = count_parameters(shared_model)
    print(f"微调后模型 IoU={ft_iou:.4f}, 大小={ft_size:.2f}MB, 参数={ft_params:,}")

    # Spec-sheet evaluation (after fine-tuning).
    spec_ft = build_eval_report_spec(shared_model, val_loader, device, threshold=getattr(args, "threshold", 0.5))
    print("【微调后模型-照片版评估】", spec_ft)

    # ===== Summarize and persist all results =====
    stats = {
        "original":   {"iou": orig_iou,  "size_mb": orig_size,  "params": orig_params},
        "shared":     {"iou": shared_iou,"size_mb": shared_size,"params": shared_params},
        "fine_tuned": {"iou": ft_iou,    "size_mb": ft_size,    "params": ft_params},
        "compression": {
            "clusters": args.sharing_clusters,
            "share_drop_pct": (orig_iou - shared_iou) / max(orig_iou, 1e-8) * 100.0,
            "ft_gain_pct":    (ft_iou   - shared_iou) / max(shared_iou, 1e-8) * 100.0
        },
        # Also persist the spec-sheet metrics for each stage.
        "spec_report": {
            "original":   spec_orig,
            "shared":     spec_shared,
            "fine_tuned": spec_ft
        }
    }
    save_results(stats, args.output_dir)

# Script entry point: run the full train -> share -> fine-tune pipeline.
if __name__ == "__main__":
    main()