#!/usr/bin/env python
"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import numpy as np
import json
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import OrderedDict
import math
import tempfile
from pathlib import Path
import rasterio
import psutil, time, gc

class Cloud38Dataset(Dataset):
    """38-Cloud Landsat-8 patch dataset (training split).

    For every patch the four spectral bands (red, green, blue, NIR) are
    stacked into a [4, H, W] float32 tensor normalized to [0, 1], and the
    ground-truth cloud mask is binarized to {0, 1}. Optional random
    horizontal/vertical flips are applied, then image and mask are resized
    to ``img_size`` x ``img_size``.
    """

    def __init__(self, root: str, img_size: int = 256, augment: bool = False):
        super().__init__()
        self.root = Path(root)
        self.img_size = img_size
        self.augment = augment

        train_dir = self.root / "38-Cloud_training"
        self.red_dir   = train_dir / "train_red"
        self.green_dir = train_dir / "train_green"
        self.blue_dir  = train_dir / "train_blue"
        self.nir_dir   = train_dir / "train_nir"
        self.gt_dir    = train_dir / "train_gt"

        assert self.red_dir.exists(),   f"缺少目录: {self.red_dir}"
        assert self.green_dir.exists(), f"缺少目录: {self.green_dir}"
        assert self.blue_dir.exists(),  f"缺少目录: {self.blue_dir}"
        assert self.nir_dir.exists(),   f"缺少目录: {self.nir_dir}"
        assert self.gt_dir.exists(),    f"缺少目录: {self.gt_dir}"

        self.samples = []
        red_files = sorted(self.red_dir.glob("*.tif"))
        missing = 0

        # Accepted GT filename prefixes (these vary between dataset versions).
        gt_prefixes = ["GT_", "gt_", "cloud_", "Cloud_", "mask_", "reference_"]

        for red_path in red_files:
            name = red_path.name  # e.g. red_patch_150_7_by_18_....tif
            # Split on the "red_" prefix to obtain the shared patch suffix.
            if not name.startswith("red_"):
                # Fallback: if the file is not "red_"-prefixed, use whatever
                # follows the first underscore.
                parts = name.split("_", 1)
                if len(parts) < 2:
                    continue
                suffix = parts[1]
            else:
                suffix = name[len("red_"):]  # patch_150_7_by_18_....tif

            green_path = self.green_dir / f"green_{suffix}"
            blue_path  = self.blue_dir  / f"blue_{suffix}"
            nir_path   = self.nir_dir   / f"nir_{suffix}"

            # Locate the GT file by trying each known prefix.
            gt_path = None
            for pref in gt_prefixes:
                cand = self.gt_dir / f"{pref}{suffix}"
                if cand.exists():
                    gt_path = cand
                    break

            if all(p.exists() for p in [green_path, blue_path, nir_path]) and gt_path is not None:
                self.samples.append((red_path, green_path, blue_path, nir_path, gt_path))
            else:
                missing += 1

        if len(self.samples) == 0:
            raise RuntimeError("未找到有效样本，请检查数据集路径/命名是否匹配。")

        if missing > 0:
            print(f"注意：有 {missing} 个 red 样本在其它波段或 GT 中未对齐，已跳过。")

        print(f"38-Cloud 数据集可用样本数: {len(self.samples)}")

    def __len__(self) -> int:
        return len(self.samples)

    def _read_band(self, path: Path):
        """Read one single-band GeoTIFF and normalize it to [0, 1] float32."""
        with rasterio.open(path) as src:
            arr = src.read(1)  # HxW
        # Normalize to [0, 1].
        if arr.dtype == np.uint16:
            arr = arr.astype(np.float32) / 65535.0
        else:
            arr = arr.astype(np.float32)
            # Safety net: if values exceed 1, normalize by the per-image max.
            # NOTE(review): per-image max normalization is not radiometrically
            # consistent across patches — acceptable here, but worth confirming.
            m = arr.max()
            if m > 1.0:
                arr = arr / (m + 1e-6)
        return arr

    def _read_mask(self, path: Path):
        """Read a GT mask and binarize it to uint8 {0, 1}."""
        with rasterio.open(path) as src:
            arr = src.read(1)  # HxW
        # Normalize labels to 0/1.
        # Common GT encodings are 0 (clear) / 255 (cloud); 0/1 also occurs.
        if arr.max() > 1:
            arr = (arr > 127).astype(np.uint8)
        else:
            arr = arr.astype(np.uint8)
        return arr

    def _resize_pair(self, img4: torch.Tensor, mask: torch.Tensor):
        """Resize image (bilinear) and mask (nearest, to keep labels crisp)."""
        # img4: [4,H,W], mask:[1,H,W]
        img4 = img4.unsqueeze(0)  # [1,4,H,W]
        mask = mask.unsqueeze(0).float()  # [1,1,H,W]
        img4 = F.interpolate(img4, size=(self.img_size, self.img_size), mode="bilinear", align_corners=False)
        mask = F.interpolate(mask, size=(self.img_size, self.img_size), mode="nearest")
        return img4.squeeze(0), mask.squeeze(0).long()

    def __getitem__(self, idx):
        """Return (img [4,S,S] float32, mask [S,S] int64) for sample ``idx``."""
        r, g, b, n, gt = self.samples[idx]
        r = self._read_band(r)
        g = self._read_band(g)
        b = self._read_band(b)
        n = self._read_band(n)
        m = self._read_mask(gt)

        img = np.stack([r, g, b, n], axis=0)  # [4,H,W]
        img = torch.from_numpy(img).float()
        mask = torch.from_numpy(m[None, ...])  # [1,H,W]

        # Light augmentation (optional).
        if self.augment:
            if np.random.rand() < 0.5:
                img = torch.flip(img, dims=[2])   # horizontal flip
                mask = torch.flip(mask, dims=[2])
            if np.random.rand() < 0.5:
                img = torch.flip(img, dims=[1])   # vertical flip
                mask = torch.flip(mask, dims=[1])

        # Resize to the common size.
        img, mask = self._resize_pair(img, mask)  # img:[4,S,S], mask:[1,S,S]->[S,S]
        mask = mask.squeeze(0)  # [S,S]
        return img, mask


class CloudNet(nn.Module):
    """U-Net style encoder/decoder for cloud detection.

    Takes a 4-channel (R, G, B, NIR) image and produces per-pixel logits
    for ``num_classes`` classes at the input resolution. The input side
    length must be divisible by 16 (four 2x2 poolings).
    """
    def __init__(self, num_classes=2, dropout_rate=0.1):
        super(CloudNet, self).__init__()
        # Encoder path: channel width doubles at every level.
        self.enc1 = self._make_layer(4, 64, dropout_rate)
        self.enc2 = self._make_layer(64, 128, dropout_rate)
        self.enc3 = self._make_layer(128, 256, dropout_rate)
        self.enc4 = self._make_layer(256, 512, dropout_rate)
        # Bottleneck.
        self.center = self._make_layer(512, 1024, dropout_rate)
        # Decoder path: input widths account for skip concatenation.
        self.dec4 = self._make_decoder_layer(1024, 512)
        self.dec3 = self._make_decoder_layer(1024, 256)
        self.dec2 = self._make_decoder_layer(512, 128)
        self.dec1 = self._make_decoder_layer(256, 64)
        self.final = nn.Conv2d(128, num_classes, 1)
        self.pool = nn.MaxPool2d(2, 2)
        self._initialize_weights()

    def _make_layer(self, in_ch, out_ch, d):
        """Two 3x3 conv + BN + ReLU stages with spatial dropout in between."""
        stages = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Dropout2d(d),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*stages)

    def _make_decoder_layer(self, in_ch, out_ch):
        """2x upsampling (transposed conv) followed by a 3x3 refinement conv."""
        stages = [
            nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*stages)

    def _initialize_weights(self):
        """Kaiming init for conv layers, unit-gain init for batch norms."""
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Encoder with saved skip activations.
        skip1 = self.enc1(x)
        skip2 = self.enc2(self.pool(skip1))
        skip3 = self.enc3(self.pool(skip2))
        skip4 = self.enc4(self.pool(skip3))
        bottom = self.center(self.pool(skip4))
        # Decoder: upsample, then concatenate the matching skip connection.
        up4 = torch.cat([self.dec4(bottom), skip4], dim=1)
        up3 = torch.cat([self.dec3(up4), skip3], dim=1)
        up2 = torch.cat([self.dec2(up3), skip2], dim=1)
        up1 = torch.cat([self.dec1(up2), skip1], dim=1)
        return self.final(up1)

def low_rank_decompose_conv(conv: nn.Conv2d, rank: int) -> nn.Sequential:
    """Approximate ``conv`` with a low-rank two-layer factorization.

    The weight tensor (out_ch, in_ch, k, k) is flattened to a matrix and
    truncated via SVD at ``rank`` singular values, producing:
      ConvA: in_ch -> rank, k x k kernel (carries sqrt(S) * Vh)
      ConvB: rank -> out_ch, 1 x 1 kernel (carries U * sqrt(S))

    Fixes over the previous version:
      * ``rank`` is clamped to min(out_ch, in_ch*k*k) so the reshape
        can never fail when the requested rank is too large;
      * layers without a bias no longer crash (the old code always
        copied ``conv.bias``);
      * the original stride is preserved on ConvA;
      * uses ``torch.linalg.svd`` (``torch.svd`` is deprecated).

    Args:
        conv: the convolution to factorize (square kernel assumed).
        rank: target rank of the approximation.

    Returns:
        nn.Sequential(ConvA, ConvB) approximating the original layer.
    """
    w = conv.weight.data
    out_ch, in_ch, k, _ = w.shape
    W = w.reshape(out_ch, -1)  # [O, I*K*K]

    # The factorization rank cannot exceed either matrix dimension.
    rank = max(1, min(rank, out_ch, in_ch * k * k))

    # torch.linalg.svd returns W = U @ diag(S) @ Vh.
    U, S, Vh = torch.linalg.svd(W, full_matrices=False)
    U_r = U[:, :rank]       # [O, r]
    S_r = S[:rank]          # [r]
    Vh_r = Vh[:rank, :]     # [r, I*K*K]

    # Split sqrt(S) between both factors for balanced weight magnitudes.
    sqrt_s = torch.sqrt(S_r)

    # ConvA: in_ch -> rank, spatial k x k kernel.
    convA = nn.Conv2d(in_ch, rank, kernel_size=k, stride=conv.stride,
                      padding=conv.padding, bias=False)
    convA.weight.data.copy_((sqrt_s.unsqueeze(1) * Vh_r).reshape(rank, in_ch, k, k))

    # ConvB: rank -> out_ch, 1x1 kernel; inherits the original bias if any.
    convB = nn.Conv2d(rank, out_ch, kernel_size=1, bias=conv.bias is not None)
    convB.weight.data.copy_((U_r * sqrt_s).reshape(out_ch, rank, 1, 1))
    if conv.bias is not None:
        convB.bias.data.copy_(conv.bias.data)

    return nn.Sequential(convA, convB)

def apply_low_rank(model: nn.Module, rank: int):
    """Recursively replace every spatial (non-1x1) Conv2d with its
    low-rank factorization (see ``low_rank_decompose_conv``).

    NOTE: the model is modified in-place; the same object is returned
    for call-chaining convenience.
    """
    for child_name, child in list(model.named_children()):
        is_spatial_conv = isinstance(child, nn.Conv2d) and child.kernel_size != (1, 1)
        if is_spatial_conv:
            setattr(model, child_name, low_rank_decompose_conv(child, rank))
        else:
            apply_low_rank(child, rank)
    return model



def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the cloud (class-1) probability map.

    Args:
        pred: raw logits of shape [B, 2, H, W].
        target: integer mask of shape [B, H, W] with values {0, 1}.
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor: 1 - mean per-sample Dice coefficient.
    """
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    cloud_gt = target.float()

    overlap = (cloud_prob * cloud_gt).sum(dim=(1, 2))
    denom = cloud_prob.sum(dim=(1, 2)) + cloud_gt.sum(dim=(1, 2))

    dice = (2.0 * overlap + smooth) / (denom + smooth)
    return 1.0 - dice.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal loss: cross-entropy with easy examples down-weighted.

    Args:
        pred: raw logits [B, C, H, W].
        target: class-index mask [B, H, W].
        alpha: global scaling factor.
        gamma: focusing exponent; larger means easy pixels count for less.

    Returns:
        Scalar tensor: mean modulated cross-entropy over all pixels.
    """
    per_pixel_ce = F.cross_entropy(pred, target, reduction='none')
    prob_true = torch.exp(-per_pixel_ce)  # probability of the correct class
    modulated = alpha * (1.0 - prob_true) ** gamma * per_pixel_ce
    return modulated.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """Compute segmentation metrics at a fixed probability threshold.

    Args:
        pred: raw logits [B, 2, H, W].
        target: binary mask [B, H, W].
        threshold: probability cut-off for the cloud class.

    Returns:
        dict with 'iou', 'f1', 'precision', 'recall', 'accuracy' floats.
    """
    eps = 1e-8
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    predicted = (cloud_prob > threshold).float()
    actual = target.float()

    # Confusion counts over the whole batch.
    tp = (predicted * actual).sum()
    fp = (predicted * (1 - actual)).sum()
    fn = ((1 - predicted) * actual).sum()
    tn = ((1 - predicted) * (1 - actual)).sum()

    # IoU uses |pred ∪ gt| = |pred| + |gt| - |pred ∩ gt|.
    union = predicted.sum() + actual.sum() - tp
    iou = tp / (union + eps)
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item(),
    }


def get_model_size_mb(model):
    """Return the serialized size of ``model``'s state_dict in megabytes.

    Serializes into an in-memory ``io.BytesIO`` buffer instead of a
    temporary file, which avoids filesystem permission issues and
    leftover files entirely. If serialization fails for any reason,
    falls back to an estimate based on the state_dict tensor sizes
    (parameters AND buffers, e.g. BatchNorm running stats) at 4 bytes
    per element plus 20% container overhead.
    """
    import io
    try:
        buffer = io.BytesIO()
        torch.save(model.state_dict(), buffer)
        return buffer.getbuffer().nbytes / (1024 * 1024)
    except Exception as e:
        print(f"内存序列化方法失败: {e}")
        # Fallback: estimate from the state_dict so buffers are included too.
        total_elems = sum(t.numel() for t in model.state_dict().values())
        estimated_size_mb = total_elems * 4 / (1024 * 1024) * 1.2  # +20% overhead
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

def train_model(model, train_loader, val_loader, device, args):
    """Train ``model`` with a combined CE + Dice + Focal loss and early stopping.

    Keeps a snapshot of the weights from the epoch with the best validation
    IoU and restores them before returning.

    Args:
        model: network producing [B, 2, H, W] logits.
        train_loader: dataloader yielding (images, masks) training batches.
        val_loader: dataloader used for per-epoch validation.
        device: device the batches are moved to.
        args: namespace providing ``epochs``, ``learning_rate`` and
            ``weight_decay``; ``patience`` is optional (defaults to 10),
            so reduced namespaces (e.g. the fine-tuning stage) still work.

    Returns:
        (model, best_iou, history): model with the best weights loaded,
        the best validation IoU, and per-epoch 'loss'/'iou'/'val_iou'/
        'val_f1' lists.
    """
    print(f"开始训练Cloud-Net模型...")

    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay,
                            betas=(0.9, 0.999))

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = 0.0
    best_model_state = None
    train_history = {'loss': [], 'iou': [], 'val_iou': [], 'val_f1': []}

    # FIX: honor args.patience when present (it was previously hard-coded to
    # 10, silently ignoring the --patience flag); the default keeps the old
    # behavior for callers that pass a reduced namespace.
    patience = getattr(args, 'patience', 10)
    patience_counter = 0

    for epoch in range(args.epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_metrics = {'iou': 0.0, 'f1': 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Weighted combination of three complementary losses.
            ce_loss = ce_loss_fn(outputs, masks)
            d_loss = dice_loss(outputs, masks)
            f_loss = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)

            total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # Metrics at a fixed 0.5 threshold for comparability across runs.
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += total_loss.item()
            running_metrics['iou'] += metrics['iou']
            running_metrics['f1'] += metrics['f1']
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'IoU': f'{metrics["iou"]:.4f}',
                'F1': f'{metrics["f1"]:.4f}'
            })

        # Average training metrics over the epoch.
        avg_train_loss = running_loss / num_batches
        avg_train_iou = running_metrics['iou'] / num_batches

        # --- validation phase ---
        val_metrics = evaluate_model(model, val_loader, device)

        scheduler.step()

        # Snapshot the best-scoring weights.
        if val_metrics['iou'] > best_iou:
            best_iou = val_metrics['iou']
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # Record history.
        train_history['loss'].append(avg_train_loss)
        train_history['iou'].append(avg_train_iou)
        train_history['val_iou'].append(val_metrics['iou'])
        train_history['val_f1'].append(val_metrics['f1'])

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {avg_train_iou:.4f}, '
              f'Val IoU: {val_metrics["iou"]:.4f}, '
              f'Val F1: {val_metrics["f1"]:.4f}')

        # Early stopping.
        if patience_counter >= patience:
            print(f"早停在epoch {epoch + 1}, 最佳IoU: {best_iou:.4f}")
            break

    # Restore the best weights before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, best_iou, train_history


def evaluate_model(model, dataloader, device, threshold=0.5):
    """Average ``calculate_metrics_fixed`` over every batch in ``dataloader``.

    Returns a dict with 'iou', 'f1', 'precision', 'recall', 'accuracy'.
    """
    model.eval()
    sums = dict.fromkeys(('iou', 'f1', 'precision', 'recall', 'accuracy'), 0.0)
    batches = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            batch_metrics = calculate_metrics_fixed(model(images), masks, threshold)
            for name in sums:
                sums[name] += batch_metrics[name]
            batches += 1

    # Per-batch (not per-sample) averages, matching the training logs.
    return {name: total / batches for name, total in sums.items()}


def count_parameters(model):
    """Number of trainable (requires_grad) parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """Dataset-level accuracy and precision from pooled confusion counts.

    Unlike the batch-averaged metrics, TP/FP/FN/TN are accumulated over
    the entire dataloader before the ratios are computed.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            cloud_prob = torch.softmax(model(images), dim=1)[:, 1, :, :]
            pred = (cloud_prob > threshold).to(masks.dtype).view(-1).float()
            truth = masks.view(-1).float()
            tp += float((pred * truth).sum().item())
            fp += float((pred * (1 - truth)).sum().item())
            fn += float(((1 - pred) * truth).sum().item())
            tn += float(((1 - pred) * (1 - truth)).sum().item())
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def _measure_inference_memory_mb(model, sample_images, device):
    """Estimate the extra memory (MB) one forward pass allocates.

    On CUDA this uses the allocator's peak-memory counters; on CPU it
    falls back to the change in process RSS around a forward pass.
    NOTE(review): the CPU RSS delta is noisy (allocator reuse, other
    threads) and should be read as a rough lower bound, not an exact
    figure.
    """
    process = psutil.Process(os.getpid())
    model.eval()
    with torch.no_grad():
        _ = model(sample_images)  # warm-up pass so lazy one-time allocations don't skew the reading
    if device.type == "cuda":
        # CUDA path: measure peak allocator growth across a single forward.
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        start = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_images)
        peak = torch.cuda.max_memory_allocated(device)
        return float(max(peak - start, 0) / (1024 * 1024))
    # CPU path: let the heap settle, then diff process RSS around a forward.
    gc.collect(); time.sleep(0.02)
    rss_before = process.memory_info().rss
    with torch.no_grad():
        _ = model(sample_images)
    gc.collect(); time.sleep(0.02)
    rss_after = process.memory_info().rss
    return float(max(rss_after - rss_before, 0) / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """Collect efficiency stats: parameter count, serialized size and the
    per-forward memory footprint (measured on the first batch)."""
    sample = next(iter(dataloader), None)
    if sample is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")
    batch_images = sample[0].to(device)
    return {
        "params": int(count_parameters(model)),
        "model_size_mb": float(get_model_size_mb(model)),
        "memory_mb": float(_measure_inference_memory_mb(model, batch_images, device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Assemble the two-section (performance / efficiency) report dict."""
    performance = evaluate_performance_spec(model, dataloader, device, threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    report = {
        "模型性能": {
            "准确率": performance["accuracy"],
            "精度": performance["precision"],
        },
        "模型效率": {
            "参数数量": efficiency["params"],
            "模型大小(MB)": efficiency["model_size_mb"],
            "内存占用(MB)": efficiency["memory_mb"],
        },
    }
    return report

def create_dataloaders(args):
    """Build train/val DataLoaders from the 38-Cloud dataset (80/20 split)."""
    print("准备 38-Cloud 真实数据集...")

    dataset = Cloud38Dataset(
        root=args.data_root,
        img_size=args.img_size,
        augment=True,
    )

    n_train = int(0.8 * len(dataset))
    train_set, val_set = random_split(dataset, [n_train, len(dataset) - n_train])

    # Pinned host memory only helps when a GPU will receive the batches.
    shared_kwargs = dict(
        batch_size=args.batch_size,
        num_workers=0,
        pin_memory=torch.cuda.is_available(),
    )
    train_loader = DataLoader(train_set, shuffle=True, **shared_kwargs)
    val_loader = DataLoader(val_set, shuffle=False, **shared_kwargs)

    print(f"训练集大小: {len(train_set)}")
    print(f"验证集大小: {len(val_set)}")
    return train_loader, val_loader

def save_results(results, output_dir):
    """Write ``results`` to ``compression_results.json`` in ``output_dir``,
    converting numpy scalars/arrays to plain Python types first."""

    def _to_plain(value):
        # Recursively strip numpy types so json.dump never chokes on them.
        if isinstance(value, np.ndarray):
            return value.tolist()
        if isinstance(value, (np.integer, np.floating)):
            return value.item()
        if isinstance(value, dict):
            return {k: _to_plain(v) for k, v in value.items()}
        if isinstance(value, list):
            return [_to_plain(item) for item in value]
        return value

    results_path = os.path.join(output_dir, 'compression_results.json')
    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(_to_plain(results), f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def _expected_train_dirs(root):
    """Paths of the five mandatory per-band training folders under ``root``."""
    base = os.path.join(root, "38-Cloud_training")
    return [os.path.join(base, f"train_{band}")
            for band in ("red", "green", "blue", "nir", "gt")]

def _has_train_structure(root):
    """True when every expected 38-Cloud training directory exists under ``root``."""
    try:
        expected = _expected_train_dirs(root)
        return all(os.path.isdir(path) for path in expected)
    except Exception:
        # An unusable root (e.g. non-string) simply counts as "not present".
        return False

def _extract_all_zips_under(folder):
    """Extract every ``*.zip`` found (recursively) into its own directory.

    Returns True if at least one archive was extracted; individual
    failures are reported and skipped so one bad archive does not abort
    the scan.
    """
    import zipfile
    any_extracted = False
    for cur_root, _, files in os.walk(folder):
        archives = [name for name in files if name.lower().endswith(".zip")]
        for name in archives:
            zf = os.path.join(cur_root, name)
            try:
                with zipfile.ZipFile(zf, "r") as archive:
                    archive.extractall(cur_root)
            except Exception as e:
                print(f"[auto] 解压失败（忽略）: {zf} -> {e}")
            else:
                print(f"[auto] 已解压: {zf}")
                any_extracted = True
    return any_extracted

def _find_data_root_with_training(base_dir):
    """Walk ``base_dir`` for the first directory that directly contains a
    structurally-complete ``38-Cloud_training`` folder; None if absent."""
    for cur_root, subdirs, _ in os.walk(base_dir):
        if "38-Cloud_training" not in subdirs:
            continue
        if _has_train_structure(cur_root):
            return cur_root
    return None

def ensure_dataset_available(args):
    """Make sure the 38-Cloud training structure exists under ``args.data_root``.

    If ``args.data_root`` lacks the ``38-Cloud_training/train_*`` structure:
      1) download the 38-Cloud dataset via kagglehub
      2) locate the directory that contains ``38-Cloud_training``
      3) if archives are present, extract them and search again
    On success ``args.data_root`` is rewritten in-place and returned.
    """
    if _has_train_structure(args.data_root):
        return args.data_root

    print("[auto] 未发现完整数据集结构，尝试 KaggleHub 自动下载 38-Cloud 数据集...")

    try:
        import kagglehub  # not installed? pip install kagglehub
    except Exception as e:
        raise RuntimeError(
            "未安装 kagglehub。请先执行：pip install kagglehub\n"
            f"导入失败详情：{e}"
        )

    # Download the dataset via KaggleHub.
    try:
        dl_path = kagglehub.dataset_download("sorour/38cloud-cloud-segmentation-in-satellite-images")
        dl_path = os.path.normpath(dl_path)
        print(f"[auto] KaggleHub 下载完成：{dl_path}")
    except Exception as e:
        raise RuntimeError(f"下载 Kaggle 数据集失败，请检查网络或 Kaggle 配置：{e}")

    # Check the download directory itself first.
    if _has_train_structure(dl_path):
        args.data_root = dl_path
        print(f"[auto] 识别成功：data_root = {args.data_root}")
        return args.data_root

    # Recursively search for a root with the expected structure.
    cand = _find_data_root_with_training(dl_path)
    if cand:
        args.data_root = cand
        print(f"[auto] 已定位到包含 38-Cloud_training 的目录：{args.data_root}")
        return args.data_root

    # Extract any archives, then search once more.
    if _extract_all_zips_under(dl_path):
        cand = _find_data_root_with_training(dl_path)
        if cand:
            args.data_root = cand
            print(f"[auto] 解压后已定位：data_root = {args.data_root}")
            return args.data_root

    # Last resort: point data_root at the raw download directory.
    print("[auto] 未能自动识别标准目录结构。已将 data_root 指向 Kaggle 下载目录。")
    print("       请确认其下存在 `38-Cloud_training/train_red` 等子目录。")
    args.data_root = dl_path
    return args.data_root

def get_args():
    """Parse command-line arguments and sanity-check the dataset path.

    Missing training directories only produce a warning (not an error) so
    the KaggleHub auto-download in ``ensure_dataset_available`` can still
    run afterwards.
    """
    parser = argparse.ArgumentParser(description="Cloud-Net 模型训练 + 低秩分解 + 微调（38-Cloud）")

    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CloudDetection")
    )
    # Optional override via environment variable.
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    # Core options.
    parser.add_argument("--data-root",      default=default_data_root, help="38-Cloud 数据集根目录")
    parser.add_argument("--output-dir",     default="./output",        help="输出目录")
    parser.add_argument("--img-size",       type=int, default=256,     help="输入影像统一缩放到的边长")
    parser.add_argument("--epochs",         type=int, default=25,      help="基础训练轮数")
    parser.add_argument("--batch-size",     type=int, default=8,       help="批次大小")
    parser.add_argument("--learning-rate",  type=float, default=1e-3,  help="学习率")
    parser.add_argument("--weight-decay",   type=float, default=1e-4,  help="权重衰减")

    # Low-rank decomposition and fine-tuning.
    parser.add_argument("--lowrank-rank",   type=int, default=32,      help="低秩分解的秩（rank）")
    parser.add_argument("--ft-epochs",      type=int, default=5,       help="低秩分解后的微调轮数")
    parser.add_argument("--ft-lr",          type=float, default=1e-4,  help="低秩分解后的微调学习率")

    # Distillation options (harmless to keep even if unused; set 0 to skip).
    parser.add_argument("--distill-epochs", type=int, default=0,       help="蒸馏训练轮数（用不到就设0）")
    parser.add_argument("--distill-alpha",  type=float, default=0.5,   help="蒸馏中 CE 权重 α")

    # Training details (optional).
    parser.add_argument("--use_amp", action="store_true", default=False, help="开启AMP混合精度（需要CUDA）")
    parser.add_argument("--max_steps_per_epoch", type=int, default=None, help="每epoch最多训练的步数（调试用）")
    parser.add_argument("--patience", type=int, default=10, help="早停patience")
    parser.add_argument("--min_delta", type=float, default=0.0, help="早停最小提升")
    parser.add_argument("--warmup_ratio", type=float, default=0.1, help="LR warmup比例")

    args = parser.parse_args()

    # Friendly check: only the training folders are required; a missing test
    # split does not block anything. CONSISTENCY FIX: reuse the canonical
    # directory list instead of duplicating it, so this check can never
    # drift out of sync with _has_train_structure().
    missing_train = [p for p in _expected_train_dirs(args.data_root) if not os.path.isdir(p)]
    if missing_train:
        print("[提示] 训练所需的目录缺失，路径可能不对：")
        for p in missing_train:
            print("  - 缺少目录：", p)
        print("可以用命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")

    return args

def main():
    """Run the full pipeline: train, low-rank compress, fine-tune, report."""
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Fix random seeds for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)

    # Ensure the dataset is present (KaggleHub auto-download if missing).
    ensure_dataset_available(args)

    # Data loading.
    train_loader, val_loader = create_dataloaders(args)

    # —— STAGE 1: base training —— #
    print("=== STAGE: TRAIN ===")
    model = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    model, orig_iou, _ = train_model(model, train_loader, val_loader, device, args)
    orig_size   = get_model_size_mb(model)
    orig_params = count_parameters(model)
    torch.save(model.state_dict(), os.path.join(args.output_dir, "cloudnet_trained.pth"))
    print(f"训练完成，原始模型 IoU={orig_iou:.4f}, 大小={orig_size:.2f}MB, 参数={orig_params:,}")

    # —— STAGE 2: low-rank decomposition —— #
    print("=== STAGE: LOW-RANK DECOMPOSITION ===")
    # BUGFIX: apply_low_rank mutates its argument in-place, so decompose a
    # deep copy — previously `model` itself was transformed, which made the
    # "original" report and stats below actually describe the compressed net.
    lr_model = apply_low_rank(copy.deepcopy(model), args.lowrank_rank).to(device)
    torch.save(lr_model.state_dict(), os.path.join(args.output_dir, "cloudnet_lowrank.pth"))
    lr_metrics = evaluate_model(lr_model, val_loader, device)
    lr_size   = get_model_size_mb(lr_model)
    lr_params = count_parameters(lr_model)
    print(f"Low-Rank 后模型 IoU={lr_metrics['iou']:.4f}, 大小={lr_size:.2f}MB, 参数={lr_params:,}")

    # —— STAGE 3: fine-tune the low-rank model —— #
    ft_model = None
    if args.ft_epochs > 0:
        print("=== STAGE: FINE-TUNE AFTER LOW-RANK ===")
        # BUGFIX: fine-tune a copy so `lr_model` keeps the pre-fine-tune
        # weights for its own report below.
        ft_model, ft_iou, _ = train_model(
            copy.deepcopy(lr_model), train_loader, val_loader, device,
            argparse.Namespace(
                epochs=args.ft_epochs,
                learning_rate=args.ft_lr,
                weight_decay=args.weight_decay
            )
        )
        torch.save(ft_model.state_dict(), os.path.join(args.output_dir, "cloudnet_lowrank_ft.pth"))
        ft_size   = get_model_size_mb(ft_model)
        ft_params = count_parameters(ft_model)
        print(f"微调后模型 IoU={ft_iou:.4f}, 大小={ft_size:.2f}MB, 参数={ft_params:,}")
    else:
        ft_iou = None
        ft_size = None
        ft_params = None

    # —— Summary reports —— #
    report_orig = build_eval_report_spec(model,    val_loader, device)
    report_lr   = build_eval_report_spec(lr_model, val_loader, device)
    report_ft   = build_eval_report_spec(ft_model, val_loader, device) if ft_model is not None else None

    stats = {
        "original":   {"iou": orig_iou,               "size_mb": orig_size, "params": orig_params},
        "low_rank":   {"iou": lr_metrics["iou"],      "size_mb": lr_size,   "params": lr_params},
        "fine_tuned": {"iou": ft_iou,                 "size_mb": ft_size,   "params": ft_params},
        "spec_report": {
            "original":   report_orig,
            "low_rank":   report_lr,
            "fine_tuned": report_ft
        }
    }
    save_results(stats, args.output_dir)

    # Quick console view of the reports.
    print("\n=== 按图的模型评估 ===")
    def _p(tag, rep):
        if rep is None:
            print(f"{tag}: (无)")
            return
        print(f"{tag} | 性能: 准确率={rep['模型性能']['准确率']:.4f}, 精度={rep['模型性能']['精度']:.4f} | "
              f"效率: 参数={rep['模型效率']['参数数量']}, 大小={rep['模型效率']['模型大小(MB)']:.2f}MB, "
              f"内存={rep['模型效率']['内存占用(MB)']:.2f}MB")
    _p("Original", report_orig)
    _p("Low-Rank", report_lr)
    _p("Fine-Tuned", report_ft)

# Script entry point: run the full train/compress/report pipeline.
if __name__ == "__main__":
    main()