#!/usr/bin/env python

"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import numpy as np
import json
from tqdm import tqdm
import rasterio
import psutil
import time

class CloudDetectionDataset(Dataset):
    def __init__(self, root_dir, mode='train', transform=None):
        super().__init__()
        assert mode in ['train', 'test']
        sub = '38-Cloud_training' if mode == 'train' else '38-Cloud_test'
        base = os.path.join(root_dir, sub)

        self.band_dirs = {
            'red':   os.path.join(base, f'{mode}_red'),
            'green': os.path.join(base, f'{mode}_green'),
            'blue':  os.path.join(base, f'{mode}_blue'),
            'nir':   os.path.join(base, f'{mode}_nir'),
        }
        self.gt_dir = os.path.join(base, f'{mode}_gt')

        # 用 red 目录列出文件名（包含 red_ 前缀）
        self.red_files = sorted([
            f for f in os.listdir(self.band_dirs['red'])
            if f.lower().endswith('.tif')
        ])
        self.transform = transform

    def __len__(self):
        return len(self.red_files)

    def __getitem__(self, idx):
        red_name = self.red_files[idx]

        # 生成各波段/GT 对应文件名（把 red_ 前缀替换成相应前缀）
        green_name = red_name.replace('red_', 'green_', 1)
        blue_name  = red_name.replace('red_', 'blue_', 1)
        nir_name   = red_name.replace('red_', 'nir_', 1)
        gt_name    = red_name.replace('red_', 'gt_', 1)

        paths = {
            'red':   os.path.join(self.band_dirs['red'],   red_name),
            'green': os.path.join(self.band_dirs['green'], green_name),
            'blue':  os.path.join(self.band_dirs['blue'],  blue_name),
            'nir':   os.path.join(self.band_dirs['nir'],   nir_name),
            'gt':    os.path.join(self.gt_dir,             gt_name),
        }

        # 友好检查，缺啥直接报清楚
        for k, p in paths.items():
            if not os.path.exists(p):
                raise FileNotFoundError(f"Missing file for '{k}': {p}")

        # 读四个波段 -> [0,1]
        bands = []
        for k in ['red','green','blue','nir']:
            with rasterio.open(paths[k]) as src:
                arr = src.read(1).astype(np.float32)
            arr = np.nan_to_num(arr, nan=0.0)
            arr = np.clip(arr / 10000.0, 0.0, 1.0)
            bands.append(arr)
        img = np.stack(bands, axis=0)  # (4,H,W)

        # 读 gt -> 0/1
        with rasterio.open(paths['gt']) as src:
            m = src.read(1)
        m = (m > 0).astype(np.int64)

        img_t  = torch.from_numpy(img)
        mask_t = torch.from_numpy(m)
        if self.transform is not None:
            img_t = self.transform(img_t)
        return img_t, mask_t

def distill_model(teacher: nn.Module,
                  student: nn.Module,
                  train_loader: DataLoader,
                  val_loader: DataLoader,
                  device: torch.device,
                  epochs: int,
                  alpha: float,
                  lr: float,
                  weight_decay: float,
                  temperature: float = 1.0):
    """
    知识蒸馏：loss = α * CE(student, y) + (1-α) * T^2 * KL(softmax(s/T) || softmax(t/T))
    """
    teacher.eval()
    student.train()

    # 参数分组（BN/bias 不做 decay）
    decay, no_decay = [], []
    for n, p in student.named_parameters():
        if not p.requires_grad:
            continue
        if p.ndim == 1 or "bias" in n.lower() or "bn" in n.lower():
            no_decay.append(p)
        else:
            decay.append(p)
    optimizer = optim.AdamW(
        [
            {"params": decay, "weight_decay": weight_decay},
            {"params": no_decay, "weight_decay": 0.0},
        ],
        lr=lr,
        betas=(0.9, 0.999),
    )

    ce_loss_fn = nn.CrossEntropyLoss()
    kd_loss_fn = nn.KLDivLoss(reduction="batchmean")
    T = float(temperature)

    for epoch in range(1, epochs + 1):
        total_loss = 0.0
        for images, masks in train_loader:
            images, masks = images.to(device), masks.to(device)

            with torch.no_grad():
                t_logits = teacher(images)

            s_logits = student(images)

            loss_ce = ce_loss_fn(s_logits, masks)
            loss_kd = kd_loss_fn(
                F.log_softmax(s_logits / T, dim=1),
                F.softmax(t_logits / T, dim=1)
            ) * (T * T)  # 关键：T^2

            loss = alpha * loss_ce + (1.0 - alpha) * loss_kd

            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(student.parameters(), max_norm=1.0)
            optimizer.step()

            total_loss += float(loss.item())

        avg_loss = total_loss / max(1, len(train_loader))
        val_metrics = evaluate_model(student, val_loader, device)
        print(f"[Distill {epoch}/{epochs}] loss={avg_loss:.4f}, val IoU={val_metrics['iou']:.4f}")

    return student

class CloudNet(nn.Module):
    """
    Cloud-Net模型实现 - 修复版本
    基于U-Net架构，适合云检测任务
    """
    def __init__(self, num_classes=2, dropout_rate=0.1):
        super(CloudNet, self).__init__()
        self.enc1 = self._make_layer(4, 64, dropout_rate)
        self.enc2 = self._make_layer(64, 128, dropout_rate)
        self.enc3 = self._make_layer(128, 256, dropout_rate)
        self.enc4 = self._make_layer(256, 512, dropout_rate)
        self.center = self._make_layer(512, 1024, dropout_rate)
        self.dec4 = self._make_decoder_layer(1024, 512)
        self.dec3 = self._make_decoder_layer(1024, 256)
        self.dec2 = self._make_decoder_layer(512, 128)
        self.dec1 = self._make_decoder_layer(256, 64)
        self.final = nn.Conv2d(128, num_classes, 1)
        self.pool = nn.MaxPool2d(2, 2)
        self._initialize_weights()

    def _make_layer(self, in_ch, out_ch, d):
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True),
            nn.Dropout2d(d),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True)
        )

    def _make_decoder_layer(self, in_ch, out_ch):
        return nn.Sequential(
            nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2),
            nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True)
        )

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
                if m.bias is not None: nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1); nn.init.constant_(m.bias, 0)

    def forward(self, x):
        e1 = self.enc1(x); e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2)); e4 = self.enc4(self.pool(e3))
        c = self.center(self.pool(e4))
        d4 = torch.cat([self.dec4(c), e4], dim=1)
        d3 = torch.cat([self.dec3(d4), e3], dim=1)
        d2 = torch.cat([self.dec2(d3), e2], dim=1)
        d1 = torch.cat([self.dec1(d2), e1], dim=1)
        return self.final(d1)

def dice_loss(pred, target, smooth=1e-5):
    """Dice损失"""
    pred = torch.softmax(pred, dim=1)
    pred_cloud = pred[:, 1, :, :]
    target_cloud = target.float()

    intersection = (pred_cloud * target_cloud).sum(dim=(1, 2))
    pred_sum = pred_cloud.sum(dim=(1, 2))
    target_sum = target_cloud.sum(dim=(1, 2))

    dice = (2.0 * intersection + smooth) / (pred_sum + target_sum + smooth)
    return 1 - dice.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal损失"""
    ce_loss = F.cross_entropy(pred, target, reduction='none')
    pt = torch.exp(-ce_loss)
    focal_loss = alpha * (1 - pt) ** gamma * ce_loss
    return focal_loss.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """
    修复的评估指标计算 - 使用固定阈值
    """
    pred_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    pred_binary = (pred_prob > threshold).float()
    target_binary = target.float()

    # IoU
    intersection = (pred_binary * target_binary).sum()
    union = pred_binary.sum() + target_binary.sum() - intersection
    iou = intersection / (union + 1e-8)

    # 其他指标
    tp = (pred_binary * target_binary).sum()
    fp = (pred_binary * (1 - target_binary)).sum()
    fn = ((1 - pred_binary) * target_binary).sum()
    tn = ((1 - pred_binary) * (1 - target_binary)).sum()

    precision = tp / (tp + fp + 1e-8)
    recall = tp / (tp + fn + 1e-8)
    f1 = 2 * precision * recall / (precision + recall + 1e-8)
    accuracy = (tp + tn) / (tp + tn + fp + fn + 1e-8)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item()
    }



def get_model_size_mb(model):
    """
    精确计算模型大小（以MB为单位）
    修复版本 - 避免临时文件权限问题
    """
    try:
        # 方法1: 尝试使用临时文件
        import tempfile
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pth') as tmp_file:
            torch.save(model.state_dict(), tmp_file.name)
            size_mb = os.path.getsize(tmp_file.name) / (1024 * 1024)
            # 清理临时文件
            try:
                os.unlink(tmp_file.name)
            except:
                pass
        return size_mb
    except Exception as e:
        print(f"临时文件方法失败: {e}")
        # 方法2: 使用内存计算（备用方案）
        total_params = 0
        for param in model.parameters():
            if param.requires_grad:
                total_params += param.numel()

        # 估算模型大小 (假设每个参数4字节 + 一些开销)
        estimated_size_mb = total_params * 4 / (1024 * 1024) * 1.2  # 加20%开销
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

def train_model(model, train_loader, val_loader, device, args):
    """训练函数（升级版：AMP、warmup+cosine、参数分组、早停 min_delta、可限步）"""
    import math

    print(f"开始训练Cloud-Net模型...")

    # ---- 可选项（不改 get_args 也能用）----
    use_amp = bool(getattr(args, "use_amp", False)) and device.type == "cuda"
    max_steps_per_epoch = getattr(args, "max_steps_per_epoch", None)  # None 或 正整数
    patience = int(getattr(args, "patience", 10))
    min_delta = float(getattr(args, "min_delta", 0.0))
    warmup_ratio = float(getattr(args, "warmup_ratio", 0.1))  # 用于 warmup steps 占比

    # ---- 参数分组：BN/bias 不做 weight decay ----
    decay, no_decay = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        if p.ndim == 1 or "bias" in n.lower() or "bn" in n.lower():
            no_decay.append(p)
        else:
            decay.append(p)
    optimizer = optim.AdamW(
        [
            {"params": decay, "weight_decay": args.weight_decay},
            {"params": no_decay, "weight_decay": 0.0},
        ],
        lr=args.learning_rate,
        betas=(0.9, 0.999),
    )

    # ---- 混合精度 ----
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    # ---- 逐步调度（warmup + cosine）按 step 而非 epoch ----
    total_steps = max(1, len(train_loader) * max(1, args.epochs))
    warmup_steps = int(total_steps * warmup_ratio)
    def lr_lambda(current_step):
        if total_steps <= 1:
            return 1.0
        if current_step < warmup_steps and warmup_steps > 0:
            return float(current_step + 1) / float(max(1, warmup_steps))
        # cosine 部分
        progress = float(current_step - warmup_steps) / float(max(1, total_steps - warmup_steps))
        return 0.5 * (1.0 + math.cos(math.pi * progress))
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = -1.0
    best_model_state = None
    train_history = {"loss": [], "iou": [], "val_iou": [], "val_f1": []}
    patience_counter = 0
    global_step = 0

    for epoch in range(args.epochs):
        model.train()
        running_loss = 0.0
        running_metrics = {"iou": 0.0, "f1": 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f"Epoch {epoch + 1}/{args.epochs}")
        for batch_idx, (images, masks) in enumerate(pbar):
            if max_steps_per_epoch is not None and batch_idx >= max_steps_per_epoch:
                break

            images, masks = images.to(device), masks.to(device)
            optimizer.zero_grad(set_to_none=True)

            with torch.cuda.amp.autocast(enabled=use_amp):
                outputs = model(images)
                ce_loss = ce_loss_fn(outputs, masks)
                d_loss  = dice_loss(outputs, masks)
                f_loss  = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)
                total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            # 反传 + 更新
            if use_amp:
                scaler.scale(total_loss).backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                scaler.step(optimizer)
                scaler.update()
            else:
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                optimizer.step()

            scheduler.step()
            global_step += 1

            # 训练指标（快速看趋势）
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += float(total_loss.detach().item())
            running_metrics["iou"] += metrics["iou"]
            running_metrics["f1"]  += metrics["f1"]
            num_batches += 1

            pbar.set_postfix({
                "Loss": f"{total_loss.item():.4f}",
                "IoU":  f"{metrics['iou']:.4f}",
                "F1":   f"{metrics['f1']:.4f}",
                "LR":   f"{optimizer.param_groups[0]['lr']:.2e}",
            })

        # 计算平均训练指标
        avg_train_loss = running_loss / max(1, num_batches)
        avg_train_iou  = running_metrics["iou"] / max(1, num_batches)

        # 验证
        val_metrics = evaluate_model(model, val_loader, device)

        # 记录 + 早停
        train_history["loss"].append(avg_train_loss)
        train_history["iou"].append(avg_train_iou)
        train_history["val_iou"].append(val_metrics["iou"])
        train_history["val_f1"].append(val_metrics["f1"])

        print(f"Epoch [{epoch + 1}/{args.epochs}] - "
              f"Train Loss: {avg_train_loss:.4f}, "
              f"Train IoU: {avg_train_iou:.4f}, "
              f"Val IoU: {val_metrics['iou']:.4f}, "
              f"Val F1: {val_metrics['f1']:.4f}")

        improved = (val_metrics["iou"] > best_iou + min_delta)
        if improved:
            best_iou = val_metrics["iou"]
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        if patience_counter >= patience:
            print(f"早停在 epoch {epoch + 1}，最佳 IoU: {best_iou:.4f}")
            break

    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, max(best_iou, 0.0), train_history

def evaluate_model(model, dataloader, device, threshold=0.5):
    """评估模型性能（累计混淆矩阵后统一计算，更公正）"""
    model.eval()
    eps = 1e-8
    TP = FP = FN = TN = 0.0

    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            logits = model(images)
            probs  = torch.softmax(logits, dim=1)[:, 1, :, :]
            preds  = (probs > threshold).to(masks.dtype)

            # 展平到一维，累计
            p = preds.view(-1).float()
            t = masks.view(-1).float()

            TP += float((p * t).sum().item())
            FP += float((p * (1 - t)).sum().item())
            FN += float(((1 - p) * t).sum().item())
            TN += float(((1 - p) * (1 - t)).sum().item())

    iou = TP / (TP + FP + FN + eps)
    precision = TP / (TP + FP + eps)
    recall = TP / (TP + FN + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (TP + TN) / (TP + TN + FP + FN + eps)

    return {
        "iou": iou,
        "f1": f1,
        "precision": precision,
        "recall": recall,
        "accuracy": accuracy,
    }

def count_parameters(model):
    """统计模型参数数量"""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """
    模型性能（按图）：准确率、精度
    不返回 IoU/F1/Recall 等其它指标。
    """
    model.eval()
    eps = 1e-8
    TP = FP = FN = TN = 0.0

    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            logits = model(images)
            probs  = torch.softmax(logits, dim=1)[:, 1, :, :]
            preds  = (probs > threshold).to(masks.dtype)

            p = preds.view(-1).float()
            t = masks.view(-1).float()

            TP += float((p * t).sum().item())
            FP += float((p * (1 - t)).sum().item())
            FN += float(((1 - p) * t).sum().item())
            TN += float(((1 - p) * (1 - t)).sum().item())

    accuracy  = (TP + TN) / (TP + TN + FP + FN + eps)
    precision = TP / (TP + FP + eps)

    return {
        "accuracy":  float(accuracy),
        "precision": float(precision),
    }


def _measure_inference_memory_mb(model, sample_images, device):
    """
    内存占用（按图）：一次前向推理的增量内存（MB）。
    - CUDA 优先用 CUDA 的峰值统计
    - CPU 用 psutil 记录进程 RSS 的增量
    """
    import gc
    process = psutil.Process(os.getpid())

    model.eval()
    with torch.no_grad():
        # 预热一次，稳定内存
        _ = model(sample_images)

    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        start = torch.cuda.memory_allocated(device)

        with torch.no_grad():
            _ = model(sample_images)

        peak = torch.cuda.max_memory_allocated(device)
        delta_bytes = max(peak - start, 0)
        mem_mb = delta_bytes / (1024 * 1024)
        torch.cuda.empty_cache()
        return float(mem_mb)

    # CPU 路径：用 RSS 增量估计
    gc.collect()
    time.sleep(0.02)  # 轻微等待，降低抖动
    rss_before = process.memory_info().rss

    with torch.no_grad():
        _ = model(sample_images)

    # 再次收集，尽量回收临时张量
    gc.collect()
    time.sleep(0.02)
    rss_after = process.memory_info().rss

    delta_bytes = max(rss_after - rss_before, 0)
    mem_mb = delta_bytes / (1024 * 1024)
    return float(mem_mb)


def evaluate_efficiency_spec(model, dataloader, device):
    """
    模型效率（按图）：参数数量、模型大小(MB)、内存占用(MB)
    - 参数数量：仅统计 requires_grad 的参数
    - 模型大小：复用现有 get_model_size_mb(model)
    - 内存占用：基于首个 batch 的一次前向推理增量内存
    """
    # 拿一个 batch 做内存测试
    first_images = None
    for images, _ in dataloader:
        first_images = images.to(device)
        break
    if first_images is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")

    params = count_parameters(model)
    size_mb = get_model_size_mb(model)
    mem_mb  = _measure_inference_memory_mb(model, first_images, device)

    return {
        "params": int(params),
        "model_size_mb": float(size_mb),
        "memory_mb": float(mem_mb),
    }


def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """
    汇总成按图的“模型评估”结果结构：
    - 模型性能：准确率、精度
    - 模型效率：参数数量、模型大小、内存占用
    """
    perf = evaluate_performance_spec(model, dataloader, device, threshold=threshold)
    eff  = evaluate_efficiency_spec(model, dataloader, device)
    report = {
        "模型性能": {
            "准确率": perf["accuracy"],
            "精度":   perf["precision"],
        },
        "模型效率": {
            "参数数量":     eff["params"],
            "模型大小(MB)": eff["model_size_mb"],
            "内存占用(MB)": eff["memory_mb"],
        }
    }
    return report

def create_dataloaders(args):
    """使用 38-Cloud 真实数据集创建 train/val dataloader（从训练集划分）"""
    print("准备 38-Cloud 真实数据集...")

    norm = transforms.Normalize(
        mean=[0.485, 0.456, 0.406, 0.50],  # R,G,B,NIR
        std =[0.229, 0.224, 0.225, 0.20]
    )

    full_dataset = CloudDetectionDataset(
        root_dir=args.data_root,
        mode='train',
        transform=norm
    )

    train_size = int(0.8 * len(full_dataset))
    val_size   = len(full_dataset) - train_size
    train_dataset, val_dataset = random_split(full_dataset, [train_size, val_size])

    # Windows + rasterio 稳妥起见 num_workers=0
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                              num_workers=0, pin_memory=True)
    val_loader   = DataLoader(val_dataset,   batch_size=args.batch_size, shuffle=False,
                              num_workers=0, pin_memory=True)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")
    return train_loader, val_loader

def save_results(results, output_dir):
    """保存结果"""
    results_path = os.path.join(output_dir, 'compression_results.json')

    def convert_types(obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        elif isinstance(obj, dict):
            return {key: convert_types(value) for key, value in obj.items()}
        elif isinstance(obj, list):
            return [convert_types(item) for item in obj]
        else:
            return obj

    converted_results = convert_types(results)

    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(converted_results, f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def _expected_train_dirs(root):
    return [
        os.path.join(root, "38-Cloud_training", "train_red"),
        os.path.join(root, "38-Cloud_training", "train_green"),
        os.path.join(root, "38-Cloud_training", "train_blue"),
        os.path.join(root, "38-Cloud_training", "train_nir"),
        os.path.join(root, "38-Cloud_training", "train_gt"),
    ]

def _has_train_structure(root):
    try:
        return all(os.path.isdir(p) for p in _expected_train_dirs(root))
    except Exception:
        return False

def _extract_all_zips_under(folder):
    import zipfile
    extracted = False
    for cur_root, _, files in os.walk(folder):
        for f in files:
            if f.lower().endswith(".zip"):
                zf = os.path.join(cur_root, f)
                try:
                    with zipfile.ZipFile(zf, "r") as z:
                        z.extractall(cur_root)
                    print(f"[auto] 已解压: {zf}")
                    extracted = True
                except Exception as e:
                    print(f"[auto] 解压失败（忽略）: {zf} -> {e}")
    return extracted

def _find_data_root_with_training(base_dir):
    # 递归寻找包含 38-Cloud_training 且结构完整的根目录
    for cur_root, dirs, _ in os.walk(base_dir):
        if "38-Cloud_training" in dirs:
            cand = cur_root
            if _has_train_structure(cand):
                return cand
    return None

def ensure_dataset_available(args):
    """
    若 args.data_root 下没有 38-Cloud_training/train_red 等结构：
      1) 用 kagglehub 下载 38-Cloud 数据集
      2) 自动递归定位到包含 38-Cloud_training 的根
      3) 若压缩包存在则自动解压后再定位
    成功后会回写 args.data_root。
    """
    if _has_train_structure(args.data_root):
        return args.data_root

    print("[auto] 未发现完整数据集结构，尝试 KaggleHub 自动下载 38-Cloud 数据集...")

    try:
        import kagglehub  # 如果未安装，需要先: pip install kagglehub
    except Exception as e:
        raise RuntimeError(
            "未安装 kagglehub。请先执行：pip install kagglehub\n"
            f"导入失败详情：{e}"
        )

    # 下载
    try:
        dl_path = kagglehub.dataset_download("sorour/38cloud-cloud-segmentation-in-satellite-images")
        dl_path = os.path.normpath(dl_path)
        print(f"[auto] KaggleHub 下载完成：{dl_path}")
    except Exception as e:
        raise RuntimeError(f"下载 Kaggle 数据集失败，请检查网络或 Kaggle 配置：{e}")

    # 直接检查下载目录
    if _has_train_structure(dl_path):
        args.data_root = dl_path
        print(f"[auto] 识别成功：data_root = {args.data_root}")
        return args.data_root

    # 递归寻找符合结构的根
    cand = _find_data_root_with_training(dl_path)
    if cand:
        args.data_root = cand
        print(f"[auto] 已定位到包含 38-Cloud_training 的目录：{args.data_root}")
        return args.data_root

    # 尝试解压后再找一次
    if _extract_all_zips_under(dl_path):
        cand = _find_data_root_with_training(dl_path)
        if cand:
            args.data_root = cand
            print(f"[auto] 解压后已定位：data_root = {args.data_root}")
            return args.data_root

    # 兜底
    print("[auto] 未能自动识别标准目录结构。已将 data_root 指向 Kaggle 下载目录。")
    print("       请确认其下存在 `38-Cloud_training/train_red` 等子目录。")
    args.data_root = dl_path
    return args.data_root


def get_args():

    parser = argparse.ArgumentParser(description="Cloud-Net 模型训练 + 知识蒸馏 + 微调（真实38-Cloud）")

    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CloudDetection")
    )

    # 2) 环境变量覆盖（可选）
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    # 3) 常用参数
    parser.add_argument("--data-root",      default=default_data_root, help="38-Cloud 数据集根目录")
    parser.add_argument("--output-dir",     default="./output",        help="输出目录")
    parser.add_argument("--epochs",         type=int,   default=25,    help="基础训练轮数")
    parser.add_argument("--batch-size",     type=int,   default=8,     help="批次大小")
    parser.add_argument("--learning-rate",  type=float, default=1e-3,  help="学习率")
    parser.add_argument("--weight-decay",   type=float, default=1e-4,  help="权重衰减")
    parser.add_argument("--distill-epochs", type=int,   default=3,     help="蒸馏训练轮数")
    parser.add_argument("--distill-alpha",  type=float, default=0.5,   help="蒸馏中 CE 权重 α")
    parser.add_argument("--ft-epochs",      type=int,   default=5,     help="蒸馏后微调轮数")
    parser.add_argument("--ft-lr",          type=float, default=1e-4,  help="蒸馏后微调学习率")

    # 可选：当 epochs=0 时用于直接加载 teacher 的权重
    parser.add_argument("--teacher-ckpt",   type=str,   default=None,
                        help="已训练 teacher 的 .pth 路径；当 --epochs=0 时需要（否则尝试从输出目录找 teacher.pth）")

    # 训练细节（保持你原逻辑的默认值）
    parser.add_argument("--use_amp", action="store_true", default=False, help="开启AMP混合精度（需要CUDA）")
    parser.add_argument("--max_steps_per_epoch", type=int, default=None, help="每epoch最多训练的步数（调试用）")
    parser.add_argument("--patience", type=int, default=10, help="早停patience")
    parser.add_argument("--min_delta", type=float, default=0.0, help="早停最小提升")
    parser.add_argument("--warmup_ratio", type=float, default=0.1, help="LR warmup比例")

    args = parser.parse_args()

    # 4) 友好检查（确保 38-Cloud 的目录结构完整）
    expected_dirs = [
        os.path.join(args.data_root, "38-Cloud_training", "train_red"),
        os.path.join(args.data_root, "38-Cloud_training", "train_green"),
        os.path.join(args.data_root, "38-Cloud_training", "train_blue"),
        os.path.join(args.data_root, "38-Cloud_training", "train_nir"),
        os.path.join(args.data_root, "38-Cloud_training", "train_gt"),
        os.path.join(args.data_root, "38-Cloud_test", "test_red"),
        os.path.join(args.data_root, "38-Cloud_test", "test_green"),
        os.path.join(args.data_root, "38-Cloud_test", "test_blue"),
        os.path.join(args.data_root, "38-Cloud_test", "test_nir"),
        os.path.join(args.data_root, "38-Cloud_test", "test_gt"),
    ]
    missing = [p for p in expected_dirs if not os.path.isdir(p)]
    if missing:
        print("[提示] 数据目录可能不完整或路径不对：")
        for p in missing:
            print("  - 缺少目录：", p)
        print("你可以通过命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")

    return args

def main():
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    torch.manual_seed(42)
    np.random.seed(42)

    # >>> 新增：确保数据可用（缺失则自动下载并定位）
    ensure_dataset_available(args)
    # <<<

    # —— 数据加载 —— #
    train_loader, val_loader = create_dataloaders(args)

    # —— STAGE 1: 基础训练 —— #
    print("=== STAGE: TRAIN ===")
    teacher = CloudNet(num_classes=2, dropout_rate=0.1).to(device)

    if args.epochs > 0:
        teacher, orig_iou, _ = train_model(teacher, train_loader, val_loader, device, args)
        torch.save(teacher.state_dict(), os.path.join(args.output_dir, "teacher.pth"))
    else:
        ckpt_path = args.teacher_ckpt
        if ckpt_path is None:
            candidate = os.path.join(args.output_dir, "teacher.pth")
            ckpt_path = candidate if os.path.isfile(candidate) else None
        if ckpt_path is None or not os.path.isfile(ckpt_path):
            raise FileNotFoundError("已选择跳过训练（--epochs=0），但未提供 --teacher-ckpt，且输出目录也找不到 teacher.pth")
        teacher.load_state_dict(torch.load(ckpt_path, map_location=device))
        teacher.eval()
        print(f"已从 {ckpt_path} 加载 teacher 权重。")

    orig_metrics = evaluate_model(teacher, val_loader, device)
    orig_iou = orig_metrics['iou']
    orig_size = get_model_size_mb(teacher)
    orig_params = count_parameters(teacher)
    print(f"Teacher 就绪：IoU={orig_iou:.4f}, 大小={orig_size:.2f}MB, 参数={orig_params:,}")

    # —— STAGE 2: 知识蒸馏 —— #
    print("=== STAGE: DISTILLATION ===")
    student = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    student = distill_model(
        teacher=teacher,
        student=student,
        train_loader=train_loader,
        val_loader=val_loader,
        device=device,
        epochs=args.distill_epochs,
        alpha=args.distill_alpha,
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
        temperature=1.0
    )
    distill_metrics = evaluate_model(student, val_loader, device)
    distill_iou    = distill_metrics['iou']
    distill_size   = get_model_size_mb(student)
    distill_params = count_parameters(student)
    torch.save(student.state_dict(), os.path.join(args.output_dir, "student_distilled.pth"))
    print(f"蒸馏后模型 IoU={distill_iou:.4f}, 大小={distill_size:.2f}MB, 参数={distill_params:,}")

    # —— STAGE 3: 微调 Student —— #
    if args.ft_epochs > 0:
        print("=== STAGE: FINE-TUNE AFTER DISTILLATION ===")
        ft_args = argparse.Namespace(
            epochs=args.ft_epochs,
            learning_rate=args.ft_lr,
            weight_decay=args.weight_decay
        )
        student, ft_iou, _ = train_model(student, train_loader, val_loader, device, ft_args)
        ft_size   = get_model_size_mb(student)
        ft_params = count_parameters(student)
        torch.save(student.state_dict(), os.path.join(args.output_dir, "student_finetuned.pth"))
        print(f"微调后模型 IoU={ft_iou:.4f}, 大小={ft_size:.2f}MB, 参数={ft_params:,}")
    else:
        ft_iou = None
        ft_size = None
        ft_params = None

    # —— 汇总报告（按图） —— #
    report_teacher = build_eval_report_spec(teacher, val_loader, device)
    report_student = build_eval_report_spec(student, val_loader, device)
    report_ft = build_eval_report_spec(student, val_loader, device) if args.ft_epochs > 0 else None

    stats = {
        "teacher": {
            "iou": orig_iou,
            "size_mb": orig_size,
            "params": orig_params
        },
        "student": {
            "iou": distill_metrics["iou"],
            "size_mb": distill_size,
            "params": distill_params
        },
        "fine_tuned": {
            "iou": ft_iou,
            "size_mb": ft_size,
            "params": ft_params
        },
        "spec_report": {
            "teacher": report_teacher,
            "student": report_student,
            "fine_tuned": report_ft
        }
    }

    save_results(stats, args.output_dir)

    print("\n=== 按图的模型评估 ===")
    def _p(tag, rep):
        if rep is None:
            print(f"{tag}: (无)"); return
        print(f"{tag} | 模型性能: 准确率={rep['模型性能']['准确率']:.4f}, 精度={rep['模型性能']['精度']:.4f} | "
              f"模型效率: 参数数量={rep['模型效率']['参数数量']}, 模型大小={rep['模型效率']['模型大小(MB)']:.2f}MB, "
              f"内存占用={rep['模型效率']['内存占用(MB)']:.2f}MB")
    _p("Teacher", report_teacher)
    _p("Student(蒸馏)", report_student)
    _p("Student(微调)", report_ft)

if __name__ == "__main__":
    main()
