#!/usr/bin/env python

"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import numpy as np
import json
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import OrderedDict
import math
import tempfile
import glob
import rasterio
import psutil


class Cloud38Dataset(Dataset):
    """
    38-Cloud training dataset (4-band satellite patches + binary cloud masks).

    Expects this layout under data_root:
        38-Cloud_training/train_red, train_green, train_blue, train_nir, train_gt

    Band files may carry a "red_"/"green_"/... prefix or not; ground-truth
    files may be .tif or .png, with or without a "gt_" prefix. Any sample
    missing one of its bands or its ground truth is silently skipped.

    __getitem__ returns (img, mask):
        img  - float32 tensor [4, img_size, img_size]
        mask - long tensor [img_size, img_size] with values {0, 1}
    """

    def __init__(self, data_root, img_size=256):
        super().__init__()
        self.img_size = img_size
        base = os.path.join(data_root, "38-Cloud_training")
        self.red_dir   = os.path.join(base, "train_red")
        self.green_dir = os.path.join(base, "train_green")
        self.blue_dir  = os.path.join(base, "train_blue")
        self.nir_dir   = os.path.join(base, "train_nir")
        self.gt_dir    = os.path.join(base, "train_gt")

        red_list = sorted(glob.glob(os.path.join(self.red_dir, "*.tif")))
        self.samples = []

        for rfp in red_list:
            fname = os.path.basename(rfp)
            # Strip the "red_" prefix (if present) to get the shared stem.
            stem = fname
            if stem.startswith("red_"):
                stem = stem[len("red_"):]

            # Matching files for the other bands.
            gfp = os.path.join(self.green_dir, f"green_{stem}")
            bfp = os.path.join(self.blue_dir,  f"blue_{stem}")
            nfp = os.path.join(self.nir_dir,   f"nir_{stem}")
            # Fallback: some distributions store band files without a band prefix.
            if not os.path.exists(gfp):
                gfp = os.path.join(self.green_dir, fname.replace("red_", "green_"))
            if not os.path.exists(bfp):
                bfp = os.path.join(self.blue_dir,  fname.replace("red_", "blue_"))
            if not os.path.exists(nfp):
                nfp = os.path.join(self.nir_dir,   fname.replace("red_", "nir_"))

            # GT may be tif or png, and may be named gt_*.
            candidates = [
                os.path.join(self.gt_dir, f"gt_{stem}"),
                os.path.join(self.gt_dir, stem),
                os.path.join(self.gt_dir, f"gt_{os.path.splitext(stem)[0]}.png"),
                os.path.join(self.gt_dir, f"{os.path.splitext(stem)[0]}.png"),
            ]
            gtp = next((c for c in candidates if os.path.exists(c)), None)
            if gtp is None or not (os.path.exists(rfp) and os.path.exists(gfp)
                                   and os.path.exists(bfp) and os.path.exists(nfp)):
                continue

            self.samples.append((rfp, gfp, bfp, nfp, gtp))

        if len(self.samples) == 0:
            raise RuntimeError(f"在 {self.red_dir} 下没有匹配到任何样本，请检查数据路径。")

        print(f"38-Cloud 样本数: {len(self.samples)}")

    def __len__(self):
        return len(self.samples)

    def _read_tif(self, path):
        # Read band 1 of a single-channel GeoTIFF as a 2-D array.
        with rasterio.open(path) as src:
            arr = src.read(1)
        return arr

    def __getitem__(self, idx):
        rfp, gfp, bfp, nfp, gtp = self.samples[idx]
        r = self._read_tif(rfp)
        g = self._read_tif(gfp)
        b = self._read_tif(bfp)
        n = self._read_tif(nfp)

        # Stack into a 4-channel image.
        img = np.stack([r, g, b, n], axis=0).astype(np.float32)

        # Simple normalization towards [0, 1].
        # NOTE(review): assumes 8-bit (0-255) patches; original 38-Cloud tiles
        # are 16-bit, in which case values can exceed 1 after this divide —
        # TODO confirm the patch bit depth.
        if img.max() > 1.0:
            img = img / 255.0

        # Read the ground-truth mask.
        with rasterio.open(gtp) as src:
            gt = src.read(1)
        # Binarize: values above 127 count as cloud.
        gt = (gt > 127).astype(np.uint8)

        # Resize to img_size if needed (bilinear for bands, nearest for mask).
        C, H, W = img.shape
        if (H, W) != (self.img_size, self.img_size):
            # Fix: use the module-level torch/F instead of re-importing them locally.
            img_t = torch.from_numpy(img).unsqueeze(0)                  # [1,4,H,W]
            img_t = F.interpolate(img_t, size=(self.img_size, self.img_size),
                                  mode='bilinear', align_corners=False).squeeze(0)
            gt_t = torch.from_numpy(gt).unsqueeze(0).unsqueeze(0).float()   # [1,1,H,W]
            gt_t = F.interpolate(gt_t, size=(self.img_size, self.img_size),
                                 mode='nearest').squeeze(0).squeeze(0).long()
            img = img_t.numpy()
            gt = gt_t.numpy().astype(np.uint8)

        # Convert to torch tensors.
        img = torch.from_numpy(img)               # [4,H,W], float32
        mask = torch.from_numpy(gt).long()        # [H,W], {0,1}

        return img, mask

def neural_architecture_search(train_loader, val_loader, device, args):
    """
    Simplified neural architecture search over a small set of CloudNetNAS configs.

    Each candidate is briefly trained (first 10 batches per epoch, for
    args.nas_epochs epochs, using args.ft_lr) and scored by validation IoU;
    the best candidate is deep-copied and returned.

    Returns:
        The best candidate model, or a freshly initialized CloudNet when no
        candidate achieved an IoU above 0.0.
    """
    print("开始神经架构搜索...")

    # AutoKeras proved problematic, so we use a simplified grid-style search.
    best_model = None
    best_iou = 0.0

    # Search space: channel width, dropout rate, and depth multiplier.
    search_configs = [
        {'base_channels': 32, 'dropout': 0.1, 'depth_factor': 0.8},
        {'base_channels': 48, 'dropout': 0.15, 'depth_factor': 1.0},
        {'base_channels': 64, 'dropout': 0.1, 'depth_factor': 1.0},
        {'base_channels': 80, 'dropout': 0.2, 'depth_factor': 1.2},
    ]

    # Fix: progress totals previously showed len(search_configs) even when
    # args.nas_trials capped the number of candidates actually evaluated.
    num_trials = min(args.nas_trials, len(search_configs))

    for i, config in enumerate(search_configs[:num_trials]):
        print(f"测试配置 {i + 1}/{num_trials}: {config}")

        # Build the candidate model for this configuration.
        candidate_model = CloudNetNAS(
            num_classes=2,
            base_channels=config['base_channels'],
            dropout_rate=config['dropout'],
            depth_factor=config['depth_factor'],
            in_channels=4
        ).to(device)

        # Quick training for evaluation purposes only.
        optimizer = optim.Adam(candidate_model.parameters(), lr=args.ft_lr)
        candidate_model.train()

        for epoch in range(args.nas_epochs):
            for batch_idx, (images, masks) in enumerate(train_loader):
                if batch_idx >= 10:  # quick evaluation: only the first 10 batches
                    break

                images, masks = images.to(device), masks.to(device)
                optimizer.zero_grad()
                outputs = candidate_model(images)

                # Equal-weight CE + Dice loss for the short NAS training runs.
                ce_loss = F.cross_entropy(outputs, masks)
                d_loss = dice_loss(outputs, masks)
                total_loss = 0.5 * ce_loss + 0.5 * d_loss

                total_loss.backward()
                optimizer.step()

        # Score the candidate on the validation set.
        eval_metrics = evaluate_model(candidate_model, val_loader, device)
        current_iou = eval_metrics['iou']

        print(f"配置 {i + 1} IoU: {current_iou:.4f}")

        if current_iou > best_iou:
            best_iou = current_iou
            best_model = copy.deepcopy(candidate_model)

    print(f"NAS 完成，最佳 IoU: {best_iou:.4f}")
    return best_model if best_model is not None else CloudNet(num_classes=2, dropout_rate=0.1).to(device)

class CloudNet(nn.Module):
    """
    Cloud-Net model (fixed version).

    U-Net style encoder/decoder for cloud detection on 4-band imagery.
    Input [B, 4, H, W] (H, W divisible by 16) -> logits [B, num_classes, H, W].
    """
    def __init__(self, num_classes=2, dropout_rate=0.1):
        super(CloudNet, self).__init__()
        # Encoder path: channels double at each level.
        self.enc1 = self._make_layer(4, 64, dropout_rate)
        self.enc2 = self._make_layer(64, 128, dropout_rate)
        self.enc3 = self._make_layer(128, 256, dropout_rate)
        self.enc4 = self._make_layer(256, 512, dropout_rate)
        # Bottleneck.
        self.center = self._make_layer(512, 1024, dropout_rate)
        # Decoder path; stages after the first consume [upsampled, skip] concatenations.
        self.dec4 = self._make_decoder_layer(1024, 512)
        self.dec3 = self._make_decoder_layer(1024, 256)
        self.dec2 = self._make_decoder_layer(512, 128)
        self.dec1 = self._make_decoder_layer(256, 64)
        # 1x1 projection to class logits.
        self.final = nn.Conv2d(128, num_classes, 1)
        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    def _make_layer(self, in_channels, out_channels, dropout_rate):
        # Two 3x3 conv + BN + ReLU blocks, dropout between them.
        layers = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)

    def _make_decoder_layer(self, in_channels, out_channels):
        # 2x transposed-conv upsampling followed by a 3x3 refinement conv.
        layers = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        # Kaiming init for convs; unit-gain init for batch norms.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Encoder with max-pool downsampling.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))
        e4 = self.enc4(self.pool(e3))
        # Bottleneck.
        mid = self.center(self.pool(e4))
        # Decoder: each stage concatenates the matching encoder feature map.
        d4 = torch.cat([self.dec4(mid), e4], dim=1)
        d3 = torch.cat([self.dec3(d4), e3], dim=1)
        d2 = torch.cat([self.dec2(d3), e2], dim=1)
        d1 = torch.cat([self.dec1(d2), e1], dim=1)
        return self.final(d1)


class CloudNetNAS(nn.Module):
    """
    Configurable CloudNet variant used during architecture search.

    U-Net shaped; `base_channels` and `depth_factor` scale the per-level
    channel widths, `dropout_rate` sets encoder dropout, and `in_channels`
    selects the number of input bands (default 4: R/G/B/NIR).
    """
    def __init__(self, num_classes=2, base_channels=64, dropout_rate=0.1,
                 depth_factor=1.0, in_channels=4):  # in_channels defaults to 4 bands
        super(CloudNetNAS, self).__init__()
        # Channel widths for the five levels, scaled by the search parameters.
        c1 = int(base_channels * depth_factor)
        c2 = int(base_channels * 2 * depth_factor)
        c3 = int(base_channels * 4 * depth_factor)
        c4 = int(base_channels * 8 * depth_factor)
        c5 = int(base_channels * 16 * depth_factor)

        # Encoder.
        self.enc1 = self._make_layer(in_channels, c1, dropout_rate)
        self.enc2 = self._make_layer(c1, c2, dropout_rate)
        self.enc3 = self._make_layer(c2, c3, dropout_rate)
        self.enc4 = self._make_layer(c3, c4, dropout_rate)

        # Bottleneck.
        self.center = self._make_layer(c4, c5, dropout_rate)

        # Decoder; stages after the first consume [upsampled, skip] concatenations.
        self.dec4 = self._make_decoder_layer(c5, c4)
        self.dec3 = self._make_decoder_layer(c4 * 2, c3)
        self.dec2 = self._make_decoder_layer(c3 * 2, c2)
        self.dec1 = self._make_decoder_layer(c2 * 2, c1)

        # 1x1 classifier head.
        self.final = nn.Conv2d(c1 * 2, num_classes, 1)
        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    def _make_layer(self, in_channels, out_channels, dropout_rate):
        # Two 3x3 conv + BN + ReLU blocks, dropout between them.
        stages = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*stages)

    def _make_decoder_layer(self, in_channels, out_channels):
        # 2x transposed-conv upsampling followed by a 3x3 refinement conv.
        stages = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*stages)

    def _initialize_weights(self):
        # Kaiming init for convs; unit-gain init for batch norms.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Encoder with max-pool downsampling.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))
        e4 = self.enc4(self.pool(e3))
        # Bottleneck.
        mid = self.center(self.pool(e4))
        # Decoder: each stage concatenates the matching encoder feature map.
        d4 = torch.cat([self.dec4(mid), e4], dim=1)
        d3 = torch.cat([self.dec3(d4), e3], dim=1)
        d2 = torch.cat([self.dec2(d3), e2], dim=1)
        d1 = torch.cat([self.dec1(d2), e1], dim=1)
        return self.final(d1)


def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the cloud (class-1) probability channel.

    pred: logits [B, 2, H, W]; target: labels [B, H, W] in {0, 1}.
    Returns 1 - mean per-sample Dice coefficient.
    """
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    gt = target.float()

    overlap = (cloud_prob * gt).sum(dim=(1, 2))
    denom = cloud_prob.sum(dim=(1, 2)) + gt.sum(dim=(1, 2))

    per_sample_dice = (2.0 * overlap + smooth) / (denom + smooth)
    return 1 - per_sample_dice.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal loss: per-pixel cross-entropy down-weighted on easy examples."""
    per_pixel_ce = F.cross_entropy(pred, target, reduction='none')
    prob_true = torch.exp(-per_pixel_ce)  # probability of the true class
    weighted = alpha * (1 - prob_true) ** gamma * per_pixel_ce
    return weighted.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """
    Segmentation metrics at a fixed probability threshold.

    pred: logits [B, 2, H, W]; target: labels [B, H, W] in {0, 1}.
    Returns a dict with iou, f1, precision, recall, accuracy (floats).
    """
    eps = 1e-8
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    pred_mask = (cloud_prob > threshold).float()
    gt_mask = target.float()

    # Confusion-matrix counts.
    tp = (pred_mask * gt_mask).sum()
    fp = (pred_mask * (1 - gt_mask)).sum()
    fn = ((1 - pred_mask) * gt_mask).sum()
    tn = ((1 - pred_mask) * (1 - gt_mask)).sum()

    # IoU over the cloud class.
    union = pred_mask.sum() + gt_mask.sum() - tp
    iou = tp / (union + eps)

    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item()
    }


def get_model_size_mb(model):
    """
    Return the model's size in megabytes.

    Primary method: serialize the state dict to a temporary file and measure
    it on disk. Fallback: estimate from parameter and buffer byte counts.

    Fixes vs. the original:
    - the file handle is closed before torch.save writes to its path; the old
      code saved while the NamedTemporaryFile was still open, which fails with
      PermissionError on Windows and always fell through to estimation;
    - the estimate now includes buffers (e.g. BatchNorm running stats) and
      non-trainable parameters, and uses each tensor's real element size
      instead of assuming 4 bytes.
    """
    try:
        # Create the temp file, then close the handle so torch.save can
        # reopen the path (required on Windows).
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pth') as tmp_file:
            tmp_path = tmp_file.name
        try:
            torch.save(model.state_dict(), tmp_path)
            return os.path.getsize(tmp_path) / (1024 * 1024)
        finally:
            # Best-effort cleanup of the temporary file.
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
    except Exception as e:
        print(f"临时文件方法失败: {e}")
        # Fallback: in-memory estimate from tensor byte counts.
        total_bytes = 0
        for param in model.parameters():
            total_bytes += param.numel() * param.element_size()
        for buf in model.buffers():
            total_bytes += buf.numel() * buf.element_size()

        # Add ~20% overhead for serialization metadata.
        estimated_size_mb = total_bytes / (1024 * 1024) * 1.2
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

def train_model(model, train_loader, val_loader, device, args):
    """Train the model with a combined CE + Dice + Focal loss.

    Uses AdamW with cosine-annealing LR, gradient clipping (max norm 1.0),
    best-checkpoint tracking on validation IoU, and early stopping with a
    patience of 10 epochs.

    Args:
        model: segmentation network producing [B, 2, H, W] logits.
        train_loader / val_loader: iterables of (images, masks) batches.
        device: torch device to run on.
        args: must provide .epochs, .learning_rate, .weight_decay.

    Returns:
        (model with best weights loaded, best validation IoU, history dict
        with keys 'loss', 'iou', 'val_iou', 'val_f1').
    """
    print(f"开始训练Cloud-Net模型...")

    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay,
                            betas=(0.9, 0.999))

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    # Plain cross-entropy; class imbalance is handled by the dice/focal terms
    # rather than per-class weights.
    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = 0.0
    best_model_state = None
    train_history = {'loss': [], 'iou': [], 'val_iou': [], 'val_f1': []}

    patience = 10
    patience_counter = 0

    for epoch in range(args.epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_metrics = {'iou': 0.0, 'f1': 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Combined loss: CE (0.3) + Dice (0.4) + Focal (0.3).
            ce_loss = ce_loss_fn(outputs, masks)
            d_loss = dice_loss(outputs, masks)
            f_loss = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)

            total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # Batch metrics at a fixed 0.5 threshold.
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += total_loss.item()
            running_metrics['iou'] += metrics['iou']
            running_metrics['f1'] += metrics['f1']
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'IoU': f'{metrics["iou"]:.4f}',
                'F1': f'{metrics["f1"]:.4f}'
            })

        # Average training metrics over the epoch.
        avg_train_loss = running_loss / num_batches
        avg_train_iou = running_metrics['iou'] / num_batches

        # --- validation phase ---
        val_metrics = evaluate_model(model, val_loader, device)

        scheduler.step()

        # Track the best checkpoint by validation IoU.
        if val_metrics['iou'] > best_iou:
            best_iou = val_metrics['iou']
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # Record epoch history.
        train_history['loss'].append(avg_train_loss)
        train_history['iou'].append(avg_train_iou)
        train_history['val_iou'].append(val_metrics['iou'])
        train_history['val_f1'].append(val_metrics['f1'])

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {avg_train_iou:.4f}, '
              f'Val IoU: {val_metrics["iou"]:.4f}, '
              f'Val F1: {val_metrics["f1"]:.4f}')

        # Early stopping once validation IoU stops improving.
        if patience_counter >= patience:
            print(f"早停在epoch {epoch + 1}, 最佳IoU: {best_iou:.4f}")
            break

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, best_iou, train_history


def evaluate_model(model, dataloader, device, threshold=0.5):
    """Average segmentation metrics over a dataloader at a fixed threshold."""
    model.eval()
    sums = {'iou': 0.0, 'f1': 0.0, 'precision': 0.0, 'recall': 0.0, 'accuracy': 0.0}
    batches = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            outputs = model(images)
            batch_metrics = calculate_metrics_fixed(outputs, masks, threshold)

            for name in sums:
                sums[name] += batch_metrics[name]
            batches += 1

    # Batch-averaged metrics.
    return {name: total / batches for name, total in sums.items()}


def count_parameters(model):
    """Return the number of trainable parameters in the model."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """Dataset-level (micro-averaged) accuracy and precision at a fixed threshold."""
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            cloud_prob = torch.softmax(model(images), dim=1)[:, 1, :, :]
            pred = (cloud_prob > threshold).to(masks.dtype)
            # Flatten and accumulate confusion-matrix counts over all pixels.
            p_flat = pred.reshape(-1).float()
            t_flat = masks.reshape(-1).float()
            tp += float((p_flat * t_flat).sum().item())
            fp += float((p_flat * (1 - t_flat)).sum().item())
            fn += float(((1 - p_flat) * t_flat).sum().item())
            tn += float(((1 - p_flat) * (1 - t_flat)).sum().item())
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def _measure_inference_memory_mb(model, sample_images, device):
    """Approximate the memory footprint (MB) of one inference forward pass.

    CUDA: peak allocator delta around a forward pass. CPU: process RSS delta
    (best-effort; inherently noisy).
    """
    import gc, time
    proc = psutil.Process(os.getpid())
    model.eval()
    # Warm-up forward so lazy allocations don't pollute the measurement.
    with torch.no_grad():
        model(sample_images)
    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        baseline = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            model(sample_images)
        peak = torch.cuda.max_memory_allocated(device)
        return float(max(peak - baseline, 0) / (1024 * 1024))
    # CPU path: settle, snapshot RSS, run once, settle, snapshot again.
    gc.collect()
    time.sleep(0.02)
    before = proc.memory_info().rss
    with torch.no_grad():
        model(sample_images)
    gc.collect()
    time.sleep(0.02)
    after = proc.memory_info().rss
    return float(max(after - before, 0) / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """Efficiency report: parameter count, on-disk size, inference memory."""
    # Grab one batch of images to drive the memory measurement.
    sample = None
    for images, _ in dataloader:
        sample = images.to(device)
        break
    if sample is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")
    return {
        "参数数量": int(count_parameters(model)),
        "模型大小(MB)": float(get_model_size_mb(model)),
        "内存占用(MB)": float(_measure_inference_memory_mb(model, sample, device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Combine performance and efficiency metrics into a single report dict."""
    performance = evaluate_performance_spec(model, dataloader, device, threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    report = {
        "模型性能": {"准确率": performance["accuracy"], "精度": performance["precision"]},
        "模型效率": efficiency
    }
    return report

def create_dataloaders(args):
    """Build train/val DataLoaders from the 38-Cloud dataset (80/20 split)."""
    print("准备 38-Cloud 真实数据集...")
    full_dataset = Cloud38Dataset(
        data_root=args.data_root,
        img_size=args.img_size
    )

    # Random 80/20 split into train and validation subsets.
    n_train = int(0.8 * len(full_dataset))
    n_val = len(full_dataset) - n_train
    train_dataset, val_dataset = random_split(full_dataset, [n_train, n_val])

    # Windows + rasterio: num_workers=0 is the stable choice.
    workers = 0
    use_pin = torch.cuda.is_available()

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=workers,
        pin_memory=use_pin
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=workers,
        pin_memory=use_pin
    )

    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")
    return train_loader, val_loader

def save_results(results, output_dir):
    """
    Save a results dict (possibly containing numpy values) as JSON.

    Writes output_dir/compression_results.json with UTF-8 encoding and
    non-ASCII characters preserved.

    Fixes vs. the original converter: np.bool_ is now handled (it is neither
    np.integer nor np.floating, so json.dump raised TypeError on it), and
    tuples are recursed into like lists so numpy values nested in tuples are
    converted too.
    """
    results_path = os.path.join(output_dir, 'compression_results.json')

    def convert_types(obj):
        # Recursively convert numpy containers/scalars to JSON-native types.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        elif isinstance(obj, dict):
            return {key: convert_types(value) for key, value in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return [convert_types(item) for item in obj]
        else:
            return obj

    converted_results = convert_types(results)

    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(converted_results, f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def _expected_train_dirs(root):
    """Paths of the five per-band training folders required under root."""
    base = os.path.join(root, "38-Cloud_training")
    band_dirs = ("train_red", "train_green", "train_blue", "train_nir", "train_gt")
    return [os.path.join(base, name) for name in band_dirs]

def _has_train_structure(root):
    """Return True when every expected 38-Cloud training folder exists under root."""
    try:
        for path in _expected_train_dirs(root):
            if not os.path.isdir(path):
                return False
        return True
    except Exception:
        # Any path error (e.g. bad root value) counts as "structure missing".
        return False

def _extract_all_zips_under(folder):
    """Extract every *.zip found (recursively) next to itself; return True if any extracted."""
    import zipfile
    did_extract = False
    for cur_root, _, files in os.walk(folder):
        for name in files:
            if not name.lower().endswith(".zip"):
                continue
            zf_path = os.path.join(cur_root, name)
            try:
                with zipfile.ZipFile(zf_path, "r") as zf:
                    zf.extractall(cur_root)
                print(f"[auto] 已解压: {zf_path}")
                did_extract = True
            except Exception as e:
                # Corrupt/unreadable archives are skipped, not fatal.
                print(f"[auto] 解压失败（忽略）: {zf_path} -> {e}")
    return did_extract

def _find_data_root_with_training(base_dir):
    """Walk base_dir for a directory that contains a complete 38-Cloud_training tree."""
    for cur_root, dirs, _ in os.walk(base_dir):
        if "38-Cloud_training" not in dirs:
            continue
        if _has_train_structure(cur_root):
            return cur_root
    return None

def ensure_dataset_available(args):
    """
    Make sure args.data_root contains the 38-Cloud_training/train_* structure.

    If it does not:
      1) download the 38-Cloud dataset via kagglehub,
      2) locate a root directory containing a complete 38-Cloud_training tree,
      3) if archives are present, extract them and search again.
    On success args.data_root is updated in place; the resolved root is
    returned either way (falling back to the raw download directory).

    Raises:
        RuntimeError: if kagglehub is not installed or the download fails.
    """
    if _has_train_structure(args.data_root):
        return args.data_root

    print("[auto] 未发现完整数据集结构，尝试 KaggleHub 自动下载 38-Cloud 数据集...")

    try:
        import kagglehub  # not installed? run: pip install kagglehub
    except Exception as e:
        raise RuntimeError(
            "未安装 kagglehub。请先执行：pip install kagglehub\n"
            f"导入失败详情：{e}"
        )

    # Download via KaggleHub.
    try:
        dl_path = kagglehub.dataset_download("sorour/38cloud-cloud-segmentation-in-satellite-images")
        dl_path = os.path.normpath(dl_path)
        print(f"[auto] KaggleHub 下载完成：{dl_path}")
    except Exception as e:
        raise RuntimeError(f"下载 Kaggle 数据集失败，请检查网络或 Kaggle 配置：{e}")

    # Check the download directory itself first.
    if _has_train_structure(dl_path):
        args.data_root = dl_path
        print(f"[auto] 识别成功：data_root = {args.data_root}")
        return args.data_root

    # Search recursively for a root with the expected structure.
    cand = _find_data_root_with_training(dl_path)
    if cand:
        args.data_root = cand
        print(f"[auto] 已定位到包含 38-Cloud_training 的目录：{args.data_root}")
        return args.data_root

    # Extract any archives, then search once more.
    if _extract_all_zips_under(dl_path):
        cand = _find_data_root_with_training(dl_path)
        if cand:
            args.data_root = cand
            print(f"[auto] 解压后已定位：data_root = {args.data_root}")
            return args.data_root

    # Last resort: point data_root at the raw download directory.
    print("[auto] 未能自动识别标准目录结构。已将 data_root 指向 Kaggle 下载目录。")
    print("       请确认其下存在 `38-Cloud_training/train_red` 等子目录。")
    args.data_root = dl_path
    return args.data_root

def get_args():
    """Parse CLI arguments for 38-Cloud training + NAS search + fine-tuning.

    Data-root resolution order: the script-relative default, overridden by the
    CLOUD_DATA_ROOT environment variable (when it is an existing directory),
    overridden by --data-root. Missing training sub-directories only produce a
    warning here; downloading is handled later by ensure_dataset_available().
    """
    parser = argparse.ArgumentParser(description="Cloud-Net 模型训练 + NAS 搜索 + 微调（38-Cloud）")

    # 1) Default data root, resolved relative to this script.
    #    NOTE(review): the original comment described ..\..\实习数据集\CloudDetection
    #    but the code joins only "..", "CloudDetection" — confirm which layout
    #    is intended.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CloudDetection")
    )

    # 2) Optional override via environment variable.
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    # 3) Common parameters.
    parser.add_argument("--data-root",     default=default_data_root, help="38-Cloud 数据集根目录（默认为相对路径）")
    parser.add_argument("--output-dir",    default="./output",        help="输出目录")
    parser.add_argument("--epochs",        type=int, default=25,      help="训练轮数")
    parser.add_argument("--batch-size",    type=int, default=8,       help="批次大小")
    parser.add_argument("--learning-rate", type=float, default=1e-3,  help="学习率")
    parser.add_argument("--weight-decay",  type=float, default=1e-4,  help="权重衰减")
    parser.add_argument("--img-size",      type=int, default=256,     help="训练图像大小（会自动 resize）")
    parser.add_argument("--nas-trials",    type=int, default=16,      help="NAS 搜索子模型最大次数")
    parser.add_argument("--nas-epochs",    type=int, default=5,       help="NAS 中每个子模型训练轮数（快速评估）")
    parser.add_argument("--ft-epochs",     type=int, default=3,       help="微调轮数")
    parser.add_argument("--ft-lr",         type=float, default=1e-4,  help="微调学习率")

    args = parser.parse_args()

    # 4) Friendly check — only the training directories, to avoid false alarms
    #    about missing test_* folders.
    expected_train_dirs = [
        os.path.join(args.data_root, "38-Cloud_training", "train_red"),
        os.path.join(args.data_root, "38-Cloud_training", "train_green"),
        os.path.join(args.data_root, "38-Cloud_training", "train_blue"),
        os.path.join(args.data_root, "38-Cloud_training", "train_nir"),
        os.path.join(args.data_root, "38-Cloud_training", "train_gt"),
    ]
    missing = [p for p in expected_train_dirs if not os.path.isdir(p)]
    if missing:
        print("[提示] 数据目录可能不完整或路径不对：")
        for p in missing:
            print("  - 缺少目录：", p)
        print("你可以通过命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")

    return args

def main():
    """End-to-end pipeline: train CloudNet, run NAS, fine-tune, and report.

    Stages: (1) train the baseline CloudNet; (2) run the simplified NAS over
    CloudNetNAS configs; (3) fine-tune the NAS winner. Checkpoints are saved
    to args.output_dir and a JSON summary is written via save_results().
    """
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Fix random seeds for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)

    # Ensure the dataset is present (auto-download + locate when missing).
    ensure_dataset_available(args)

    # Data loading.
    train_loader, val_loader = create_dataloaders(args)

    # -- STAGE 1: baseline training -- #
    print("=== STAGE: TRAIN ===")
    model = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    model, orig_iou, _ = train_model(model, train_loader, val_loader, device, args)
    orig_size   = get_model_size_mb(model)
    orig_params = count_parameters(model)
    torch.save(model.state_dict(), os.path.join(args.output_dir, "cloudnet_trained.pth"))
    print(f"训练完成，原始模型 IoU={orig_iou:.4f}, 大小={orig_size:.2f}MB, 参数={orig_params:,}")

    # -- STAGE 2: NAS search -- #
    print("=== STAGE: NAS SEARCH ===")
    # Reload the best trained weights before searching.
    model.load_state_dict(torch.load(
        os.path.join(args.output_dir, "cloudnet_trained.pth"),
        map_location=device))
    model.eval()
    nas_model = neural_architecture_search(train_loader, val_loader, device, args)
    torch.save(nas_model.state_dict(), os.path.join(args.output_dir, "cloudnet_nas.pth"))
    nas_iou    = evaluate_model(nas_model, val_loader, device)["iou"]
    nas_size   = get_model_size_mb(nas_model)
    nas_params = count_parameters(nas_model)
    print(f"NAS 后模型 IoU={nas_iou:.4f}, 大小={nas_size:.2f}MB, 参数={nas_params:,}")

    # -- STAGE 3: fine-tune the NAS model -- #
    print("=== STAGE: FINE-TUNE AFTER NAS ===")
    # train_model only reads .epochs/.learning_rate/.weight_decay from args.
    ft_args = argparse.Namespace(
        epochs        = args.ft_epochs,
        learning_rate = args.ft_lr,
        weight_decay  = args.weight_decay
    )
    ft_model, ft_iou, _ = train_model(nas_model.to(device),
                                      train_loader, val_loader,
                                      device, ft_args)
    torch.save(ft_model.state_dict(), os.path.join(args.output_dir, "cloudnet_nas_ft.pth"))
    ft_size   = get_model_size_mb(ft_model)
    ft_params = count_parameters(ft_model)
    print(f"微调后模型 IoU={ft_iou:.4f}, 大小={ft_size:.2f}MB, 参数={ft_params:,}")

    # -- Summary reports (per the spec) -- #
    report_orig = build_eval_report_spec(model, val_loader, device)
    report_nas  = build_eval_report_spec(nas_model, val_loader, device)
    report_ft   = build_eval_report_spec(ft_model, val_loader, device)

    stats = {
        "original":   {"iou": orig_iou, "size_mb": get_model_size_mb(model),   "params": count_parameters(model)},
        "nas":        {"iou": nas_iou,  "size_mb": get_model_size_mb(nas_model),"params": count_parameters(nas_model)},
        "fine_tuned": {"iou": ft_iou,   "size_mb": get_model_size_mb(ft_model), "params": count_parameters(ft_model)},
        "spec_report": {
            "original": report_orig,
            "nas":      report_nas,
            "fine_tuned": report_ft
        }
    }
    save_results(stats, args.output_dir)

    # Console quick view of the per-model reports.
    print("\n=== 按图的模型评估 ===")
    def _p(tag, rep):
        # Pretty-print one report line.
        print(f"{tag} | 性能: 准确率={rep['模型性能']['准确率']:.4f}, 精度={rep['模型性能']['精度']:.4f} | "
              f"效率: 参数={rep['模型效率']['参数数量']}, 大小={rep['模型效率']['模型大小(MB)']:.2f}MB, "
              f"内存={rep['模型效率']['内存占用(MB)']:.2f}MB")
    _p("Original",   report_orig)
    _p("NAS",        report_nas)
    _p("Fine-Tuned", report_ft)

if __name__ == "__main__":
    main()