#!/usr/bin/env python

"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import numpy as np
import json
from tqdm import tqdm
import rasterio
import torchvision.transforms as transforms
import psutil  # 测推理增量内存
import torch
import torch.ao.quantization as tq
from torch.ao.quantization import QConfig
from torch.ao.quantization.observer import MinMaxObserver


class CloudDetectionDataset(Dataset):
    """38-Cloud dataset loader.

    Loads the four Landsat-8 bands (red/green/blue/nir) plus the ground-truth
    mask for each patch. Files are matched across band folders by the "common
    tail" that remains after stripping a known band prefix from the file stem,
    so naming schemes like ``red_patch_1.tif`` / ``gt_patch_1.tif`` pair up.
    """

    def __init__(self, root_dir, mode='train', transform=None):
        super().__init__()
        assert mode in ['train', 'test']
        sub = '38-Cloud_training' if mode == 'train' else '38-Cloud_test'
        base = os.path.join(root_dir, sub)

        self.mode = mode
        self.transform = transform

        # One folder per spectral band, e.g. <base>/train_red.
        self.band_dirs = {
            'red':   os.path.join(base, f'{mode}_red'),
            'green': os.path.join(base, f'{mode}_green'),
            'blue':  os.path.join(base, f'{mode}_blue'),
            'nir':   os.path.join(base, f'{mode}_nir'),
        }
        self.gt_dir = os.path.join(base, f'{mode}_gt')

        # Accepted file extension.
        self.ext = '.tif'

        # Known per-band filename prefixes; the first matching one is stripped
        # from a file stem to obtain the shared "common tail" (the sample id).
        self.prefix_candidates = {
            'red':   ['red_', 'B4_', 'band4_', 'R_'],
            'green': ['green_', 'B3_', 'band3_', 'G_'],
            'blue':  ['blue_', 'B2_', 'band2_', 'B_'],
            'nir':   ['nir_', 'B5_', 'band5_', 'NIR_'],
            'gt':    ['gt_', 'mask_', 'label_', 'cloud_', 'gtmask_'],
        }

        # Per-folder index of {common tail: actual file stem (no extension)}.
        self.index = {b: self._build_index(self.band_dirs[b], b) for b in ['red','green','blue','nir']}
        self.index_gt = self._build_index(self.gt_dir, 'gt')

        # Intersect ids so every sample exists in all four bands and the GT.
        ids = set(self.index['red'].keys())
        ids &= set(self.index['green'].keys())
        ids &= set(self.index['blue'].keys())
        ids &= set(self.index['nir'].keys())
        ids &= set(self.index_gt.keys())

        self.ids = sorted(list(ids))
        if len(self.ids) == 0:
            raise RuntimeError("没有找到可用样本，请检查目录与命名规则是否正确。")
        print(f"[{mode}] 可用样本数: {len(self.ids)}")

    def _strip_prefix(self, stem, band_key):
        """Strip a known band prefix from *stem* and return the common tail."""
        for p in self.prefix_candidates.get(band_key, []):
            if stem.startswith(p):
                return stem[len(p):]
        return stem  # no prefix matched: return the stem unchanged

    def _build_index(self, folder, band_key):
        """Map common tail -> real file stem for every ``.tif`` in *folder*.

        A missing GT folder yields an empty mapping (some layouts lack it);
        a missing band folder is a hard error.
        """
        mapping = {}
        if not os.path.isdir(folder):
            # The gt directory may legitimately be absent; band dirs must exist.
            if band_key != 'gt':
                raise FileNotFoundError(f"目录不存在: {folder}")
            return mapping

        for f in os.listdir(folder):
            if not f.lower().endswith(self.ext):
                continue
            stem = os.path.splitext(f)[0]
            tail = self._strip_prefix(stem, band_key)  # shared sample id
            # On a tail collision the later file wins; in practice this is rare.
            mapping[tail] = stem
        return mapping

    def __len__(self):
        return len(self.ids)

    def _resolve_path(self, folder, stem):
        """Rebuild the full path from a folder and a bare file stem."""
        return os.path.join(folder, stem + self.ext)

    def __getitem__(self, idx):
        """Return ``(image, mask)``: image is float32 ``(4, H, W)`` scaled to
        [0, 1]; mask is int64 ``(H, W)`` with 1 marking cloud pixels."""
        pid = self.ids[idx]

        # Read the four bands, locating each via the real stem in the index.
        bands = []
        for b in ['red','green','blue','nir']:
            stem = self.index[b][pid]              # actual file stem (no ext)
            path = self._resolve_path(self.band_dirs[b], stem)
            with rasterio.open(path) as src:
                arr = src.read(1).astype(np.float32)
            arr = np.nan_to_num(arr, nan=0.0)
            arr = arr / 10000.0  # assumes raw DN scale of 10000 — TODO confirm for this dataset
            arr = np.clip(arr, 0.0, 1.0)
            bands.append(arr)
        img = np.stack(bands, axis=0)  # (4,H,W)

        # Ground-truth mask: any positive pixel value counts as cloud.
        if pid not in self.index_gt:
            raise FileNotFoundError(f"找不到该样本的GT: {pid}")
        gt_stem = self.index_gt[pid]
        gt_path = self._resolve_path(self.gt_dir, gt_stem)
        with rasterio.open(gt_path) as src:
            m = src.read(1)
        m = (m > 0).astype(np.int64)

        img_t  = torch.from_numpy(img)
        mask_t = torch.from_numpy(m)

        if self.transform is not None:
            img_t = self.transform(img_t)

        return img_t, mask_t

class CloudNet(nn.Module):
    """
    Cloud-Net: a U-Net style encoder/decoder for cloud segmentation.

    Takes a 4-band (R, G, B, NIR) image and produces per-pixel class logits
    with ``num_classes`` channels at the input resolution.
    """

    def __init__(self, num_classes=2, dropout_rate=0.1):
        super(CloudNet, self).__init__()

        # Encoder: four double-conv stages with doubling widths.
        self.enc1 = self._make_layer(4, 64, dropout_rate)
        self.enc2 = self._make_layer(64, 128, dropout_rate)
        self.enc3 = self._make_layer(128, 256, dropout_rate)
        self.enc4 = self._make_layer(256, 512, dropout_rate)

        # Bottleneck.
        self.center = self._make_layer(512, 1024, dropout_rate)

        # Decoder: each stage consumes the previous output concatenated with
        # the matching encoder feature map (skip connection).
        self.dec4 = self._make_decoder_layer(1024, 512)
        self.dec3 = self._make_decoder_layer(1024, 256)  # 1024 = 512 + 512 skip
        self.dec2 = self._make_decoder_layer(512, 128)   # 512 = 256 + 256 skip
        self.dec1 = self._make_decoder_layer(256, 64)    # 256 = 128 + 128 skip

        # 1x1 projection to class logits; 128 = 64 + 64 from the last skip.
        self.final = nn.Conv2d(128, num_classes, 1)

        # Shared 2x2 downsampling between encoder stages.
        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    def _make_layer(self, in_channels, out_channels, dropout_rate=0.1):
        """Double 3x3 conv block: Conv-BN-ReLU-Dropout-Conv-BN-ReLU."""
        stages = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*stages)

    def _make_decoder_layer(self, in_channels, out_channels):
        """2x upsampling via transposed conv, then a 3x3 refinement conv."""
        stages = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*stages)

    def _initialize_weights(self):
        """He init for conv weights; unit scale / zero shift for batch norms."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        """Run the encoder/decoder and return ``(N, num_classes, H, W)`` logits."""
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))
        e4 = self.enc4(self.pool(e3))

        features = self.center(self.pool(e4))

        # Walk back up, fusing each decoder output with its encoder skip.
        for decode, skip in ((self.dec4, e4), (self.dec3, e3),
                             (self.dec2, e2), (self.dec1, e1)):
            features = torch.cat([decode(features), skip], dim=1)

        return self.final(features)

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the cloud (class-1) probability channel.

    Computes a per-sample Dice coefficient between the softmax probability of
    the cloud class and the binary target, then returns 1 - mean(Dice).
    """
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    gt = target.float()

    overlap = (cloud_prob * gt).sum(dim=(1, 2))
    denominator = cloud_prob.sum(dim=(1, 2)) + gt.sum(dim=(1, 2))

    dice_coeff = (2.0 * overlap + smooth) / (denominator + smooth)
    return 1 - dice_coeff.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal loss (Lin et al.): cross-entropy down-weighted on easy pixels.

    ``alpha`` scales the loss globally; ``gamma`` controls how strongly
    well-classified pixels (high true-class probability) are suppressed.
    """
    per_pixel_ce = F.cross_entropy(pred, target, reduction='none')
    prob_true_class = torch.exp(-per_pixel_ce)
    modulated = alpha * (1 - prob_true_class) ** gamma * per_pixel_ce
    return modulated.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """
    Segmentation metrics at a fixed probability threshold.

    Binarizes the cloud-class softmax probability at ``threshold`` and
    returns IoU, F1, precision, recall, and pixel accuracy as floats.
    """
    eps = 1e-8
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    predicted = (cloud_prob > threshold).float()
    truth = target.float()

    # Pixel-level confusion counts over the whole batch.
    tp = (predicted * truth).sum()
    fp = (predicted * (1 - truth)).sum()
    fn = ((1 - predicted) * truth).sum()
    tn = ((1 - predicted) * (1 - truth)).sum()

    # IoU over the cloud class.
    union = predicted.sum() + truth.sum() - tp
    iou = tp / (union + eps)

    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item()
    }


def get_model_size_mb(model):
    """
    Return the serialized size of *model* in megabytes.

    Primary path: serialize the state_dict to a temporary file and measure it
    (exact). The temp file is closed BEFORE torch.save writes to it — on
    Windows a NamedTemporaryFile cannot be reopened while still open, which
    made the original implementation fail on exactly the platform this
    fallback was written for.

    Fallback: estimate from parameter AND buffer storage (element_size *
    numel), so quantized/frozen models (whose tensors may not require grad)
    are no longer reported as ~0 MB. A 20% overhead factor approximates
    serialization metadata.
    """
    try:
        import tempfile
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pth') as tmp_file:
            tmp_path = tmp_file.name
        try:
            # File handle is closed now; safe to write on all platforms.
            torch.save(model.state_dict(), tmp_path)
            return os.path.getsize(tmp_path) / (1024 * 1024)
        finally:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass  # best-effort cleanup of the temp file
    except Exception as e:
        print(f"临时文件方法失败: {e}")
        # Memory-based estimate: count every parameter and buffer tensor.
        total_bytes = 0
        for tensor in list(model.parameters()) + list(model.buffers()):
            total_bytes += tensor.numel() * tensor.element_size()

        estimated_size_mb = total_bytes / (1024 * 1024) * 1.2  # +20% overhead
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

def _expected_train_dirs(root):
    return [
        os.path.join(root, "38-Cloud_training", "train_red"),
        os.path.join(root, "38-Cloud_training", "train_green"),
        os.path.join(root, "38-Cloud_training", "train_blue"),
        os.path.join(root, "38-Cloud_training", "train_nir"),
        os.path.join(root, "38-Cloud_training", "train_gt"),
    ]

def _has_expected_structure(root):
    """True when every expected 38-Cloud training directory exists under *root*."""
    try:
        for path in _expected_train_dirs(root):
            if not os.path.isdir(path):
                return False
        return True
    except Exception:
        return False

def _extract_all_zips_under(folder):
    import zipfile
    extracted_any = False
    for cur_root, _, files in os.walk(folder):
        for f in files:
            if f.lower().endswith(".zip"):
                zf_path = os.path.join(cur_root, f)
                try:
                    with zipfile.ZipFile(zf_path, 'r') as zf:
                        zf.extractall(cur_root)
                        extracted_any = True
                        print(f"[auto] 已解压: {zf_path}")
                except Exception as e:
                    print(f"[auto] 解压失败（忽略）: {zf_path} -> {e}")
    return extracted_any

def _find_data_root_with_training(path):
    """Walk *path* for a directory holding a complete 38-Cloud_training tree.

    Returns the first directory (in os.walk order) that both contains a
    ``38-Cloud_training`` subfolder and passes the structure check, else None.
    """
    for cur_root, dirs, _ in os.walk(path):
        if "38-Cloud_training" not in dirs:
            continue
        if _has_expected_structure(cur_root):
            return cur_root
    return None

def ensure_dataset_available(args):
    """
    Ensure the 38-Cloud dataset exists under ``args.data_root``.

    If the expected directory structure is missing, download the dataset via
    KaggleHub ('sorour/38cloud-cloud-segmentation-in-satellite-images') and
    repoint ``args.data_root`` to a directory with the expected layout.

    Mutates ``args.data_root`` and returns it.
    """
    # Already in place: nothing to do.
    if _has_expected_structure(args.data_root):
        return args.data_root

    print("[auto] 未发现完整数据集结构，尝试使用 KaggleHub 自动下载 38-Cloud 数据集...")

    # kagglehub is an optional dependency; fail with install instructions.
    try:
        import kagglehub  # pip install kagglehub
    except Exception as e:
        raise RuntimeError(
            "未安装 kagglehub。请先执行：pip install kagglehub\n"
            f"导入失败详情：{e}"
        )

    # Download (or fetch from cache) via KaggleHub.
    try:
        dl_path = kagglehub.dataset_download("sorour/38cloud-cloud-segmentation-in-satellite-images")
        dl_path = os.path.normpath(dl_path)
        print(f"[auto] KaggleHub 下载完成：{dl_path}")
    except Exception as e:
        raise RuntimeError(f"下载 Kaggle 数据集失败，请检查网络或 Kaggle 配置：{e}")

    # Try the download path as-is first.
    if _has_expected_structure(dl_path):
        args.data_root = dl_path
        print(f"[auto] 识别成功：data_root = {args.data_root}")
        return args.data_root

    # Otherwise search recursively for a suitable root directory.
    cand = _find_data_root_with_training(dl_path)
    if cand:
        args.data_root = cand
        print(f"[auto] 已定位到包含 38-Cloud_training 的目录：{args.data_root}")
        return args.data_root

    # Still not found: extract any zip archives, then search once more.
    did_extract = _extract_all_zips_under(dl_path)
    if did_extract:
        cand = _find_data_root_with_training(dl_path)
        if cand:
            args.data_root = cand
            print(f"[auto] 解压后已定位：data_root = {args.data_root}")
            return args.data_root

    # Fallback: point data_root at the raw download path and warn the user
    # (the Dataset class depends on the expected directory layout).
    print("[auto] 未能自动识别标准目录结构。已将 data_root 指向 Kaggle 下载目录。")
    print("       请确认其下存在 `38-Cloud_training/train_red` 等子目录。")
    args.data_root = dl_path
    return args.data_root

def proper_quantization(model, calibration_loader, bits=8):
    """
    Post-training static quantization with graceful fallbacks.

    For ``bits == 8`` this attempts genuine fbgemm static quantization
    (prepare -> calibrate -> convert). If the backend is unsupported or the
    converted model fails a smoke-test forward pass, it falls back to
    :func:`pseudo_quantization`. All other bit widths always use the
    pseudo-quantization path.
    """

    print(f"应用{bits}位静态量化...")

    def check_quantization_support():
        # Smoke-test the full prepare/convert pipeline on a tiny conv so
        # unsupported PyTorch builds are detected before touching the model.
        try:
            test_model = torch.nn.Conv2d(1, 1, 1)
            test_model.eval()
            test_model.qconfig = tq.get_default_qconfig('fbgemm')
            test_prepared = tq.prepare(test_model, inplace=False)
            test_input = torch.randn(1, 1, 4, 4)
            test_prepared(test_input)
            test_quantized = tq.convert(test_prepared, inplace=False)
            test_quantized(test_input)
            return True
        except Exception as e:
            print(f"量化支持检查失败: {e}")
            return False

    if bits == 8:
        if not check_quantization_support():
            print("⚠️ 当前PyTorch版本不支持静态量化，使用伪量化作为替代方案...")
            return pseudo_quantization(model, bits=8)

        try:
            torch.backends.quantized.engine = 'fbgemm'
            default_qconfig = tq.get_default_qconfig('fbgemm')

            # Dedicated per-tensor symmetric weight observer for
            # ConvTranspose2d — NOTE(review): presumably because the default
            # per-channel scheme is rejected for transposed convs; confirm.
            convtranspose_qconfig = QConfig(
                activation=default_qconfig.activation,
                weight=MinMaxObserver.with_args(
                    dtype=torch.qint8,
                    qscheme=torch.per_tensor_symmetric,
                    reduce_range=False
                )
            )

            model.eval()
            model.qconfig = default_qconfig
            for name, m in model.named_modules():
                if isinstance(m, torch.nn.ConvTranspose2d):
                    m.qconfig = convtranspose_qconfig
                    print(f"为 {name} (ConvTranspose2d) 设置专用量化配置")

            model_prepared = tq.prepare(model, inplace=False)

            print("进行量化校准...")
            # Feed a few batches so the observers record activation ranges.
            with torch.no_grad():
                for i, (data, _) in enumerate(calibration_loader):
                    if i >= 10:  # cap calibration at 10 batches
                        break
                    model_prepared(data.cpu())

            quantized_model = tq.convert(model_prepared, inplace=False)
            quantized_model.eval()

            test_input = torch.randn(1, 4, 256, 256)

            # Sanity-check that the converted model actually runs.
            try:
                with torch.no_grad():
                    _ = quantized_model(test_input)
                print("✅ 静态量化成功")
                return quantized_model
            except Exception as e:
                print(f"⚠️ 静态量化模型运行失败: {e}")
                print("回退到伪量化方案...")
                return pseudo_quantization(model, bits=8)

        except Exception as e:
            print(f"⚠️ 静态量化过程失败: {e}")
            print("回退到伪量化方案...")
            return pseudo_quantization(model, bits=8)

    else:
        return pseudo_quantization(model, bits=bits)

def pseudo_quantization(model, bits=8):
    """
    Pseudo (fake) quantization: round weights onto a reduced-precision grid
    while keeping float storage/compute, so the accuracy impact of a given
    bit width can be measured on any backend.

    Only tensors with ``dim() > 1`` (weight matrices/kernels) are touched;
    biases and 1-D parameters are left intact. Returns a deep copy; the input
    model is not modified.

    Fixes vs. the previous version:
    - Constant tensors (max == min) previously produced scale = 0 and a
      divide-by-zero that filled the weights with NaN; they are now returned
      unchanged.
    - The comments claimed signed ranges ([-128, 127] / [-8, 7]) while the
      code used unsigned grids; the unsigned asymmetric scheme
      ([0, 2**bits - 1] with a min-value zero point) is now documented
      accurately and shared between the 4- and 8-bit paths.
    """
    print(f"使用{bits}-bit伪量化...")

    quantized_model = copy.deepcopy(model)

    def quantize_tensor(tensor, bits):
        """Quantize-dequantize *tensor* on an unsigned asymmetric grid."""
        if bits == 16:
            # 16-bit is emulated by a half-precision round trip.
            return tensor.half().float()
        if bits not in (4, 8):
            return tensor  # unsupported width: leave the tensor untouched

        levels = (1 << bits) - 1  # 255 for 8-bit, 15 for 4-bit
        min_val = tensor.min()
        max_val = tensor.max()
        scale = (max_val - min_val) / levels
        if scale == 0:
            # Constant tensor: nothing to quantize (avoids div-by-zero NaNs).
            return tensor

        # Quantize to integer levels, then dequantize back to float.
        quantized = torch.round((tensor - min_val) / scale)
        quantized = torch.clamp(quantized, 0, levels)
        return quantized * scale + min_val

    # Quantize every weight matrix/kernel; skip biases (dim <= 1).
    with torch.no_grad():
        for name, param in quantized_model.named_parameters():
            if param.dim() > 1:
                param.copy_(quantize_tensor(param, bits))

    print(f"✅ {bits}-bit伪量化完成")
    return quantized_model

def train_model(model, train_loader, val_loader, device, args):
    """训练函数"""
    print(f"开始训练Cloud-Net模型...")

    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay,
                            betas=(0.9, 0.999))

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    # 动态类别权重
    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = 0.0
    best_model_state = None
    train_history = {'loss': [], 'iou': [], 'val_iou': [], 'val_f1': []}

    patience = 10
    patience_counter = 0

    for epoch in range(args.epochs):
        # 训练阶段
        model.train()
        running_loss = 0.0
        running_metrics = {'iou': 0.0, 'f1': 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # 组合损失
            ce_loss = ce_loss_fn(outputs, masks)
            d_loss = dice_loss(outputs, masks)
            f_loss = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)

            total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # 计算指标（固定阈值）
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += total_loss.item()
            running_metrics['iou'] += metrics['iou']
            running_metrics['f1'] += metrics['f1']
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'IoU': f'{metrics["iou"]:.4f}',
                'F1': f'{metrics["f1"]:.4f}'
            })

        # 计算平均训练指标
        avg_train_loss = running_loss / num_batches
        avg_train_iou = running_metrics['iou'] / num_batches

        # 验证阶段
        val_metrics = evaluate_model(model, val_loader, device)

        scheduler.step()

        # 保存最佳模型
        if val_metrics['iou'] > best_iou:
            best_iou = val_metrics['iou']
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # 记录历史
        train_history['loss'].append(avg_train_loss)
        train_history['iou'].append(avg_train_iou)
        train_history['val_iou'].append(val_metrics['iou'])
        train_history['val_f1'].append(val_metrics['f1'])

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {avg_train_iou:.4f}, '
              f'Val IoU: {val_metrics["iou"]:.4f}, '
              f'Val F1: {val_metrics["f1"]:.4f}')

        # 早停
        if patience_counter >= patience:
            print(f"早停在epoch {epoch + 1}, 最佳IoU: {best_iou:.4f}")
            break

    # 加载最佳模型
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, best_iou, train_history


def evaluate_model(model, dataloader, device, threshold=0.5):
    """Average the fixed-threshold segmentation metrics over *dataloader*."""
    model.eval()
    accumulated = {'iou': 0.0, 'f1': 0.0, 'precision': 0.0, 'recall': 0.0, 'accuracy': 0.0}
    batch_count = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)
            batch_metrics = calculate_metrics_fixed(model(images), masks, threshold)

            for key in accumulated:
                accumulated[key] += batch_metrics[key]
            batch_count += 1

    # Mean of the per-batch metrics.
    return {key: total / batch_count for key, total in accumulated.items()}

def count_parameters(model):
    """Number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """Dataset-level accuracy/precision from pooled pixel confusion counts.

    Unlike the per-batch averaging in ``evaluate_model``, this accumulates
    TP/FP/FN/TN over the whole dataloader before computing the ratios.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)
            cloud_prob = torch.softmax(model(images), dim=1)[:, 1, :, :]
            pred = (cloud_prob > threshold).to(masks.dtype).reshape(-1).float()
            truth = masks.reshape(-1).float()
            tp += float((pred * truth).sum().item())
            fp += float((pred * (1 - truth)).sum().item())
            fn += float(((1 - pred) * truth).sum().item())
            tn += float(((1 - pred) * (1 - truth)).sum().item())
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def _measure_inference_memory_mb(model, sample_images, device):
    """Incremental memory (MB) of one forward pass.

    On CUDA: delta between allocated bytes before the pass and the peak
    afterwards. On CPU: process RSS delta around the pass (after GC), which
    is inherently noisy — treat it as an estimate.
    """
    import gc, time
    process = psutil.Process(os.getpid())
    model.eval()
    # Warm-up pass so lazy allocations don't pollute the measurement.
    with torch.no_grad():
        _ = model(sample_images)
    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        baseline = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_images)
        peak = torch.cuda.max_memory_allocated(device)
        return float(max(peak - baseline, 0) / (1024 * 1024))
    gc.collect(); time.sleep(0.02)
    rss_before = process.memory_info().rss
    with torch.no_grad():
        _ = model(sample_images)
    gc.collect(); time.sleep(0.02)
    rss_after = process.memory_info().rss
    return float(max(rss_after - rss_before, 0) / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """Efficiency report: parameter count, serialized size, inference memory.

    Uses the first batch of *dataloader* as the sample input for the memory
    measurement; raises if the loader yields nothing.
    """
    sample_batch = None
    for images, _ in dataloader:
        sample_batch = images.to(device)
        break
    if sample_batch is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")
    return {
        "参数数量": int(count_parameters(model)),
        "模型大小(MB)": float(get_model_size_mb(model)),
        "内存占用(MB)": float(_measure_inference_memory_mb(model, sample_batch, device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Combine the spec performance and efficiency reports into one dict."""
    performance = evaluate_performance_spec(model, dataloader, device, threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    return {
        "模型性能": {"准确率": performance["accuracy"], "精度": performance["precision"]},
        "模型效率": efficiency
    }

def create_dataloaders(args):
    """创建 CloudDetection 数据加载器（train/val）"""
    print("准备 CloudDetection 数据集...")

    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406, 0.5],  # R, G, B, NIR
        std =[0.229, 0.224, 0.225, 0.2]
    )

    full_dataset = CloudDetectionDataset(
        root_dir=args.data_root,
        mode='train',
        transform=normalize
    )

    train_size = int(0.8 * len(full_dataset))
    val_size = len(full_dataset) - train_size
    train_dataset, val_dataset = random_split(full_dataset, [train_size, val_size])

    # Windows+rasterio 多进程偶发卡死/句柄问题 → 用 0 更稳；CUDA 时 pin_memory=True
    use_workers = 0
    pin_mem = torch.cuda.is_available()

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=use_workers,
        pin_memory=pin_mem
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=use_workers,
        pin_memory=pin_mem
    )

    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")
    return train_loader, val_loader

def save_results(results, output_dir):
    """Serialize *results* to ``compression_results.json`` in *output_dir*.

    NumPy arrays and scalars are recursively converted to plain Python types
    so the structure is JSON-serializable.
    """
    results_path = os.path.join(output_dir, 'compression_results.json')

    def _to_jsonable(obj):
        # Recursively strip numpy containers/scalars down to built-ins.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        if isinstance(obj, dict):
            return {key: _to_jsonable(value) for key, value in obj.items()}
        if isinstance(obj, list):
            return [_to_jsonable(item) for item in obj]
        return obj

    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(_to_jsonable(results), f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def get_args():
    parser = argparse.ArgumentParser(description="Cloud-Net模型训练和量化 - 修复版")

    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CloudDetection")
    )

    # === 环境变量覆盖（可选）
    # 例如：set CLOUD_DATA_ROOT=E:\USM\year2sem3\实习数据集\CloudDetection
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    parser.add_argument("--output-dir",       default="./output", help="输出目录")
    parser.add_argument("--epochs",           type=int, default=25,  help="训练轮数")
    parser.add_argument("--batch-size",       type=int, default=8,   help="批次大小")
    parser.add_argument("--learning-rate",    type=float, default=1e-3, help="学习率")
    parser.add_argument("--weight-decay",     type=float, default=1e-4, help="权重衰减")
    parser.add_argument("--quantization-bits",type=int, default=8,   help="量化位数")
    parser.add_argument("--data-root",        default=default_data_root,
                        help="CloudDetection 数据集根目录（默认相对脚本：..\\..\\实习数据集\\CloudDetection）")

    args = parser.parse_args()

    # 友好检查（只检查训练集目录，避免 test_* 缺失就误报）
    expected_train_dirs = [
        os.path.join(args.data_root, "38-Cloud_training", "train_red"),
        os.path.join(args.data_root, "38-Cloud_training", "train_green"),
        os.path.join(args.data_root, "38-Cloud_training", "train_blue"),
        os.path.join(args.data_root, "38-Cloud_training", "train_nir"),
        os.path.join(args.data_root, "38-Cloud_training", "train_gt"),
    ]
    missing = [p for p in expected_train_dirs if not os.path.isdir(p)]
    if missing:
        print("[提示] 数据目录可能不完整或路径不对：")
        for p in missing:
            print("  - 缺少目录：", p)
        print("你可以通过命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")

    return args

def main():
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)

    # 设备 & 随机种子
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")
    torch.manual_seed(42)
    np.random.seed(42)

    # 确保数据可用（若缺失则自动用 KaggleHub 下载并定位）
    ensure_dataset_available(args)

    # ============================ 数据 ============================
    train_loader, val_loader = create_dataloaders(args)

    # ============================ 训练 ============================
    print("=== STAGE: TRAIN ===")
    model = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    model, best_iou, _ = train_model(model, train_loader, val_loader, device, args)
    trained_model_path = os.path.join(args.output_dir, "cloudnet_trained.pth")
    torch.save(model.state_dict(), trained_model_path)
    print(f"训练完成，模型已保存到 {trained_model_path}")

    # ============================ 量化（CPU） ============================
    print("=== STAGE: QUANTIZATION ===")
    # 静态量化在 CPU 上进行
    model_cpu = CloudNet(num_classes=2, dropout_rate=0.1).cpu()
    model_cpu.load_state_dict(torch.load(trained_model_path, map_location="cpu"))
    model_cpu.eval()
    print("已加载训练好的模型，开始静态量化")
    quantized_model = proper_quantization(model_cpu, train_loader, bits=args.quantization_bits)

    # ============================ 评估（原始 & 量化） ============================
    print("评估原始模型性能...")
    orig_metrics = evaluate_model(model.to(device), val_loader, device)
    orig_iou     = float(orig_metrics["iou"])
    orig_size_mb = float(get_model_size_mb(model))
    orig_params  = int(count_parameters(model))
    print(f"原始模型 - IoU: {orig_iou:.4f}, 大小: {orig_size_mb:.2f}MB, 参数: {orig_params:,}")

    print(f"评估 {args.quantization_bits}-bit 量化后模型性能...")
    cpu = torch.device("cpu")
    quant_metrics = evaluate_model(quantized_model, val_loader, cpu)
    quant_iou     = float(quant_metrics["iou"])
    quant_size_mb = float(get_model_size_mb(quantized_model))
    # 量化后的模块可能使用打包权重，参数统计可能偏少或为0，这是预期情况
    quant_params  = int(count_parameters(quantized_model))
    print(f"量化模型 - IoU: {quant_iou:.4f}, 大小: {quant_size_mb:.2f}MB, 参数: {quant_params:,}")

    # ============================ “按图”评估报告 ============================
    report_orig  = build_eval_report_spec(model.to(device), val_loader, device)
    report_quant = build_eval_report_spec(quantized_model,       val_loader, cpu)

    # ============================ 汇总与保存 ============================
    iou_drop     = float(orig_iou - quant_iou)
    iou_drop_pct = float((iou_drop / max(orig_iou, 1e-8)) * 100.0)
    size_reduce  = float(((orig_size_mb - quant_size_mb) / max(orig_size_mb, 1e-8)) * 100.0)
    param_reduce = float(((orig_params - quant_params) / max(orig_params, 1)) * 100.0)

    if iou_drop_pct < 5:
        status = "优秀"; sym = "✅"
    elif iou_drop_pct < 10:
        status = "良好"; sym = "⚠️"
    else:
        status = "需改进"; sym = "❌"

    results = {
        "original_model": {
            "iou":        float(orig_metrics["iou"]),
            "f1":         float(orig_metrics["f1"]),
            "precision":  float(orig_metrics["precision"]),
            "recall":     float(orig_metrics["recall"]),
            "accuracy":   float(orig_metrics["accuracy"]),
            "model_size_mb": float(orig_size_mb),
            "parameters": int(orig_params),
        },
        "quantized_model": {
            "iou":        float(quant_metrics["iou"]),
            "f1":         float(quant_metrics["f1"]),
            "precision":  float(quant_metrics["precision"]),
            "recall":     float(quant_metrics["recall"]),
            "accuracy":   float(quant_metrics["accuracy"]),
            "model_size_mb": float(quant_size_mb),
            "parameters": int(quant_params),
        },
        "compression_stats": {
            "iou_drop_percent":            float(iou_drop_pct),
            "size_reduction_percent":      float(size_reduce),
            "parameter_reduction_percent": float(param_reduce),
            "quantization_bits":           int(args.quantization_bits),
            "status":                      str(status),
            "symbol":                      sym,
        },
        # 严格按你的图：模型性能（准确率、精度）+ 模型效率（参数、模型大小、内存占用）
        "spec_report": {
            "original":  report_orig,
            "quantized": report_quant,
        }
    }
    save_results(results, args.output_dir)

    # 控制台速览（与图一致）
    print("\n=== 按图的模型评估 ===")
    def _p(tag, rep):
        print(f"{tag} | 性能: 准确率={rep['模型性能']['准确率']:.4f}, 精度={rep['模型性能']['精度']:.4f} | "
              f"效率: 参数={rep['模型效率']['参数数量']}, "
              f"大小={rep['模型效率']['模型大小(MB)']:.2f}MB, "
              f"内存={rep['模型效率']['内存占用(MB)']:.2f}MB")
    _p("Original",  report_orig)
    _p("Quantized", report_quant)

if __name__ == "__main__":
    main()