#!/usr/bin/env python

"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import numpy as np
import json
from tqdm import tqdm
import rasterio
import torchvision.transforms as transforms
import psutil

class CloudDetectionDataset(Dataset):
    """38-Cloud patch dataset.

    Loads the four spectral bands (red, green, blue, nir) and the binary
    ground-truth cloud mask for each patch. Files in the different band
    folders are aligned by stripping a band-specific filename prefix and
    matching on the remaining common "tail".
    """

    def __init__(self, root_dir, mode='train', transform=None):
        super().__init__()
        assert mode in ['train', 'test']
        sub = '38-Cloud_training' if mode == 'train' else '38-Cloud_test'
        base = os.path.join(root_dir, sub)

        self.mode = mode
        self.transform = transform
        self.ext = '.tif'  # extension filter (matched case-insensitively)

        self.band_dirs = {
            'red':   os.path.join(base, f'{mode}_red'),
            'green': os.path.join(base, f'{mode}_green'),
            'blue':  os.path.join(base, f'{mode}_blue'),
            'nir':   os.path.join(base, f'{mode}_nir'),
        }
        self.gt_dir = os.path.join(base, f'{mode}_gt')

        # Common filename prefixes, stripped to find the shared "tail"
        # that aligns a patch across band folders.
        self.prefix_candidates = {
            'red':   ['red_', 'B4_', 'band4_', 'R_'],
            'green': ['green_', 'B3_', 'band3_', 'G_'],
            'blue':  ['blue_', 'B2_', 'band2_', 'B_'],
            'nir':   ['nir_', 'B5_', 'band5_', 'NIR_'],
            'gt':    ['gt_', 'mask_', 'label_', 'cloud_', 'gtmask_'],
        }

        self.index = {b: self._build_index(self.band_dirs[b], b) for b in ['red', 'green', 'blue', 'nir']}
        self.index_gt = self._build_index(self.gt_dir, 'gt')

        # Keep only patch ids present in all four bands AND the ground truth.
        ids = set(self.index['red'].keys())
        ids &= set(self.index['green'].keys())
        ids &= set(self.index['blue'].keys())
        ids &= set(self.index['nir'].keys())
        ids &= set(self.index_gt.keys())

        self.ids = sorted(list(ids))
        if not self.ids:
            raise RuntimeError("没有找到可用样本，请检查数据根目录与命名规则。")
        print(f"[{mode}] 可用样本数: {len(self.ids)}")

    def _strip_prefix(self, stem, band_key):
        """Remove the first matching band prefix from a filename stem."""
        for p in self.prefix_candidates.get(band_key, []):
            if stem.startswith(p):
                return stem[len(p):]
        return stem

    def _build_index(self, folder, band_key):
        """Map patch tail -> actual filename for every image in *folder*.

        Bug fix: the full filename (with its real extension casing, e.g.
        '.TIF') is stored instead of only the stem, so that paths resolve
        correctly on case-sensitive filesystems.
        """
        mapping = {}
        if not os.path.isdir(folder):
            # A missing gt folder is tolerated (test split has no labels);
            # a missing band folder is a hard error.
            if band_key != 'gt':
                raise FileNotFoundError(f"目录不存在: {folder}")
            return mapping

        for f in os.listdir(folder):
            if not f.lower().endswith(self.ext):
                continue
            stem = os.path.splitext(f)[0]
            tail = self._strip_prefix(stem, band_key)
            mapping[tail] = f  # keep the real filename, not just the stem
        return mapping

    def __len__(self):
        return len(self.ids)

    def _resolve(self, folder, fname):
        """Join a folder with an indexed filename."""
        return os.path.join(folder, fname)

    def __getitem__(self, idx):
        pid = self.ids[idx]

        # Read the four spectral bands.
        bands = []
        for b in ['red', 'green', 'blue', 'nir']:
            fname = self.index[b][pid]
            path = self._resolve(self.band_dirs[b], fname)
            with rasterio.open(path) as src:
                arr = src.read(1).astype(np.float32)
            arr = np.nan_to_num(arr, nan=0.0)
            # Landsat SR values are typically 0..10000 — scale to [0, 1].
            arr = arr / 10000.0
            arr = np.clip(arr, 0.0, 1.0)
            bands.append(arr)
        img = np.stack(bands, axis=0)  # (4, H, W)

        # Ground-truth mask, binarized to {0, 1}.
        gt_fname = self.index_gt[pid]
        gt_path = self._resolve(self.gt_dir, gt_fname)
        with rasterio.open(gt_path) as src:
            m = src.read(1)
        m = (m > 0).astype(np.int64)

        img_t = torch.from_numpy(img)
        mask_t = torch.from_numpy(m)

        if self.transform is not None:
            img_t = self.transform(img_t)

        return img_t, mask_t


class CloudNet(nn.Module):
    """U-Net-style cloud-detection network.

    Four encoder stages (64→512 channels), a 1024-channel bottleneck and
    four decoder stages with skip connections; input is a 4-band image
    (RGB + NIR), output is a 2-class logit map at input resolution.
    """

    def __init__(self, num_classes=2, dropout_rate=0.1):
        super().__init__()

        # Contracting path.
        self.enc1 = self._make_layer(4, 64, dropout_rate)
        self.enc2 = self._make_layer(64, 128, dropout_rate)
        self.enc3 = self._make_layer(128, 256, dropout_rate)
        self.enc4 = self._make_layer(256, 512, dropout_rate)

        # Bottleneck.
        self.center = self._make_layer(512, 1024, dropout_rate)

        # Expanding path. Decoder inputs double in width after each
        # concatenation with the matching encoder feature map.
        self.dec4 = self._make_decoder_layer(1024, 512)
        self.dec3 = self._make_decoder_layer(1024, 256)  # 512 (dec4) + 512 (enc4)
        self.dec2 = self._make_decoder_layer(512, 128)   # 256 (dec3) + 256 (enc3)
        self.dec1 = self._make_decoder_layer(256, 64)    # 128 (dec2) + 128 (enc2)

        # Per-pixel classifier over the last concatenated features.
        self.final = nn.Conv2d(128, num_classes, 1)      # 64 (dec1) + 64 (enc1)

        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    def _make_layer(self, in_channels, out_channels, dropout_rate=0.1):
        """Conv-BN-ReLU-Dropout-Conv-BN-ReLU encoder block."""
        layers = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)

    def _make_decoder_layer(self, in_channels, out_channels):
        """2x upsample (transposed conv) followed by a Conv-BN-ReLU refinement."""
        layers = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """He initialization for all convolutions; unit-gain BatchNorm."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                # Matches original behavior: only plain Conv2d biases are
                # zeroed; transposed-conv biases keep their default init.
                if isinstance(module, nn.Conv2d) and module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Contracting path.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))
        e4 = self.enc4(self.pool(e3))

        bottleneck = self.center(self.pool(e4))

        # Expanding path with skip connections.
        d4 = torch.cat([self.dec4(bottleneck), e4], dim=1)
        d3 = torch.cat([self.dec3(d4), e3], dim=1)
        d2 = torch.cat([self.dec2(d3), e2], dim=1)
        d1 = torch.cat([self.dec1(d2), e1], dim=1)

        return self.final(d1)

class PrunedCloudNet(nn.Module):
    """CloudNet rebuilt with a reduced per-stage channel budget.

    ``channel_config`` maps stage names ('enc1'..'enc4', 'center') to their
    pruned widths. The layer layout mirrors :class:`CloudNet` exactly
    (including the encoder Dropout2d), so state-dict keys line up with the
    original model during weight transplantation.
    """

    def __init__(self, channel_config, num_classes=2, dropout_rate=0.1):
        super().__init__()
        self.channel_config = channel_config
        self.dropout_rate = dropout_rate

        cfg = channel_config

        # Contracting path (with Dropout, mirroring CloudNet).
        self.enc1 = self._make_layer(4, cfg['enc1'], dropout_rate)
        self.enc2 = self._make_layer(cfg['enc1'], cfg['enc2'], dropout_rate)
        self.enc3 = self._make_layer(cfg['enc2'], cfg['enc3'], dropout_rate)
        self.enc4 = self._make_layer(cfg['enc3'], cfg['enc4'], dropout_rate)

        # Bottleneck.
        self.center = self._make_layer(cfg['enc4'], cfg['center'], dropout_rate)

        # Expanding path; inputs after dec4 are doubled by skip concatenation.
        self.dec4 = self._make_decoder_layer(cfg['center'], cfg['enc4'])
        self.dec3 = self._make_decoder_layer(cfg['enc4'] * 2, cfg['enc3'])
        self.dec2 = self._make_decoder_layer(cfg['enc3'] * 2, cfg['enc2'])
        self.dec1 = self._make_decoder_layer(cfg['enc2'] * 2, cfg['enc1'])

        # Per-pixel classifier.
        self.final = nn.Conv2d(cfg['enc1'] * 2, num_classes, 1)

        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    def _make_layer(self, in_channels, out_channels, dropout_rate):
        """Conv-BN-ReLU-Dropout-Conv-BN-ReLU encoder block."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),  # keeps module indices aligned with CloudNet
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def _make_decoder_layer(self, in_channels, out_channels):
        """2x upsample (transposed conv) followed by a Conv-BN-ReLU refinement."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def _initialize_weights(self):
        """He initialization for convs (biases zeroed); unit-gain BatchNorm."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if getattr(module, 'bias', None) is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Contracting path.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))
        e4 = self.enc4(self.pool(e3))

        bottleneck = self.center(self.pool(e4))

        # Expanding path with skip connections.
        d4 = torch.cat([self.dec4(bottleneck), e4], dim=1)
        d3 = torch.cat([self.dec3(d4), e3], dim=1)
        d2 = torch.cat([self.dec2(d3), e2], dim=1)
        d1 = torch.cat([self.dec1(d2), e1], dim=1)

        return self.final(d1)

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the cloud channel (class index 1).

    Args:
        pred: raw logits of shape (N, 2, H, W).
        target: integer mask of shape (N, H, W) with values {0, 1}.
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor: 1 minus the batch-mean Dice coefficient.
    """
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    gt = target.float()

    # Per-sample overlap statistics over the spatial dimensions.
    overlap = (cloud_prob * gt).sum(dim=(1, 2))
    denom = cloud_prob.sum(dim=(1, 2)) + gt.sum(dim=(1, 2))

    dice_coeff = (2.0 * overlap + smooth) / (denom + smooth)
    return 1 - dice_coeff.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal loss (Lin et al.): cross-entropy down-weighted on easy pixels.

    Args:
        pred: raw logits of shape (N, C, H, W).
        target: integer class mask of shape (N, H, W).
        alpha: global scaling factor.
        gamma: focusing exponent; larger values suppress easy examples more.
    """
    per_pixel_ce = F.cross_entropy(pred, target, reduction='none')
    # exp(-CE) recovers the predicted probability of the true class.
    prob_true = torch.exp(-per_pixel_ce)
    modulated = alpha * (1 - prob_true) ** gamma * per_pixel_ce
    return modulated.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """Segmentation metrics at a fixed probability threshold.

    Args:
        pred: raw logits of shape (N, 2, H, W).
        target: integer mask of shape (N, H, W) with values {0, 1}.
        threshold: cloud-probability cutoff for binarization.

    Returns:
        dict with 'iou', 'f1', 'precision', 'recall', 'accuracy' as floats.
    """
    eps = 1e-8
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    predicted = (cloud_prob > threshold).float()
    truth = target.float()

    # Confusion-matrix counts over the whole batch.
    tp = (predicted * truth).sum()
    fp = (predicted * (1 - truth)).sum()
    fn = ((1 - predicted) * truth).sum()
    tn = ((1 - predicted) * (1 - truth)).sum()

    union = predicted.sum() + truth.sum() - tp
    iou = tp / (union + eps)

    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item()
    }


def get_model_size_mb(model):
    """Return the serialized size of *model*'s state_dict in MB.

    The state_dict is serialized into an in-memory buffer, which gives the
    exact on-disk size without touching the filesystem (the previous
    temp-file approach could fail on Windows, where a file opened by
    NamedTemporaryFile cannot always be reopened by torch.save).

    Falls back to a parameter-count estimate if serialization fails.
    """
    import io
    try:
        buffer = io.BytesIO()
        torch.save(model.state_dict(), buffer)
        return buffer.getbuffer().nbytes / (1024 * 1024)
    except Exception as e:
        print(f"序列化方法失败: {e}")
        # Fallback: estimate from trainable parameter count
        # (4 bytes per float32 parameter plus ~20% container overhead).
        total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        estimated_size_mb = total_params * 4 / (1024 * 1024) * 1.2
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

# 自动下载与解压数据集
def ensure_dataset_available(args):
    """Ensure ``args.data_root`` points at a complete 38-Cloud layout.

    If the expected ``38-Cloud_training/train_*`` folders are missing, the
    dataset is downloaded via kagglehub and ``args.data_root`` is rewritten
    to the download location.

    Returns:
        The usable data root, or None if a valid layout could not be found.

    Raises:
        RuntimeError: if kagglehub is missing or the download fails.
    """
    # Fast path: the directory already has the full training structure.
    root = args.data_root
    if os.path.isdir(root) and _has_train_structure(root):
        return root

    print("[auto] 数据目录不完整，尝试自动下载数据集...")

    try:
        import kagglehub
    except ImportError:
        raise RuntimeError("未安装 kagglehub。请运行 `pip install kagglehub`")

    try:
        fetched = kagglehub.dataset_download("sorour/38cloud-cloud-segmentation-in-satellite-images")
        fetched = os.path.normpath(fetched)
        print(f"[auto] 下载完成：{fetched}")
    except Exception as e:
        raise RuntimeError(f"数据集下载失败：{e}")

    if _has_train_structure(fetched):
        args.data_root = fetched
        print(f"[auto] 数据已准备好：{args.data_root}")
        return args.data_root

    print("[auto] 数据集目录结构异常，无法找到有效数据")
    return None

def _has_train_structure(root):
    expected_dirs = [
        os.path.join(root, "38-Cloud_training", "train_red"),
        os.path.join(root, "38-Cloud_training", "train_green"),
        os.path.join(root, "38-Cloud_training", "train_blue"),
        os.path.join(root, "38-Cloud_training", "train_nir"),
        os.path.join(root, "38-Cloud_training", "train_gt"),
    ]
    return all(os.path.isdir(d) for d in expected_dirs)

def true_structured_pruning(model, pruning_ratio=0.3):
    """Structurally prune *model* by rebuilding it with fewer channels.

    Unlike mask-based pruning, this constructs a genuinely smaller
    :class:`PrunedCloudNet` and transplants the most important weights
    from the original network.

    Args:
        model: the trained CloudNet to shrink.
        pruning_ratio: fraction of channels to remove per stage.

    Returns:
        A new PrunedCloudNet initialized from *model*'s weights.
    """
    print(f"应用真正的结构化剪枝 {pruning_ratio:.1%}...")

    baseline = {'enc1': 64, 'enc2': 128, 'enc3': 256, 'enc4': 512, 'center': 1024}

    keep_fraction = 1 - pruning_ratio
    slim_config = {}
    for stage, width in baseline.items():
        # Never shrink a stage below 8 channels.
        slim_config[stage] = max(8, int(width * keep_fraction))
        print(f"  {stage}: {width} -> {slim_config[stage]} 通道")

    compact = PrunedCloudNet(slim_config)
    _copy_important_weights(model, compact, slim_config)
    return compact


def _copy_important_weights(source_model, target_model, channel_config):
    """
    Transplant the most important weights from ``source_model`` into the
    smaller ``target_model`` produced by structured pruning.

    Output channels of each conv are ranked by the L2 norm of their filter;
    the kept indices are remembered per top-level layer in
    ``layer_important_indices`` so that downstream layers' *input* channels
    (and matching bias / BatchNorm vectors) are sliced consistently.
    ``channel_config`` maps stage names ('enc1'..'center') to pruned widths
    and is used to reconstruct the concatenated (decoder + skip) index ranges.
    """
    source_dict = source_model.state_dict()
    target_dict = target_model.state_dict()
    layer_important_indices = {}

    for key in target_dict.keys():
        if key not in source_dict:
            continue

        src = source_dict[key]
        tgt = target_dict[key]

        # ---- Conv2d / ConvTranspose2d weights (4-D tensors) ---- #
        if 'weight' in key and src.dim() == 4 and tgt.dim() == 4:
            layer_name = key.split('.')[0]
            layer_idx = key.split('.')[1] if len(key.split('.')) > 1 else '0'
            src_out, tgt_out = src.size(0), tgt.size(0)
            src_in, tgt_in = src.size(1), tgt.size(1)

            # ---- output-channel selection ---- #
            if src_out > tgt_out:
                # Rank output channels by the L2 norm of each filter.
                norms = torch.norm(src.view(src_out, -1), p=2, dim=1)
                _, sorted_idx = norms.sort(descending=True)
                idx = sorted_idx[:tgt_out]  # keep the tgt_out most important channels
                idx, _ = idx.sort()  # restore original relative order
                layer_important_indices[layer_name] = idx
                pruned = src[idx].clone()
            else:
                pruned = src[:tgt_out].clone()
                # Even when nothing was pruned, record the identity index
                # set so downstream layers can slice their inputs.
                if src_out == tgt_out:
                    layer_important_indices[layer_name] = torch.arange(tgt_out)

            # ---- input-channel selection ---- #
            if src_in > tgt_in:
                in_idx = None

                # Second conv of an encoder block (Sequential index 4):
                # input channels equal the same block's first-conv outputs.
                if layer_idx == '4':  # second conv in the block
                    # Reuse the output-channel indices chosen for this block.
                    if layer_name in layer_important_indices:
                        in_idx = layer_important_indices[layer_name][:tgt_in]

                # First conv of an encoder block (Sequential index 0):
                # input channels come from the previous stage's outputs.
                elif layer_idx == '0':
                    if 'enc2' in key and 'enc1' in layer_important_indices:
                        in_idx = layer_important_indices['enc1']
                    elif 'enc3' in key and 'enc2' in layer_important_indices:
                        in_idx = layer_important_indices['enc2']
                    elif 'enc4' in key and 'enc3' in layer_important_indices:
                        in_idx = layer_important_indices['enc3']
                    elif 'center' in key and 'enc4' in layer_important_indices:
                        in_idx = layer_important_indices['enc4']

                # Decoder layers.
                elif 'dec' in key:
                    if 'dec4' in key and layer_idx == '0':  # ConvTranspose2d
                        in_idx = layer_important_indices.get('center', torch.arange(tgt_in))
                    elif 'dec4' in key and layer_idx == '3':  # second Conv2d
                        in_idx = layer_important_indices.get('dec4', torch.arange(tgt_in))
                    elif 'dec3' in key and layer_idx == '0':
                        # dec3's input is the concat of dec4 output and enc4 skip.
                        dec4_channels = channel_config['enc4']
                        enc4_idx = layer_important_indices.get('enc4', torch.arange(dec4_channels))
                        dec4_idx = layer_important_indices.get('dec4', torch.arange(dec4_channels))
                        combined_idx = torch.cat([dec4_idx, enc4_idx + dec4_channels])
                        in_idx = combined_idx[:tgt_in]
                    elif 'dec3' in key and layer_idx == '3':
                        in_idx = layer_important_indices.get('dec3', torch.arange(tgt_in))
                    elif 'dec2' in key and layer_idx == '0':
                        # dec2's input is the concat of dec3 output and enc3 skip.
                        dec3_channels = channel_config['enc3']
                        enc3_idx = layer_important_indices.get('enc3', torch.arange(dec3_channels))
                        dec3_idx = layer_important_indices.get('dec3', torch.arange(dec3_channels))
                        combined_idx = torch.cat([dec3_idx, enc3_idx + dec3_channels])
                        in_idx = combined_idx[:tgt_in]
                    elif 'dec2' in key and layer_idx == '3':
                        in_idx = layer_important_indices.get('dec2', torch.arange(tgt_in))
                    elif 'dec1' in key and layer_idx == '0':
                        # dec1's input is the concat of dec2 output and enc2 skip.
                        dec2_channels = channel_config['enc2']
                        enc2_idx = layer_important_indices.get('enc2', torch.arange(dec2_channels))
                        dec2_idx = layer_important_indices.get('dec2', torch.arange(dec2_channels))
                        combined_idx = torch.cat([dec2_idx, enc2_idx + dec2_channels])
                        in_idx = combined_idx[:tgt_in]
                    elif 'dec1' in key and layer_idx == '3':
                        in_idx = layer_important_indices.get('dec1', torch.arange(tgt_in))

                # Final classifier layer.
                elif 'final' in key:
                    # final's input is the concat of dec1 output and enc1 skip.
                    dec1_channels = channel_config['enc1']
                    enc1_idx = layer_important_indices.get('enc1', torch.arange(dec1_channels))
                    dec1_idx = layer_important_indices.get('dec1', torch.arange(dec1_channels))
                    combined_idx = torch.cat([dec1_idx, enc1_idx + dec1_channels])
                    in_idx = combined_idx[:tgt_in]

                # Fall back to the first tgt_in channels when no index set applies.
                if in_idx is None:
                    in_idx = torch.arange(min(tgt_in, src_in))
                else:
                    # Drop out-of-range indices and trim to the target width.
                    in_idx = in_idx[in_idx < src_in][:tgt_in]
                    if len(in_idx) < tgt_in:
                        # Not enough indices: top up with unused ones.
                        remaining = tgt_in - len(in_idx)
                        all_used = set(in_idx.tolist())
                        extra_valid = []
                        for i in range(src_in):
                            if i not in all_used:
                                extra_valid.append(i)
                            if len(extra_valid) == remaining:
                                break
                        if extra_valid:
                            in_idx = torch.cat([in_idx, torch.tensor(extra_valid)])
                    in_idx = in_idx[:tgt_in]

                pruned = pruned[:, in_idx].clone()

            target_dict[key] = pruned

        # ---- bias vectors ---- #
        elif 'bias' in key and src.dim() == 1 and tgt.dim() == 1:
            layer_name = key.split('.')[0]
            src_ch, tgt_ch = src.size(0), tgt.size(0)
            if src_ch > tgt_ch and layer_name in layer_important_indices:
                idx = layer_important_indices[layer_name][:tgt_ch]
                # Guard against out-of-range indices.
                idx = idx[idx < src_ch]
                if len(idx) < tgt_ch:
                    # Not enough indices: pad with the leading ones.
                    extra_needed = tgt_ch - len(idx)
                    extra_idx = torch.arange(extra_needed)
                    idx = torch.cat([idx, extra_idx])
                target_dict[key] = src[idx[:tgt_ch]].clone()
            else:
                target_dict[key] = src[:tgt_ch].clone()

        # ---- other 1-D parameters (BatchNorm weight/bias/running stats) ---- #
        elif src.dim() == 1 and tgt.dim() == 1:
            layer_name = key.split('.')[0]
            src_ch, tgt_ch = src.size(0), tgt.size(0)
            if src_ch > tgt_ch and layer_name in layer_important_indices:
                idx = layer_important_indices[layer_name]
                # Guard against out-of-range indices.
                idx = idx[idx < src_ch][:tgt_ch]
                if len(idx) < tgt_ch:
                    # Pad with leading indices when too few survive.
                    extra_needed = tgt_ch - len(idx)
                    extra_idx = torch.arange(extra_needed)
                    idx = torch.cat([idx, extra_idx])
                target_dict[key] = src[idx[:tgt_ch]].clone()
            else:
                target_dict[key] = src[:tgt_ch].clone()

        # ---- parameters whose shape already matches ---- #
        elif src.shape == tgt.shape:
            target_dict[key] = src.clone()

    target_model.load_state_dict(target_dict)

def proper_quantization(model, calibration_loader, bits=8):
    """Quantize *model* to the requested bit width.

    bits == 8: eager-mode static quantization (fbgemm backend) with a short
    calibration pass over ``calibration_loader``. The input model is deep-
    copied first so the caller's model is not mutated (the previous version
    attached qconfig/observers to — and switched eval mode on — the
    original model).

    bits == 16: simulated fp16 by rounding every conv/linear weight through
    half precision; other bit widths leave the copy unchanged.

    Returns:
        A new, quantized model; the input model is left untouched.
    """
    print(f"应用{bits}位静态量化...")

    if bits == 8:
        # Work on a copy so qconfig assignment / observer insertion does
        # not leak into the caller's model.
        model_fp = copy.deepcopy(model)
        model_fp.eval()
        model_fp.qconfig = torch.quantization.get_default_qconfig('fbgemm')

        model_prepared = torch.quantization.prepare(model_fp, inplace=False)

        # Calibration: run a handful of batches to collect activation stats.
        print("进行量化校准...")
        with torch.no_grad():
            for i, (data, _) in enumerate(calibration_loader):
                if i >= 10:  # a few batches are enough for calibration
                    break
                model_prepared(data.cpu())  # static quantization runs on CPU

        quantized_model = torch.quantization.convert(model_prepared, inplace=False)
        return quantized_model

    else:
        # Manual 16-bit weight rounding on a copy.
        quantized_model = copy.deepcopy(model)

        for name, param in quantized_model.named_parameters():
            if 'weight' in name and param.dim() > 1:
                with torch.no_grad():
                    if bits == 16:
                        param.copy_(param.half().float())

        return quantized_model


def train_model(model, train_loader, val_loader, device, args):
    """Train *model* with a combined CE + Dice + Focal loss.

    Uses AdamW with cosine LR annealing over ``args.epochs`` epochs,
    gradient clipping at norm 1.0, validation-IoU-based model selection,
    and early stopping after 10 epochs without improvement.

    Returns:
        (model, best_iou, train_history): the model with its best-IoU
        weights loaded, the best validation IoU, and per-epoch curves.
    """
    print(f"开始训练Cloud-Net模型...")

    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay,
                            betas=(0.9, 0.999))

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    # Plain cross-entropy term (class weights could be plugged in here).
    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = 0.0
    best_model_state = None
    train_history = {'loss': [], 'iou': [], 'val_iou': [], 'val_f1': []}

    # Early-stopping bookkeeping.
    patience = 10
    patience_counter = 0

    for epoch in range(args.epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_metrics = {'iou': 0.0, 'f1': 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Combined loss: weighted CE + Dice + Focal.
            ce_loss = ce_loss_fn(outputs, masks)
            d_loss = dice_loss(outputs, masks)
            f_loss = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)

            total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # Metrics at a fixed 0.5 threshold.
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += total_loss.item()
            running_metrics['iou'] += metrics['iou']
            running_metrics['f1'] += metrics['f1']
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'IoU': f'{metrics["iou"]:.4f}',
                'F1': f'{metrics["f1"]:.4f}'
            })

        # Average training metrics for the epoch.
        avg_train_loss = running_loss / num_batches
        avg_train_iou = running_metrics['iou'] / num_batches

        # --- validation phase ---
        val_metrics = evaluate_model(model, val_loader, device)

        scheduler.step()

        # Keep the weights with the best validation IoU.
        if val_metrics['iou'] > best_iou:
            best_iou = val_metrics['iou']
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # Record epoch history.
        train_history['loss'].append(avg_train_loss)
        train_history['iou'].append(avg_train_iou)
        train_history['val_iou'].append(val_metrics['iou'])
        train_history['val_f1'].append(val_metrics['f1'])

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {avg_train_iou:.4f}, '
              f'Val IoU: {val_metrics["iou"]:.4f}, '
              f'Val F1: {val_metrics["f1"]:.4f}')

        # Early stopping.
        if patience_counter >= patience:
            print(f"早停在epoch {epoch + 1}, 最佳IoU: {best_iou:.4f}")
            break

    # Restore the best weights before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, best_iou, train_history


def evaluate_model(model, dataloader, device, threshold=0.5):
    """Evaluate *model* on *dataloader* at a fixed probability threshold.

    Returns:
        dict of batch-averaged 'iou', 'f1', 'precision', 'recall', 'accuracy'.
    """
    model.eval()
    sums = dict.fromkeys(('iou', 'f1', 'precision', 'recall', 'accuracy'), 0.0)
    batches = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)
            batch_metrics = calculate_metrics_fixed(model(images), masks, threshold)

            for name in sums:
                sums[name] += batch_metrics[name]
            batches += 1

    # Average each metric over the number of batches.
    return {name: total / batches for name, total in sums.items()}


def count_parameters(model):
    """Return the number of trainable parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """Dataset-level (micro-averaged) accuracy and precision for the cloud class.

    Confusion-matrix counts are accumulated over every pixel in the loader
    rather than averaged per batch.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            cloud_prob = torch.softmax(model(images), dim=1)[:, 1, :, :]
            pred = (cloud_prob > threshold).to(masks.dtype).reshape(-1).float()
            truth = masks.reshape(-1).float()
            tp += float((pred * truth).sum().item())
            fp += float((pred * (1 - truth)).sum().item())
            fn += float(((1 - pred) * truth).sum().item())
            tn += float(((1 - pred) * (1 - truth)).sum().item())
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def _measure_inference_memory_mb(model, sample_images, device):
    """Approximate the extra memory (MB) consumed by one forward pass.

    CUDA devices use the allocator's peak statistics; on CPU the process
    RSS delta around the pass is used (a coarse, noise-prone estimate).
    """
    import gc, time
    process = psutil.Process(os.getpid())
    model.eval()
    # Warm-up so lazy/one-time allocations don't pollute the measurement.
    with torch.no_grad():
        _ = model(sample_images)
    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        baseline = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_images)
        peak = torch.cuda.max_memory_allocated(device)
        return float(max(peak - baseline, 0) / (1024 * 1024))
    # CPU path: compare resident-set size before/after the pass.
    gc.collect(); time.sleep(0.02)
    before = process.memory_info().rss
    with torch.no_grad():
        _ = model(sample_images)
    gc.collect(); time.sleep(0.02)
    after = process.memory_info().rss
    return float(max(after - before, 0) / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """Efficiency report: parameter count, serialized size, inference memory.

    The first batch of *dataloader* is used as the sample input for the
    memory measurement.
    """
    sample = None
    for batch_images, _ in dataloader:
        sample = batch_images.to(device)
        break
    if sample is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")
    return {
        "参数数量": int(count_parameters(model)),
        "模型大小(MB)": float(get_model_size_mb(model)),
        "内存占用(MB)": float(_measure_inference_memory_mb(model, sample, device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Combine performance and efficiency metrics into a single report dict."""
    performance = evaluate_performance_spec(model, dataloader, device, threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    return {
        "模型性能": {"准确率": performance["accuracy"], "精度": performance["precision"]},
        "模型效率": efficiency,
    }

def create_dataloaders(args):
    """Build train/val DataLoaders over the 38-Cloud training split (80/20)."""
    print("准备 38-Cloud 数据集...")

    # Channel-wise normalization for the 4-band (RGB + NIR) input.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406, 0.5],
        std=[0.229, 0.224, 0.225, 0.2],
    )

    dataset = CloudDetectionDataset(
        root_dir=args.data_root,
        mode='train',
        transform=normalize,
    )

    # 80/20 random split into train/validation subsets.
    n_train = int(0.8 * len(dataset))
    train_ds, val_ds = random_split(dataset, [n_train, len(dataset) - n_train])

    # num_workers=0 / pin_memory=False keeps loading stable on Windows
    # and avoids useless pinning for CPU-only training.
    shared_kwargs = dict(batch_size=args.batch_size, num_workers=0, pin_memory=False)
    train_loader = DataLoader(train_ds, shuffle=True, **shared_kwargs)
    val_loader = DataLoader(val_ds, shuffle=False, **shared_kwargs)

    print(f"训练集大小: {len(train_ds)}")
    print(f"验证集大小: {len(val_ds)}")
    return train_loader, val_loader

def save_results(results, output_dir):
    """Serialize *results* to ``compression_results.json`` in *output_dir*.

    NumPy arrays and scalars are recursively converted to native Python
    types first, because ``json`` cannot serialize them directly.

    Args:
        results: arbitrarily nested dict/list/tuple structure of metrics.
        output_dir: target directory (created if it does not exist).
    """
    # Be robust even if the caller did not create the directory.
    os.makedirs(output_dir, exist_ok=True)
    results_path = os.path.join(output_dir, 'compression_results.json')

    def convert_types(obj):
        # Recursively replace numpy containers/scalars with JSON-safe types.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, (np.integer, np.floating, np.bool_)):
            # np.bool_ was previously unhandled and made json.dump raise
            # TypeError; .item() covers all three scalar families.
            return obj.item()
        elif isinstance(obj, dict):
            return {key: convert_types(value) for key, value in obj.items()}
        elif isinstance(obj, (list, tuple)):
            # Recurse into tuples as well, since they may wrap numpy scalars.
            return [convert_types(item) for item in obj]
        else:
            return obj

    converted_results = convert_types(results)

    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(converted_results, f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def get_args():
    """Parse command-line arguments for Cloud-Net training and compression.

    The default data root is resolved relative to this script's location and
    can be overridden either by the ``CLOUD_DATA_ROOT`` environment variable
    or the ``--data-root`` flag. After parsing, the expected 38-Cloud
    training sub-directories are checked and a friendly hint is printed if
    any are missing (parsing still succeeds).

    Returns:
        argparse.Namespace with all configuration options.
    """
    parser = argparse.ArgumentParser(description="Cloud-Net模型训练和压缩")

    # === Default data root: resolved relative to this script.
    # NOTE(review): this joins script_dir/../CloudDetection, but the comment
    # and the --data-root help text below claim ..\..\实习数据集\CloudDetection
    # — the two disagree; confirm which path is intended.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CloudDetection")
    )

    # === Optional environment-variable override.
    # e.g. in Windows CMD: set CLOUD_DATA_ROOT=E:\USM\year2sem3\实习数据集\CloudDetection
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    # === Original arguments (unchanged; only --data-root's default is now relative).
    parser.add_argument("--stage", choices=["train", "compress"], default="compress",
                        help="train: 只初始训练；compress: 训练后做剪枝+微调")
    parser.add_argument("--output-dir",    default="./output", help="输出目录")
    parser.add_argument("--epochs",        type=int,   default=25,   help="训练轮数")
    parser.add_argument("--batch-size",    type=int,   default=8,    help="批次大小")
    parser.add_argument("--learning-rate", type=float, default=0.001,help="学习率")
    parser.add_argument("--weight-decay",  type=float, default=1e-4, help="权重衰减")
    parser.add_argument("--data-root",     type=str,   default=default_data_root,
                        help="CloudDetection 数据集根目录（默认相对脚本：..\\..\\实习数据集\\CloudDetection）")
    parser.add_argument("--compression-method",
                        choices=["quantization","pruning","combined"],
                        default="pruning", help="压缩方法")
    parser.add_argument("--prune-ft-epochs", type=int,   default=3,    help="剪枝后微调轮数")
    parser.add_argument("--prune-ft-lr",     type=float, default=1e-4, help="剪枝后微调学习率")
    parser.add_argument("--quantization-bits", type=int, default=8,     help="量化位数")
    parser.add_argument("--pruning-ratio",     type=float, default=0.3, help="剪枝比例")

    args = parser.parse_args()

    # Friendly sanity check: only the training-split directories are checked,
    # so a missing test_* directory does not raise a false alarm.
    expected_train_dirs = [
        os.path.join(args.data_root, "38-Cloud_training", "train_red"),
        os.path.join(args.data_root, "38-Cloud_training", "train_green"),
        os.path.join(args.data_root, "38-Cloud_training", "train_blue"),
        os.path.join(args.data_root, "38-Cloud_training", "train_nir"),
        os.path.join(args.data_root, "38-Cloud_training", "train_gt"),
    ]
    missing = [p for p in expected_train_dirs if not os.path.isdir(p)]
    if missing:
        print("[提示] 数据目录可能不完整或路径不对：")
        for p in missing:
            print("  - 缺少目录：", p)
        print("你可以通过命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")

    return args


def main():
    """End-to-end pipeline: train Cloud-Net, then structurally prune and
    fine-tune it, evaluate both models, and persist checkpoints plus a JSON
    results report to ``args.output_dir``.
    """
    args = get_args()

    # Ensure the dataset is available before anything else runs.
    ensure_dataset_available(args)

    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Fixed seeds for reproducible splits and training.
    torch.manual_seed(42)
    np.random.seed(42)

    # Build train/val loaders (80/20 split of the training set).
    train_loader, val_loader = create_dataloaders(args)

    # -- Always run the full training stage first. --
    # NOTE(review): args.stage is parsed but never consulted here — training
    # runs even when --stage=compress; confirm this is the intended flow.
    print("=== STAGE: TRAIN ===")
    model = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    model, best_iou, history = train_model(
        model, train_loader, val_loader, device, args
    )
    trained_model_path = os.path.join(args.output_dir, "cloudnet_trained.pth")
    torch.save(model.state_dict(), trained_model_path)
    print(f"训练完成，模型已保存到 {trained_model_path}")

    # -- Then enter the compression stage. --
    print("=== STAGE: COMPRESS ===")
    model.load_state_dict(torch.load(trained_model_path, map_location=device))
    print("已加载训练好的模型，开始压缩流程")

    # Baseline metrics for the uncompressed model.
    print("评估原始模型性能...")
    orig_metrics = evaluate_model(model, val_loader, device)
    orig_iou = orig_metrics["iou"]
    orig_size_mb = get_model_size_mb(model)
    orig_params = count_parameters(model)
    print(f"原始模型 - IoU: {orig_iou:.4f}, 大小: {orig_size_mb:.2f}MB, 参数: {orig_params:,}")

    # Structured pruning.
    # NOTE(review): only the pruning path is executed regardless of
    # --compression-method; quantization/combined appear unhandled in main.
    print(f"应用结构化剪枝 (比例: {args.pruning_ratio:.1%})...")
    pruned_model = true_structured_pruning(model, args.pruning_ratio).to(device)

    # Fine-tune the pruned model with its own (fewer) epochs and smaller LR.
    print("剪枝后微调...")
    prune_args = argparse.Namespace(
        epochs=args.prune_ft_epochs,
        learning_rate=args.prune_ft_lr,
        weight_decay=args.weight_decay
    )
    pruned_model, best_pruned_iou, _ = train_model(
        pruned_model, train_loader, val_loader, device, prune_args
    )

    # Metrics for the pruned, fine-tuned model.
    pruned_metrics = evaluate_model(pruned_model, val_loader, device)
    pruned_iou = pruned_metrics["iou"]
    pruned_size_mb = get_model_size_mb(pruned_model)
    pruned_params = count_parameters(pruned_model)

    # Save the pruned checkpoint.
    pruned_model_path = os.path.join(args.output_dir, "cloudnet_pruned.pth")
    torch.save(pruned_model.state_dict(), pruned_model_path)
    print(f"剪枝模型已保存到 {pruned_model_path}")

    # Compute and report the compression effect.
    iou_drop = orig_iou - pruned_iou
    iou_drop_pct = (iou_drop / orig_iou) * 100
    size_reduction = ((orig_size_mb - pruned_size_mb) / orig_size_mb) * 100
    param_reduction = ((orig_params - pruned_params) / orig_params) * 100

    # Qualitative verdict based on the relative IoU drop.
    if iou_drop_pct < 5:
        status_symbol, status = "✅", "优秀"
    elif iou_drop_pct < 10:
        status_symbol, status = "⚠️", "良好"
    else:
        status_symbol, status = "❌", "需改进"

    print("\n" + "=" * 70)
    print("Cloud-Net 模型压缩效果汇总")
    print("=" * 70)
    print(f"原始模型:")
    print(f"  - IoU: {orig_iou:.4f}")
    print(f"  - 大小: {orig_size_mb:.2f} MB")
    print(f"  - 参数: {orig_params:,}")
    print(f"\n剪枝后模型:")
    print(f"  - IoU: {pruned_iou:.4f} (下降 {iou_drop:.4f}, {iou_drop_pct:.1f}%)")
    print(f"  - 大小: {pruned_size_mb:.2f} MB (减少 {size_reduction:.1f}%)")
    print(f"  - 参数: {pruned_params:,} (减少 {param_reduction:.1f}%)")
    print(f"\n{status_symbol} 压缩效果: {status}")
    print("=" * 70)

    # -- Build the "per-figure" spec-style evaluation reports. --
    report_orig = build_eval_report_spec(model, val_loader, device)
    report_prune = build_eval_report_spec(pruned_model, val_loader, device)

    results = {
        "original_model": {
            "iou": float(orig_iou),
            "f1": float(orig_metrics["f1"]),
            "precision": float(orig_metrics["precision"]),
            "recall": float(orig_metrics["recall"]),
            "accuracy": float(orig_metrics["accuracy"]),
            "model_size_mb": float(orig_size_mb),
            "parameters": int(orig_params)
        },
        "pruned_model": {
            "iou": float(pruned_iou),
            "f1": float(pruned_metrics["f1"]),
            "precision": float(pruned_metrics["precision"]),
            "recall": float(pruned_metrics["recall"]),
            "accuracy": float(pruned_metrics["accuracy"]),
            "model_size_mb": float(pruned_size_mb),
            "parameters": int(pruned_params)
        },
        "compression_stats": {
            "iou_drop_percent": float(iou_drop_pct),
            "size_reduction_percent": float(size_reduction),
            "parameter_reduction_percent": float(param_reduction),
            "pruning_ratio": float(args.pruning_ratio),
            "status": status
        },
        "spec_report": {
            "original": report_orig,
            "pruned": report_prune
        }
    }
    save_results(results, args.output_dir)

    # Console quick view of both spec reports.
    print("\n=== 按图的模型评估 ===")

    def _p(tag, rep):
        # Print one spec report (performance + efficiency) on a single line.
        print(f"{tag} | 性能: 准确率={rep['模型性能']['准确率']:.4f}, 精度={rep['模型性能']['精度']:.4f} | "
              f"效率: 参数={rep['模型效率']['参数数量']}, 大小={rep['模型效率']['模型大小(MB)']:.2f}MB, "
              f"内存={rep['模型效率']['内存占用(MB)']:.2f}MB")

    _p("Original", report_orig)
    _p("Pruned", report_prune)

if __name__ == "__main__":
    main()