#!/usr/bin/env python

"""
Cloud-Net模型训练和压缩实现 - 完全修复版本
基于论文: "Cloud-Net: An End-to-end Cloud Detection Algorithm for Landsat 8 Imagery"
修复了所有已知问题：量化、剪枝、评估方法、数据生成等
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import copy
import time
import numpy as np
import json
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import OrderedDict
import math
import tempfile
from sklearn.cluster import MiniBatchKMeans
import tensorly as tl
from tensorly.decomposition import parafac
from tensorly.cp_tensor import cp_to_tensor
import rasterio
import psutil,time
import kagglehub

class CloudDetectionDataset(Dataset):
    """PyTorch dataset for 38-Cloud cloud-segmentation patches.

    Each sample is a (4, H, W) float32 tensor holding the red/green/blue/nir
    bands (reflectance divided by 10000 and clipped to [0, 1]) together with
    an int64 (H, W) binary cloud mask. The red-band directory is used as the
    canonical sample index; the sibling band and ground-truth file names are
    derived from it by swapping the "red_" prefix.
    """

    def __init__(self, root_dir, mode='train', transform=None):
        super().__init__()
        assert mode in ['train', 'test']
        subset = '38-Cloud_training' if mode == 'train' else '38-Cloud_test'
        base = os.path.join(root_dir, subset)

        self.band_dirs = {
            band: os.path.join(base, f'{mode}_{band}')
            for band in ('red', 'green', 'blue', 'nir')
        }
        self.gt_dir = os.path.join(base, f'{mode}_gt')

        # Red-band file names (with their "red_" prefix) index the samples.
        self.red_files = sorted(
            name for name in os.listdir(self.band_dirs['red'])
            if name.lower().endswith('.tif')
        )
        self.transform = transform

    def __len__(self):
        return len(self.red_files)

    def __getitem__(self, idx):
        red_name = self.red_files[idx]

        # Build all five file paths from the red-band name.
        paths = {'red': os.path.join(self.band_dirs['red'], red_name)}
        for band in ('green', 'blue', 'nir'):
            paths[band] = os.path.join(
                self.band_dirs[band], red_name.replace('red_', f'{band}_', 1))
        paths['gt'] = os.path.join(self.gt_dir, red_name.replace('red_', 'gt_', 1))

        for k, p in paths.items():
            if not os.path.exists(p):
                raise FileNotFoundError(f"Missing file for '{k}': {p}")

        # Read and normalize the four spectral bands.
        channels = []
        for band in ('red', 'green', 'blue', 'nir'):
            with rasterio.open(paths[band]) as src:
                data = src.read(1).astype(np.float32)
            data = np.nan_to_num(data, nan=0.0)
            channels.append(np.clip(data / 10000.0, 0.0, 1.0))
        image = np.stack(channels, axis=0)  # (4, H, W)

        # Ground truth: any positive pixel counts as cloud.
        with rasterio.open(paths['gt']) as src:
            raw_mask = src.read(1)
        mask = (raw_mask > 0).astype(np.int64)

        image_t = torch.from_numpy(image)
        mask_t = torch.from_numpy(mask)
        if self.transform is not None:
            # Normalization is applied to the image only, never the mask.
            image_t = self.transform(image_t)
        return image_t, mask_t

def weight_sharing(model, rank=16, device='cpu'):
    """Low-rank compress every eligible convolution kernel of *model* in place.

    Each 4-D Conv2d / ConvTranspose2d weight is approximated via a CP
    (parafac) decomposition and the reconstruction written back. 1x1 and
    very small kernels are skipped (little gain, unstable fits), and the
    rank is clamped so it never exceeds a single mode dimension.

    Returns the mutated model.
    """
    tl.set_backend('numpy')

    for name, module in model.named_modules():
        if not isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            continue
        weight = module.weight.data
        if weight.dim() != 4:
            continue

        out_c, in_c, kH, kW = weight.shape
        if kH * kW <= 1:
            # 1x1 kernels: decomposing brings essentially no benefit.
            print(f"↷ skip `{name}` (kernel={kH}x{kW})")
            continue
        if out_c * in_c * kH * kW < 512:
            # Very small tensors: decomposition cost outweighs any saving.
            print(f"↷ skip `{name}` (very small tensor)")
            continue

        # For stability, cap the rank by every mode's dimension.
        safe_rank = int(min(rank, out_c, in_c, max(kH, kW)))
        if safe_rank < 2:
            print(f"↷ skip `{name}` (safe_rank<2)")
            continue

        kernel = weight.detach().cpu().numpy().astype(np.float32)

        try:
            factors = parafac(kernel, rank=safe_rank, init='svd',
                              tol=1e-6, n_iter_max=50)
        except Exception as e:
            # SVD init can fail on ill-conditioned kernels; retry randomly.
            print(f"☞ `{name}` SVD init failed ({e}), fallback to random init")
            factors = parafac(kernel, rank=safe_rank, init='random',
                              n_iter_max=50)

        reconstructed = cp_to_tensor(factors)
        module.weight.data = torch.from_numpy(reconstructed).to(device).to(weight.dtype)

        print(f"✓ layer `{name}` CP rank={safe_rank}")

    return model

class CloudNet(nn.Module):
    """U-Net style Cloud-Net for 4-band (RGB + NIR) cloud segmentation.

    Four encoder stages double the channel count while max-pooling halves
    the resolution; the decoder mirrors this with transposed convolutions,
    concatenating the matching encoder activation at each stage, and ends
    in a 1x1 classifier over ``num_classes`` channels.
    """

    def __init__(self, num_classes=2, dropout_rate=0.1):
        super(CloudNet, self).__init__()

        # Encoder: 4 -> 64 -> 128 -> 256 -> 512 channels.
        self.enc1 = self._conv_block(4, 64, dropout_rate)
        self.enc2 = self._conv_block(64, 128, dropout_rate)
        self.enc3 = self._conv_block(128, 256, dropout_rate)
        self.enc4 = self._conv_block(256, 512, dropout_rate)

        # Bottleneck.
        self.center = self._conv_block(512, 1024, dropout_rate)

        # Decoder; input widths account for the skip concatenations.
        self.dec4 = self._up_block(1024, 512)
        self.dec3 = self._up_block(1024, 256)  # 1024 = 512 + 512 (skip)
        self.dec2 = self._up_block(512, 128)   # 512 = 256 + 256
        self.dec1 = self._up_block(256, 64)    # 256 = 128 + 128

        # 1x1 classification head over the last concatenated features.
        self.final = nn.Conv2d(128, num_classes, 1)  # 128 = 64 + 64

        # Shared 2x2 max-pool used between encoder stages.
        self.pool = nn.MaxPool2d(2, 2)

        self._initialize_weights()

    @staticmethod
    def _conv_block(in_ch, out_ch, dropout_rate=0.1):
        """Two 3x3 conv+BN+ReLU layers with dropout in between."""
        layers = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout_rate),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)

    @staticmethod
    def _up_block(in_ch, out_ch):
        """2x upsampling via transposed conv, then one 3x3 conv+BN+ReLU."""
        layers = [
            nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """Kaiming-normal init for convs; unit/zero init for batch norms."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        # Encoder path; keep each stage's output for the skip connections.
        e1 = self.enc1(x)
        e2 = self.enc2(self.pool(e1))
        e3 = self.enc3(self.pool(e2))
        e4 = self.enc4(self.pool(e3))

        bottleneck = self.center(self.pool(e4))

        # Decoder path: upsample, then concatenate the matching encoder map.
        d4 = torch.cat([self.dec4(bottleneck), e4], dim=1)
        d3 = torch.cat([self.dec3(d4), e3], dim=1)
        d2 = torch.cat([self.dec2(d3), e2], dim=1)
        d1 = torch.cat([self.dec1(d2), e1], dim=1)

        return self.final(d1)

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss computed on the cloud (class-1) probability channel.

    `pred` holds raw logits of shape (B, 2, H, W); `target` is the integer
    mask. The Dice coefficient is averaged over the batch and subtracted
    from 1 so lower is better.
    """
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    cloud_true = target.float()

    overlap = (cloud_prob * cloud_true).sum(dim=(1, 2))
    total = cloud_prob.sum(dim=(1, 2)) + cloud_true.sum(dim=(1, 2))

    dice = (2.0 * overlap + smooth) / (total + smooth)
    return 1 - dice.mean()


def focal_loss(pred, target, alpha=0.25, gamma=2.0):
    """Focal loss (Lin et al.) built on per-pixel cross-entropy.

    Down-weights easy pixels via the (1 - p_t)^gamma modulating factor,
    scaled by alpha, and averages over all pixels.
    """
    per_pixel_ce = F.cross_entropy(pred, target, reduction='none')
    pt = torch.exp(-per_pixel_ce)
    modulated = alpha * (1 - pt) ** gamma * per_pixel_ce
    return modulated.mean()


def calculate_metrics_fixed(pred, target, threshold=0.5):
    """Segmentation metrics at a fixed probability threshold.

    Thresholds the softmax cloud (class-1) probability at `threshold`
    and returns IoU, F1, precision, recall and pixel accuracy as plain
    Python floats.
    """
    eps = 1e-8
    cloud_prob = torch.softmax(pred, dim=1)[:, 1, :, :]
    pred_bin = (cloud_prob > threshold).float()
    true_bin = target.float()

    # Confusion-matrix counts over all pixels.
    tp = (pred_bin * true_bin).sum()
    fp = (pred_bin * (1 - true_bin)).sum()
    fn = ((1 - pred_bin) * true_bin).sum()
    tn = ((1 - pred_bin) * (1 - true_bin)).sum()

    # IoU from intersection over union of the positive masks.
    iou = tp / (pred_bin.sum() + true_bin.sum() - tp + eps)

    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)

    return {
        'iou': iou.item(),
        'f1': f1.item(),
        'precision': precision.item(),
        'recall': recall.item(),
        'accuracy': accuracy.item()
    }


def get_model_size_mb(model):
    """Return the serialized size of ``model``'s state_dict in megabytes.

    Serializes to an in-memory buffer, so no temporary files are created
    and filesystem permission issues cannot occur. If serialization fails
    for any reason, falls back to an estimate from the trainable parameter
    count (4 bytes per float32 parameter plus ~20% container overhead).
    """
    import io
    try:
        buffer = io.BytesIO()
        torch.save(model.state_dict(), buffer)
        return buffer.getbuffer().nbytes / (1024 * 1024)
    except Exception as e:
        print(f"内存序列化方法失败: {e}")
        # Fallback: estimate from trainable parameter count.
        total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        estimated_size_mb = total_params * 4 / (1024 * 1024) * 1.2
        print(f"使用估算方法计算模型大小: {estimated_size_mb:.2f} MB")
        return estimated_size_mb

def train_model(model, train_loader, val_loader, device, args):
    """Train *model* with a combined CE + Dice + Focal loss.

    Uses AdamW with cosine-annealed learning rate, gradient-norm clipping,
    per-epoch validation, best-checkpoint tracking on validation IoU, and
    early stopping after 10 epochs without improvement.

    Args:
        model: network to optimize (already on *device*).
        train_loader / val_loader: DataLoaders yielding (images, masks).
        device: torch device the batches are moved to.
        args: namespace providing ``epochs``, ``learning_rate`` and
            ``weight_decay``.

    Returns:
        Tuple of (model restored to its best weights, best validation IoU,
        history dict with per-epoch 'loss'/'iou'/'val_iou'/'val_f1' lists).
    """
    print(f"开始训练Cloud-Net模型...")

    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay,
                            betas=(0.9, 0.999))

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

    # Cross-entropy term of the combined loss (unweighted).
    ce_loss_fn = nn.CrossEntropyLoss()

    best_iou = 0.0
    best_model_state = None
    train_history = {'loss': [], 'iou': [], 'val_iou': [], 'val_f1': []}

    patience = 10
    patience_counter = 0

    for epoch in range(args.epochs):
        # Training phase
        model.train()
        running_loss = 0.0
        running_metrics = {'iou': 0.0, 'f1': 0.0}
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, masks) in enumerate(pbar):
            images, masks = images.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Combined loss: 0.3*CE + 0.4*Dice + 0.3*Focal.
            ce_loss = ce_loss_fn(outputs, masks)
            d_loss = dice_loss(outputs, masks)
            f_loss = focal_loss(outputs, masks, alpha=0.25, gamma=2.0)

            total_loss = 0.3 * ce_loss + 0.4 * d_loss + 0.3 * f_loss

            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # Batch metrics at a fixed 0.5 threshold.
            metrics = calculate_metrics_fixed(outputs, masks, threshold=0.5)
            running_loss += total_loss.item()
            running_metrics['iou'] += metrics['iou']
            running_metrics['f1'] += metrics['f1']
            num_batches += 1

            pbar.set_postfix({
                'Loss': f'{total_loss.item():.4f}',
                'IoU': f'{metrics["iou"]:.4f}',
                'F1': f'{metrics["f1"]:.4f}'
            })

        # Average training metrics for this epoch
        avg_train_loss = running_loss / num_batches
        avg_train_iou = running_metrics['iou'] / num_batches

        # Validation phase
        val_metrics = evaluate_model(model, val_loader, device)

        scheduler.step()

        # Track the best checkpoint by validation IoU
        if val_metrics['iou'] > best_iou:
            best_iou = val_metrics['iou']
            best_model_state = copy.deepcopy(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1

        # Record history
        train_history['loss'].append(avg_train_loss)
        train_history['iou'].append(avg_train_iou)
        train_history['val_iou'].append(val_metrics['iou'])
        train_history['val_f1'].append(val_metrics['f1'])

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {avg_train_iou:.4f}, '
              f'Val IoU: {val_metrics["iou"]:.4f}, '
              f'Val F1: {val_metrics["f1"]:.4f}')

        # Early stopping
        if patience_counter >= patience:
            print(f"早停在epoch {epoch + 1}, 最佳IoU: {best_iou:.4f}")
            break

    # Restore the best checkpoint before returning
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, best_iou, train_history


def evaluate_model(model, dataloader, device, threshold=0.5):
    """Average `calculate_metrics_fixed` over all batches (no gradients)."""
    model.eval()
    sums = {'iou': 0.0, 'f1': 0.0, 'precision': 0.0, 'recall': 0.0, 'accuracy': 0.0}
    batches = 0

    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            batch_metrics = calculate_metrics_fixed(model(images), masks, threshold)

            for key in sums:
                sums[key] += batch_metrics[key]
            batches += 1

    # Mean of every metric across batches.
    return {key: total / batches for key, total in sums.items()}

def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

# ==== 照片版评估所需的4个函数 ====
import gc

def evaluate_performance_spec(model, dataloader, device, threshold=0.5):
    """Dataset-level pixel accuracy and precision (spec-report block)."""
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            cloud_prob = torch.softmax(model(images), dim=1)[:, 1, :, :]
            predicted = (cloud_prob > threshold).to(masks.dtype)

            pred_flat = predicted.view(-1).float()
            true_flat = masks.view(-1).float()

            # Accumulate confusion counts over the whole dataset.
            tp += float((pred_flat * true_flat).sum().item())
            fp += float((pred_flat * (1 - true_flat)).sum().item())
            fn += float(((1 - pred_flat) * true_flat).sum().item())
            tn += float(((1 - pred_flat) * (1 - true_flat)).sum().item())

    accuracy  = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def _measure_inference_memory_mb(model, sample_images, device):
    """
    Incremental memory cost (MB) of one forward pass over *sample_images*.

    - CUDA: measured as peak allocated memory minus the pre-pass allocation,
      using torch.cuda's peak-memory statistics.
    - CPU: estimated as the process RSS delta around the pass; this is a
      rough proxy since the allocator may reuse or retain memory.

    A first forward pass warms the model up so one-time lazy allocations
    do not inflate the measurement.
    """
    process = psutil.Process(os.getpid())

    model.eval()
    with torch.no_grad():
        _ = model(sample_images)  # warm-up pass

    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        start = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_images)
        peak = torch.cuda.max_memory_allocated(device)
        delta_bytes = max(peak - start, 0)
        mem_mb = delta_bytes / (1024 * 1024)
        torch.cuda.empty_cache()
        return float(mem_mb)

    # CPU path: let the allocator settle, then diff the process RSS.
    gc.collect()
    time.sleep(0.02)
    rss_before = process.memory_info().rss
    with torch.no_grad():
        _ = model(sample_images)
    gc.collect()
    time.sleep(0.02)
    rss_after = process.memory_info().rss
    delta_bytes = max(rss_after - rss_before, 0)
    return float(delta_bytes / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """Efficiency metrics (spec-report block): params, size (MB), memory (MB)."""
    # Grab the first batch to drive the memory measurement.
    first_batch = next(iter(dataloader), None)
    if first_batch is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")
    sample_images = first_batch[0].to(device)

    return {
        "params": int(count_parameters(model)),
        "model_size_mb": float(get_model_size_mb(model)),
        "memory_mb": float(_measure_inference_memory_mb(model, sample_images, device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """
    Two-section evaluation report:
    - model performance: accuracy and precision
    - model efficiency: parameter count, model size (MB), memory use (MB)

    The Chinese dict keys are intentional: they match the downstream
    JSON report format.
    """
    performance = evaluate_performance_spec(model, dataloader, device, threshold=threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    return {
        "模型性能": {
            "准确率": performance["accuracy"],
            "精度":   performance["precision"],
        },
        "模型效率": {
            "参数数量":     efficiency["params"],
            "模型大小(MB)": efficiency["model_size_mb"],
            "内存占用(MB)": efficiency["memory_mb"],
        }
    }

def create_dataloaders(args):
    """Build train/val DataLoaders from the 38-Cloud training split (80/20)."""
    print("准备 38-Cloud 真实数据集...")

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406, 0.5],
                                     std=[0.229, 0.224, 0.225, 0.2])

    dataset = CloudDetectionDataset(
        root_dir=args.data_root,
        mode='train',
        transform=normalize
    )

    # Random 80/20 split of the training patches.
    n_train = int(0.8 * len(dataset))
    train_dataset, val_dataset = random_split(dataset, [n_train, len(dataset) - n_train])

    # Windows + rasterio is more stable with num_workers=0; pin memory on CUDA.
    loader_kwargs = dict(
        batch_size=args.batch_size,
        num_workers=0,
        pin_memory=torch.cuda.is_available()
    )
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    val_loader = DataLoader(val_dataset, shuffle=False, **loader_kwargs)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(val_dataset)}")
    return train_loader, val_loader


def save_results(results, output_dir):
    """Write *results* to ``compression_results.json`` in *output_dir*.

    Recursively converts numpy containers and scalars into JSON-safe
    Python types first. Using ``np.generic`` covers every numpy scalar
    (including ``np.bool_``), which the previous integer/float-only check
    missed and which makes ``json.dump`` raise TypeError.
    """
    results_path = os.path.join(output_dir, 'compression_results.json')

    def convert_types(obj):
        # Arrays -> nested lists; scalars -> native Python via .item().
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, np.generic):
            return obj.item()
        elif isinstance(obj, dict):
            return {key: convert_types(value) for key, value in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return [convert_types(item) for item in obj]
        else:
            return obj

    converted_results = convert_types(results)

    with open(results_path, 'w', encoding='utf-8') as f:
        json.dump(converted_results, f, indent=2, ensure_ascii=False)

    print(f"结果已保存到: {results_path}")

def save_spec_report(report, output_dir, name="spec_report.json"):
    """Dump the spec-style evaluation report as pretty-printed UTF-8 JSON."""
    target = os.path.join(output_dir, name)
    with open(target, "w", encoding="utf-8") as fh:
        json.dump(report, fh, indent=2, ensure_ascii=False)
    print(f"照片版模型评估已保存到: {target}")

def ensure_dataset_available(args):
    """Ensure the 38-Cloud training data exists under ``args.data_root``.

    If the expected ``38-Cloud_training/train_*`` layout is missing,
    downloads the dataset via kagglehub and rewrites ``args.data_root``
    to point at the download location.

    Returns:
        The validated data root path.

    Raises:
        RuntimeError: if kagglehub is unavailable, the download fails, or
            the downloaded data does not contain the expected layout.
            (Previously the last case returned ``None``, which the caller
            assigned to ``args.data_root`` and only surfaced later as a
            confusing path error — now it fails fast and consistently.)
    """
    # Fast path: the directory already has the full training structure.
    if os.path.isdir(args.data_root) and _has_train_structure(args.data_root):
        return args.data_root

    print("[auto] 数据目录不完整，尝试自动下载数据集...")

    try:
        import kagglehub
    except ImportError:
        raise RuntimeError("未安装 kagglehub。请运行 `pip install kagglehub`")

    # Download the dataset.
    try:
        download_path = kagglehub.dataset_download("sorour/38cloud-cloud-segmentation-in-satellite-images")
        download_path = os.path.normpath(download_path)
        print(f"[auto] 下载完成：{download_path}")
    except Exception as e:
        raise RuntimeError(f"数据集下载失败：{e}") from e

    # Locate the expected structure inside the download.
    if _has_train_structure(download_path):
        args.data_root = download_path
        print(f"[auto] 数据已准备好：{args.data_root}")
        return args.data_root

    raise RuntimeError("[auto] 数据集目录结构异常，无法找到有效数据")

def _has_train_structure(root):
    """
    检查指定目录下是否包含预期的 38-Cloud 数据集结构
    """
    expected_dirs = [
        os.path.join(root, "38-Cloud_training", "train_red"),
        os.path.join(root, "38-Cloud_training", "train_green"),
        os.path.join(root, "38-Cloud_training", "train_blue"),
        os.path.join(root, "38-Cloud_training", "train_nir"),
        os.path.join(root, "38-Cloud_training", "train_gt"),
    ]
    return all(os.path.isdir(d) for d in expected_dirs)

def get_args():
    """Parse CLI arguments and sanity-check the dataset location."""
    parser = argparse.ArgumentParser(description="Cloud-Net 模型训练 + 张量分解压缩 + 微调（38-Cloud）")

    # Default data root: ../CloudDetection relative to this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(os.path.join(script_dir, "..", "CloudDetection"))

    # Optional environment override, e.g.
    # set CLOUD_DATA_ROOT=E:\USM\year2sem3\实习数据集\CloudDetection
    env_root = os.environ.get("CLOUD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    parser.add_argument("--data-root", default=default_data_root, help="38-Cloud 数据集根目录")
    parser.add_argument("--output-dir", default="./output", help="输出目录")
    parser.add_argument("--epochs", type=int, default=25, help="训练轮数")
    parser.add_argument("--batch-size", type=int, default=8, help="批次大小")
    parser.add_argument("--learning-rate", type=float, default=1e-3, help="学习率")
    parser.add_argument("--weight-decay", type=float, default=1e-4, help="权重衰减")
    parser.add_argument("--decompose-rank", type=int, default=16, help="CP 分解秩")
    parser.add_argument("--ft-epochs", type=int, default=3, help="分解后微调轮数")
    parser.add_argument("--ft-lr", type=float, default=1e-4, help="分解后微调学习率")

    args = parser.parse_args()

    # Friendly check of the five training sub-directories only, so a missing
    # test_* split does not trigger a false alarm.
    sub_names = ("train_red", "train_green", "train_blue", "train_nir", "train_gt")
    expected_train_dirs = [
        os.path.join(args.data_root, "38-Cloud_training", sub) for sub in sub_names
    ]
    missing = [p for p in expected_train_dirs if not os.path.isdir(p)]
    if missing:
        print("[提示] 数据目录可能不完整或路径不对：")
        for p in missing:
            print("  - 缺少目录：", p)
        print("你可以通过命令行覆盖默认路径，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CloudDetection"')
        print("或设置环境变量 CLOUD_DATA_ROOT 指向数据根目录。")
        print("\n[auto] 自动尝试下载数据集...")
        args.data_root = ensure_dataset_available(args)  # auto-download fallback

    return args

def main():
    """End-to-end pipeline: train -> CP-decompose -> fine-tune -> report."""
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Fix random seeds for reproducibility
    torch.manual_seed(42)
    np.random.seed(42)

    # Data loading
    train_loader, val_loader = create_dataloaders(args)

    # ===== STAGE 1: base training =====
    print("=== STAGE: TRAIN ===")
    model = CloudNet(num_classes=2, dropout_rate=0.1).to(device)
    model, orig_iou, _ = train_model(model, train_loader, val_loader, device, args)
    orig_size   = get_model_size_mb(model)
    orig_params = count_parameters(model)
    torch.save(model.state_dict(), os.path.join(args.output_dir, "cloudnet_trained.pth"))
    print(f"训练完成，原始模型 IoU={orig_iou:.4f}, 大小={orig_size:.2f}MB, 参数={orig_params:,}")

    # Spec-style evaluation of the original model
    spec_orig = build_eval_report_spec(model, val_loader, device, threshold=0.5)
    print("【原始模型-照片版评估】", spec_orig)

    # ===== STAGE 2: CP tensor decomposition (in place of weight sharing) =====
    print("=== STAGE: TENSOR DECOMPOSITION ===")
    model.load_state_dict(torch.load(os.path.join(args.output_dir, "cloudnet_trained.pth"), map_location=device))
    model.eval()
    shared_model = weight_sharing(
        model,
        rank=args.decompose_rank,
        device=device
    )
    torch.save(shared_model.state_dict(), os.path.join(args.output_dir, "cloudnet_decomposed.pth"))
    shared_iou    = evaluate_model(shared_model, val_loader, device)["iou"]
    shared_size   = get_model_size_mb(shared_model)
    shared_params = count_parameters(shared_model)
    print(f"分解后模型 IoU={shared_iou:.4f}, 大小={shared_size:.2f}MB, 参数={shared_params:,}")

    # Spec-style evaluation after decomposition
    spec_decomp = build_eval_report_spec(shared_model, val_loader, device, threshold=0.5)
    print("【分解后模型-照片版评估】", spec_decomp)

    # ===== STAGE 3: fine-tune the decomposed model =====
    print("=== STAGE: FINE-TUNE AFTER DECOMPOSITION ===")
    # Reuse train_model with a reduced epoch count and learning rate.
    ft_args = argparse.Namespace(
        epochs        = args.ft_epochs,
        learning_rate = args.ft_lr,
        weight_decay  = args.weight_decay
    )
    shared_model = shared_model.to(device)
    shared_model, ft_iou, _ = train_model(shared_model, train_loader, val_loader, device, ft_args)
    torch.save(shared_model.state_dict(), os.path.join(args.output_dir, "cloudnet_decomposed_ft.pth"))
    ft_size   = get_model_size_mb(shared_model)
    ft_params = count_parameters(shared_model)
    print(f"微调后模型 IoU={ft_iou:.4f}, 大小={ft_size:.2f}MB, 参数={ft_params:,}")

    # Spec-style evaluation after fine-tuning
    spec_ft = build_eval_report_spec(shared_model, val_loader, device, threshold=0.5)
    print("【微调后模型-照片版评估】", spec_ft)

    # ===== Summarize and persist all results =====
    stats = {
        "original": {
            "iou": orig_iou, "size_mb": orig_size, "params": orig_params
        },
        "decomposed": {
            "iou": shared_iou, "size_mb": shared_size, "params": shared_params
        },
        "fine_tuned": {
            "iou": ft_iou, "size_mb": ft_size, "params": ft_params
        },
        "compression": {
            "rank": args.decompose_rank,
            "drop_pct": (orig_iou - shared_iou) / max(orig_iou, 1e-8) * 100.0,
            "ft_gain_pct": (ft_iou - shared_iou) / max(shared_iou, 1e-8) * 100.0
        },
        # Fold the spec-style ("photo") reports into the overall summary
        "spec_report": {
            "original":   spec_orig,
            "decomposed": spec_decomp,
            "fine_tuned": spec_ft
        }
    }

    save_results(stats, args.output_dir)

# Script entry point: run the full training/compression pipeline.
if __name__ == "__main__":
    main()