#!/usr/bin/env python
"""
卫星变化检测模型训练和压缩示例 - 改进版
基于Siamese UNet架构的变化检测模型
实现完整的训练流程和改进的模型压缩策略
"""
from PIL import Image
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision import transforms
import copy
import time
import numpy as np
import json
import gzip
import psutil, gc
import matplotlib.pyplot as plt
from tqdm import tqdm
import random
# Fix RNG seeds (PyTorch, NumPy, stdlib random) so runs are reproducible.
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

def compress_parameter_sharing(model):
    """
    Parameter-sharing compression: tie every Conv2d in ``model.encoder2``
    to the corresponding Conv2d in ``model.encoder1`` so both siamese
    branches reuse one set of weights/biases.

    Only convolution parameters are tied; BatchNorm layers stay
    independent.  Assumes encoder1/encoder2 are nn.Sequential with
    matching layer order.
    """
    for branch1_layer, branch2_layer in zip(model.encoder1, model.encoder2):
        both_conv = isinstance(branch1_layer, nn.Conv2d) and isinstance(branch2_layer, nn.Conv2d)
        if not both_conv:
            continue
        # Rebind branch 2's parameters to branch 1's Parameter objects.
        branch2_layer.weight = branch1_layer.weight
        if branch1_layer.bias is not None:
            branch2_layer.bias = branch1_layer.bias
    return model

class SimpleSiameseUNet(nn.Module):
    """Siamese UNet for bi-temporal change detection.

    Each input image passes through its own first-level encoder
    (encoder1 / encoder2); deeper encoders (encoder3 / encoder4) are
    shared between the branches.  The deepest features of both branches
    are concatenated and fused, and the decoder uses |branch1 - branch2|
    absolute-difference maps as skip connections.

    Input:  two tensors [B, in_channels, H, W] (H and W must be
            divisible by 4 because of the two 2x poolings).
    Output: logits [B, num_classes, H, W].
    """
    def __init__(self, in_channels=3, num_classes=2):
        super().__init__()
        # Fix: each branch gets its own level-1 encoder; weight sharing
        # (if desired) is applied afterwards via compress_parameter_sharing.

        # Branch-1 encoder (level 1): in_channels -> 64.
        self.encoder1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.pool1 = nn.MaxPool2d(2)

        # Branch-2 encoder (level 1): independent layers, no sharing.
        self.encoder2 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.pool2 = nn.MaxPool2d(2)

        # Level-2 encoder (shared by both branches): 64 -> 128.
        self.encoder3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )
        self.pool3 = nn.MaxPool2d(2)

        # Level-3 encoder (shared): 128 -> 256.
        self.encoder4 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )

        # Fusion of the concatenated branch features: 512 -> 256.
        self.fusion = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )
        self.dropout = nn.Dropout2d(0.3)

        # Decoder: upsample + concat difference skip (256 = 128 + 128).
        self.up1 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.dec1 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )

        self.up2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.dec2 = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )

        # 1x1 classification head: 64 -> num_classes logits.
        self.outc = nn.Conv2d(64, num_classes, kernel_size=1)

    def forward(self, x1, x2):
        """Return change logits [B, num_classes, H, W] for image pair (x1, x2)."""
        # Branch 1: level 1 -> pool -> level 2 -> pool -> level 3.
        e1a = self.encoder1(x1)
        p1 = self.pool1(e1a)
        e1b = self.encoder3(p1)
        p2 = self.pool3(e1b)
        e1c = self.encoder4(p2)

        # Branch 2: same pipeline (encoder3/encoder4 are shared modules).
        e2a = self.encoder2(x2)
        p1b = self.pool2(e2a)
        e2b = self.encoder3(p1b)
        p2b = self.pool3(e2b)
        e2c = self.encoder4(p2b)

        # Fuse the concatenated deepest features of both branches.
        fus = torch.cat([e1c, e2c], dim=1)
        fus = self.fusion(fus)
        fus = self.dropout(fus)

        # Decode stage 1: upsample + |difference| skip at level 2.
        u1 = self.up1(fus)
        d1 = torch.cat([u1, torch.abs(e1b - e2b)], dim=1)
        d1 = self.dec1(d1)

        # Decode stage 2: upsample + |difference| skip at level 1.
        u2 = self.up2(d1)
        d2 = torch.cat([u2, torch.abs(e1a - e2a)], dim=1)
        d2 = self.dec2(d2)

        # Per-pixel class logits.
        out = self.outc(d2)
        return out

# 改进的变化检测数据集
class CDDataset(Dataset):
    """
    Change-detection dataset over a directory tree:

        root_dir/
            train/, test/:  A/  B/  label/
            val/:           A/  B/          (no labels)

    Sample ids are the image filenames found in A/; the same filename is
    expected in B/ (and label/, when present).

    ``__getitem__`` returns ``(img1, img2, mask)`` where the images are
    float tensors in [0, 1] and the mask is an int64 tensor of shape
    [H, W] (all zeros for splits without a label directory).
    """
    def __init__(self, root_dir, split='train', transforms=None, img_size=None):
        super().__init__()
        self.dir_A     = os.path.join(root_dir, split, 'A')
        self.dir_B     = os.path.join(root_dir, split, 'B')
        self.dir_label = os.path.join(root_dir, split, 'label')
        # Every image file in A/ (png/jpg/tif) defines a sample id.
        self.ids = sorted([
            fname for fname in os.listdir(self.dir_A)
            if fname.lower().endswith(('.png','.jpg','.tif'))
        ])
        # Splits without a label directory (e.g. val) get dir_label=None.
        if not os.path.isdir(self.dir_label):
            self.dir_label = None
        # Fix: the optional transform was previously accepted but silently
        # ignored (its assignment was commented out); it is now stored and
        # applied to both image tensors in __getitem__.
        self.transforms = transforms
        self.img_size = img_size

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        fname = self.ids[idx]
        # Load both acquisition dates as RGB.
        img1 = Image.open(os.path.join(self.dir_A, fname)).convert('RGB')
        img2 = Image.open(os.path.join(self.dir_B, fname)).convert('RGB')

        # Optional resize to a square img_size x img_size.
        if self.img_size:
            img1 = img1.resize((self.img_size, self.img_size), Image.BILINEAR)
            img2 = img2.resize((self.img_size, self.img_size), Image.BILINEAR)

        # Read the mask when labels exist; any non-zero pixel = "change".
        if self.dir_label:
            mask_img = Image.open(os.path.join(self.dir_label, fname)).convert('L')
            if self.img_size:
                mask_img = mask_img.resize((self.img_size, self.img_size), Image.NEAREST)
            mask_np = np.array(mask_img) > 0
            mask = torch.from_numpy(mask_np.astype(np.int64))  # [H, W]
        else:
            # Fix: torch.zeros((None, None)) raised a TypeError when
            # img_size was unset; derive the shape from the image instead.
            width, height = img1.size  # PIL size is (W, H)
            mask = torch.zeros((height, width), dtype=torch.int64)  # [H, W]

        # PIL -> tensor, scaled to [0, 1].
        to_tensor = transforms.ToTensor()
        img1 = to_tensor(img1)
        img2 = to_tensor(img2)

        # Apply the user-supplied transform (if any) to both image tensors.
        if self.transforms is not None:
            img1 = self.transforms(img1)
            img2 = self.transforms(img2)

        return img1, img2, mask

def create_dataloaders(root_dir, batch_size=8, img_size=256):
    """
    Build DataLoaders for the train/test/val splits under ``root_dir``.
    train/test contain A/B/label; val only has A/B (no labels).
    Returns (train_loader, test_loader, val_loader).
    """
    print("准备真实数据集 ...")

    datasets = {
        split: CDDataset(root_dir, split=split, img_size=img_size)
        for split in ('train', 'test', 'val')
    }

    def _make_loader(split, shuffle):
        # Single-process loading; pinning disabled (CPU-friendly defaults).
        return DataLoader(datasets[split], batch_size=batch_size,
                          shuffle=shuffle, num_workers=0, pin_memory=False)

    train_loader = _make_loader('train', True)
    test_loader = _make_loader('test', False)
    val_loader = _make_loader('val', False)

    print(f" train: {len(datasets['train'])}  test: {len(datasets['test'])}  val: {len(datasets['val'])}")
    return train_loader, test_loader, val_loader

def get_args():
    """Parse command-line options for training, compression and fine-tuning."""
    ap = argparse.ArgumentParser(
        description="卫星变化检测模型（Siamese UNet + Parameter Sharing 压缩）"
    )

    # Default data root relative to this script: ..\..\实习数据集\CD
    here = os.path.dirname(os.path.abspath(__file__))
    data_root_default = os.path.normpath(
        os.path.join(here, "..", "..", "实习数据集", "CD")
    )

    # Optional override via environment variable:
    #   set CD_DATA_ROOT=E:\USM\year2sem3\实习数据集\CD
    env_root = os.environ.get("CD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        data_root_default = env_root

    # Data / output options.
    ap.add_argument(
        "--data-root", type=str, default=data_root_default,
        help="数据根目录，应包含 train/A, train/B, train/label 等子文件夹"
    )
    ap.add_argument("--output-dir", "-o", default="./output", help="模型和结果输出目录")
    ap.add_argument("--img-size", type=int, default=256, help="输入图像尺寸")
    ap.add_argument("--num-classes", type=int, default=2, help="类别数（变化/不变=2）")

    # Training options.
    ap.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数")
    ap.add_argument("--batch-size", "-b", type=int, default=8, help="批大小")
    ap.add_argument("--learning-rate", "-lr", type=float, default=1e-3, help="学习率")
    ap.add_argument("--weight-decay", type=float, default=1e-4, help="权重衰减")

    # Parameter-sharing options (currently only applied after training).
    ap.add_argument(
        "--disable-param-sharing",
        action="store_true",
        help="禁用参数共享压缩（默认开启）"
    )
    ap.add_argument(
        "--share-mode",
        choices=["after-train"],
        default="after-train",
        help="参数共享的应用时机（当前实现为训练后）"
    )

    # Post-compression fine-tuning options.
    ap.add_argument("--finetune-epochs", type=int, default=3, help="压缩后微调轮数")
    ap.add_argument("--finetune-lr", type=float, default=1e-4, help="压缩后微调学习率")

    parsed = ap.parse_args()

    # Friendly hint when the data directory cannot be found.
    if not os.path.isdir(parsed.data_root):
        print(f"[提示] 未找到数据目录: {parsed.data_root}")
        print("可用命令行参数覆盖，例如：")
        print(r'  python main.py --data-root "E:\USM\year2sem3\实习数据集\CD"')

    return parsed

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the class-1 ("change") probability map.

    pred: logits [B, 2, H, W]; target: labels [B, H, W].
    """
    change_prob = F.softmax(pred, dim=1)[:, 1]   # [B,H,W]
    gt = target.float()                          # [B,H,W]
    overlap = torch.sum(change_prob * gt)
    denom = torch.sum(change_prob) + torch.sum(gt) + smooth
    return 1 - (2 * overlap + smooth) / denom


def combined_loss(pred, target, alpha=0.7):
    """Weighted sum of cross-entropy and Dice loss.

    pred: logits [B, 2, H, W]; target: [B, H, W] or [B, 1, H, W] labels.
    Raises ValueError for any other target rank.
    """
    # Normalize target to [B, H, W].
    if target.dim() == 4:
        target = target.squeeze(1)
    elif target.dim() != 3:
        raise ValueError(f"Unexpected target shape: {target.shape}")

    ce_term = F.cross_entropy(pred, target)
    dice_term = dice_loss(pred, target)
    return alpha * ce_term + (1 - alpha) * dice_term


def calculate_iou(pred, target):
    """IoU of thresholded (0.5) class-1 predictions against ``target``.

    Returns 1.0 by convention when both prediction and target are empty.
    """
    probs = torch.softmax(pred, dim=1)
    binary = (probs[:, 1, :, :] > 0.5).float()
    # Accept target as [B,1,H,W] or [B,H,W].
    target = target.float().squeeze(1) if target.dim() == 4 else target.float()

    inter = (binary * target).sum()
    union = binary.sum() + target.sum() - inter

    if union == 0:
        return 1.0 if inter == 0 else 0.0

    return (inter / union).item()


def train_model(model, train_loader, test_loader, device, args):
    """Train the Siamese UNet, tracking and restoring the best-IoU weights.

    Args:
        model: two-input change-detection model (e.g. SimpleSiameseUNet).
        train_loader / test_loader: yield (img1, img2, mask) batches.
        device: torch.device for all tensors.
        args: parsed CLI args; reads epochs, learning_rate, weight_decay,
            output_dir.

    Returns:
        (model with the best checkpoint loaded, best test IoU, history dict
        with train_losses / train_ious / test_ious / best_test_iou).
    """
    print("开始训练Siamese UNet模型...")

    # Adam optimizer + step decay (lr x0.1 every 10 epochs).
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # Per-epoch history for plotting.
    train_losses = []
    train_ious = []
    test_ious = []

    best_test_iou = 0.0
    best_model_state = None

    # Main training loop.
    for epoch in range(args.epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_iou = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (img1, img2, masks) in enumerate(pbar):
            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(img1, img2)
            loss = combined_loss(outputs, masks)

            # Backward pass + parameter update.
            loss.backward()
            optimizer.step()

            # Per-batch IoU on the training batch.
            iou = calculate_iou(outputs, masks)

            # Accumulate epoch statistics.
            running_loss += loss.item()
            running_iou += iou
            num_batches += 1

            # Show live loss/IoU on the progress bar.
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'IoU': f'{iou:.4f}'
            })

        # Epoch averages over all batches.
        avg_train_loss = running_loss / num_batches
        train_iou = running_iou / num_batches

        # --- evaluation on the test split ---
        test_iou = evaluate_model(model, test_loader, device)

        # Record history.
        train_losses.append(avg_train_loss)
        train_ious.append(train_iou)
        test_ious.append(test_iou)

        # Keep a deep copy of the best-performing weights.
        if test_iou > best_test_iou:
            best_test_iou = test_iou
            best_model_state = copy.deepcopy(model.state_dict())

        # Decay the learning rate.
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {train_iou:.4f}, '
              f'Test IoU: {test_iou:.4f}')

    # Restore the best checkpoint before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试IoU: {best_test_iou:.4f}')

    # Save loss/IoU curves to output_dir.
    plot_training_curves(train_losses, train_ious, test_ious, args.output_dir)

    return model, best_test_iou, {
        'train_losses': train_losses,
        'train_ious': train_ious,
        'test_ious': test_ious,
        'best_test_iou': best_test_iou
    }


def finetune_model(model, train_loader, test_loader, device, epochs=5, lr=0.0001):
    """Fine-tune a (compressed) model for a few epochs at a low learning rate.

    Tracks the best test IoU during fine-tuning and restores that state
    at the end.  Returns (model, final_iou).
    """
    print(f"开始微调模型 ({epochs} 轮)...")

    # Small learning rate + mild weight decay for gentle adaptation.
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)

    initial_iou = evaluate_model(model, test_loader, device)
    print(f"微调前IoU: {initial_iou:.4f}")

    best_iou = initial_iou
    best_state = copy.deepcopy(model.state_dict())

    for epoch in range(epochs):
        model.train()
        epoch_loss = 0.0
        batch_count = 0

        progress = tqdm(train_loader, desc=f'Finetune {epoch + 1}/{epochs}')
        for img1, img2, masks in progress:
            img1 = img1.to(device)
            img2 = img2.to(device)
            masks = masks.to(device)

            optimizer.zero_grad()
            loss = combined_loss(model(img1, img2), masks)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            batch_count += 1
            progress.set_postfix({'Loss': f'{loss.item():.4f}'})

        # Evaluate after each epoch.
        test_iou = evaluate_model(model, test_loader, device)
        avg_loss = epoch_loss / batch_count

        print(f'Finetune Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.4f}, IoU: {test_iou:.4f}')

        # Remember the best checkpoint seen so far.
        if test_iou > best_iou:
            best_iou = test_iou
            best_state = copy.deepcopy(model.state_dict())

    # Restore the best state and report the final score.
    model.load_state_dict(best_state)
    final_iou = evaluate_model(model, test_loader, device)

    print(f"微调完成! 最终IoU: {final_iou:.4f} (提升: {final_iou - initial_iou:.4f})")

    return model, final_iou


def evaluate_model(model, dataloader, device):
    """Mean per-batch IoU of a change-detection model over ``dataloader``."""
    model.eval()
    iou_sum = 0.0
    batches = 0

    with torch.no_grad():
        for img1, img2, masks in dataloader:
            img1 = img1.to(device)
            img2 = img2.to(device)
            masks = masks.to(device)
            iou_sum += calculate_iou(model(img1, img2), masks)
            batches += 1

    return iou_sum / batches if batches > 0 else 0


def plot_training_curves(train_losses, train_ious, test_ious, output_dir):
    """Plot loss and IoU curves side by side and save the PNG in ``output_dir``."""
    xs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(12, 4))

    # Left panel: training loss.
    plt.subplot(1, 2, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: train vs. test IoU.
    plt.subplot(1, 2, 2)
    plt.plot(xs, train_ious, 'b-', label='Training IoU')
    plt.plot(xs, test_ious, 'r-', label='Test IoU')
    plt.title('Training and Test IoU')
    plt.xlabel('Epoch')
    plt.ylabel('IoU')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'training_curves.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_predictions(model, dataloader, device, output_dir, num_samples=4):
    """Save a grid visualizing predictions for the first ``num_samples`` batches.

    Each row shows (for the first sample of a batch): time-1 image,
    time-2 image, ground-truth mask, predicted mask.
    """
    model.eval()

    # Fix: squeeze=False keeps axes 2-D even when num_samples == 1;
    # previously plt.subplots(1, 4) returned a 1-D array and axes[i, 0]
    # raised an IndexError.
    fig, axes = plt.subplots(num_samples, 4, figsize=(16, 4 * num_samples),
                             squeeze=False)
    fig.suptitle('Change Detection Results', fontsize=16)

    with torch.no_grad():
        for i, (img1, img2, masks) in enumerate(dataloader):
            if i >= num_samples:
                break

            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)
            outputs = model(img1, img2)
            pred = torch.softmax(outputs, dim=1)[:, 1, :, :] > 0.5

            # Visualize only the first sample of each batch; clip images
            # into [0, 1] for display.
            img1_show = np.clip(img1[0].cpu().permute(1, 2, 0).numpy(), 0, 1)
            img2_show = np.clip(img2[0].cpu().permute(1, 2, 0).numpy(), 0, 1)
            mask_true = masks[0].cpu().numpy()
            mask_pred = pred[0].cpu().numpy()

            panels = [
                (img1_show, 'Time 1', None),
                (img2_show, 'Time 2', None),
                (mask_true, 'Ground Truth', 'gray'),
                (mask_pred, 'Prediction', 'gray'),
            ]
            for col, (image, title, cmap) in enumerate(panels):
                axes[i, col].imshow(image, cmap=cmap)
                axes[i, col].set_title(title)
                axes[i, col].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'predictions_visualization.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"预测可视化已保存到: {os.path.join(output_dir, 'predictions_visualization.png')}")


def get_model_size(model):
    """Total in-memory size of ``model``'s parameters, in MB."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def compute_compressed_size(model, bits=4, sparsity=0.7):
    """Theoretical size (MB) of ``model`` quantized to ``bits`` bits.

    Weights use sparse storage (4-byte index + quantized value per
    nonzero entry) when that is smaller than dense quantized storage;
    biases are kept in full fp32.  A 5% bookkeeping overhead is added.

    Returns (size_mb, actual_sparsity) where actual_sparsity is the
    fraction of zero weights among all parameters.
    """
    storage_bytes = 0.0
    total_params = 0
    nonzero_weights = 0

    for name, param in model.named_parameters():
        n = param.numel()
        total_params += n

        if 'weight' in name:
            nz = torch.count_nonzero(param).item()
            nonzero_weights += nz
            as_sparse = nz * (4 + bits / 8)
            as_dense = n * (bits / 8)
            storage_bytes += as_sparse if (sparsity > 0 and as_sparse < as_dense) else as_dense
        elif 'bias' in name:
            storage_bytes += n * 4  # fp32 bias

    # +5% overhead for shapes, scales, indices bookkeeping.
    size_mb = (storage_bytes * 1.05) / (1024 * 1024)
    actual = 1.0 - (nonzero_weights / total_params) if total_params > 0 else 0

    return size_mb, actual

def save_original_model(model, path):
    """Save ``model``'s state_dict to ``path`` and return the file size in bytes."""
    torch.save(model.state_dict(), path)
    return os.path.getsize(path)


def save_compressed_model_simple(model, path):
    """Save the compressed model as a plain PyTorch checkpoint.

    The checkpoint stores the state_dict plus metadata flags.
    Returns the resulting file size in bytes.
    """
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'model_class': 'SimpleSiameseUNet',
        'compression_applied': True,
    }
    torch.save(checkpoint, path)
    return os.path.getsize(path)


def load_compressed_model_simple(path, device=None):
    """Load a checkpoint written by ``save_compressed_model_simple``.

    Returns the model (moved to ``device`` when given), or None on failure.
    """
    try:
        checkpoint = torch.load(path, map_location=device if device else 'cpu')
        model = SimpleSiameseUNet()
        model.load_state_dict(checkpoint['model_state_dict'])
        return model.to(device) if device else model
    except Exception as e:
        print(f"加载模型失败: {e}")
        return None


def load_compressed_model(path, device=None):
    """Load a compressed model from a pickle sidecar or gzip+JSON file.

    Looks for ``<path with .pth -> _compressed.pkl>`` first; otherwise
    treats ``path`` as gzip-compressed JSON.  Restores each stored layer
    (dense biases, sparse/quantized weights) into a fresh
    SimpleSiameseUNet.  Returns the model, or None on load failure.

    NOTE(review): pickle.load can execute arbitrary code from the file —
    only load checkpoints from trusted sources.
    """
    try:
        # Prefer the pickle sidecar when it exists.
        pickle_path = path.replace('.pth', '_compressed.pkl')
        if os.path.exists(pickle_path):
            import pickle
            with open(pickle_path, 'rb') as f:
                compressed_info = pickle.load(f)
        else:
            # Otherwise expect gzip-compressed JSON.
            with gzip.open(path, 'rb') as f:
                json_str = f.read().decode('utf-8')
            compressed_info = json.loads(json_str)
    except Exception as e:
        print(f"加载压缩模型失败: {e}")
        return None

    model = SimpleSiameseUNet()

    for name, param in model.named_parameters():
        if name in compressed_info['compressed_layers']:
            info = compressed_info['compressed_layers'][name]

            # Biases are stored as dense float lists.
            if 'is_bias' in info and info['is_bias']:
                param.data = torch.tensor(info['values'], dtype=torch.float32)
                continue

            # Weights: start from zeros, then scatter the stored entries.
            shape = info['shape']
            param.data = torch.zeros(shape, dtype=torch.float32)

            if 'indices' in info and 'values' in info:
                try:
                    for indices, value in zip(info['indices'], info['values']):
                        if len(indices) == len(shape):
                            index = tuple(indices)
                            # De-quantize when scale/zero-point were stored.
                            if 'zero_point' in info and 'scale' in info:
                                dequantized_val = (float(value) - info['zero_point']) * info['scale']
                            else:
                                dequantized_val = float(value)
                            param.data[index] = dequantized_val
                except Exception as e:
                    print(f"恢复权重 {name} 时出错: {e}")
                    continue

    if device:
        model = model.to(device)

    return model

def _binarize_pred_seg(logits, threshold=0.5):
    """logits [B,2,H,W] -> 二值预测 [B,H,W]"""
    probs = torch.softmax(logits, dim=1)[:, 1, :, :]
    return (probs > threshold).to(torch.float32)

def evaluate_performance_spec_seg(model, dataloader, device, threshold=0.5):
    """Pixel-wise accuracy and precision for binary change segmentation.

    Returns {"accuracy": float, "precision": float} computed from global
    TP/FP/FN/TN counts over the whole dataloader.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for img1, img2, masks in dataloader:
            img1 = img1.to(device)
            img2 = img2.to(device)
            masks = masks.to(device)
            preds = _binarize_pred_seg(model(img1, img2), threshold)

            # Normalize masks from [B,1,H,W] down to [B,H,W] when needed.
            if masks.dim() == 4 and masks.size(1) == 1:
                masks = masks[:, 0, :, :]

            flat_pred = preds.reshape(-1).float()
            flat_true = masks.reshape(-1).float()

            tp += float((flat_pred * flat_true).sum().item())
            fp += float((flat_pred * (1 - flat_true)).sum().item())
            fn += float(((1 - flat_pred) * flat_true).sum().item())
            tn += float(((1 - flat_pred) * (1 - flat_true)).sum().item())

    accuracy  = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def count_parameters(model):
    """Number of trainable (requires_grad) parameters in ``model``."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

def _model_file_size_mb(model):
    """优先用真实保存文件得到大小；失败则估算"""
    try:
        import tempfile, os
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pth") as tmp:
            torch.save(model.state_dict(), tmp.name)
            size_mb = os.path.getsize(tmp.name) / (1024 * 1024)
        try: os.unlink(tmp.name)
        except: pass
        return float(size_mb)
    except Exception:
        # 4字节/参数 + 20% 冗余估算
        return float(count_parameters(model) * 4 / (1024 * 1024) * 1.2)

def _measure_inference_memory_mb(model, sample_img1, sample_img2, device):
    """Incremental memory (MB) consumed by one forward pass.

    CUDA: delta between allocated memory before the pass and the peak
    during it.  CPU: process RSS delta via psutil (returns 0.0 when
    psutil is unavailable or measurement fails).  Values are rough
    estimates, especially on CPU where RSS is noisy.
    """
    model.eval()
    with torch.no_grad():
        _ = model(sample_img1, sample_img2)  # warm-up pass so lazy allocations don't skew the measurement

    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        start = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_img1, sample_img2)
        peak = torch.cuda.max_memory_allocated(device)
        delta = max(peak - start, 0)
        torch.cuda.empty_cache()
        return float(delta / (1024 * 1024))

    # CPU: use the process RSS delta (requires psutil).
    try:
        import psutil, gc, time, os
        proc = psutil.Process(os.getpid())
        gc.collect(); time.sleep(0.02)  # settle allocations before sampling RSS
        rss_before = proc.memory_info().rss
        with torch.no_grad():
            _ = model(sample_img1, sample_img2)
        gc.collect(); time.sleep(0.02)
        rss_after = proc.memory_info().rss
        delta = max(rss_after - rss_before, 0)
        return float(delta / (1024 * 1024))
    except Exception:
        return 0.0

def evaluate_efficiency_spec(model, dataloader, device):
    """Efficiency metrics: parameter count, on-disk size (MB), inference memory (MB).

    Uses the first batch of ``dataloader`` as the sample input; raises
    RuntimeError when the dataloader is empty.
    """
    sample = None
    for img1, img2, _ in dataloader:
        sample = (img1.to(device), img2.to(device))
        break
    if sample is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")

    return {
        "params": int(count_parameters(model)),
        "model_size_mb": float(_model_file_size_mb(model)),
        "memory_mb": float(_measure_inference_memory_mb(model, sample[0], sample[1], device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Combine performance and efficiency metrics into one report dict."""
    performance = evaluate_performance_spec_seg(model, dataloader, device, threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    return {
        "模型性能": {"准确率": performance["accuracy"], "精度": performance["precision"]},
        "模型效率": {"参数数量": efficiency["params"], "模型大小(MB)": efficiency["model_size_mb"], "内存占用(MB)": efficiency["memory_mb"]},
    }

def main():
    """Pipeline: train baseline -> parameter-sharing compression -> fine-tune -> compare and report."""
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the three dataloaders (train/test/val).
    train_loader, test_loader, val_loader = create_dataloaders(
        root_dir=args.data_root,
        batch_size=args.batch_size,
        img_size=args.img_size
    )

    # 1) Train the uncompressed baseline model.
    model = SimpleSiameseUNet(num_classes=args.num_classes).to(device)
    model, best_iou, history = train_model(model, train_loader, test_loader, device, args)
    print(f"原始模型最佳测试 IoU: {best_iou:.4f}")

    # Persist the trained baseline weights.
    orig_path = os.path.join(args.output_dir, "siamese_unet_trained.pth")
    torch.save(model.state_dict(), orig_path)
    print(f"训练好的模型已保存到: {orig_path}")

    # Spec-style evaluation of the baseline (accuracy/precision + efficiency).
    try:
        spec_orig = build_eval_report_spec(model, test_loader, device, threshold=0.5)
        print("【原始模型-照片版评估】", spec_orig)
    except Exception as e:
        print(f"照片版评估(原始)失败: {e}")
        spec_orig = None

    # 2) Apply parameter-sharing compression on a deep copy of the model.
    print("\n>> 正在应用 Parameter Sharing 压缩策略")
    compressed_model = copy.deepcopy(model).to(device)
    compressed_model = compress_parameter_sharing(compressed_model)

    # 3) Fine-tune the compressed model.
    compressed_model, iou_after = finetune_model(
        compressed_model, train_loader, test_loader, device,
        epochs=args.finetune_epochs, lr=args.finetune_lr
    )

    # Spec-style evaluation after compression + fine-tuning.
    try:
        spec_shared = build_eval_report_spec(compressed_model, test_loader, device, threshold=0.5)
        print("【ParamSharing压缩后-照片版评估】", spec_shared)
    except Exception as e:
        print(f"照片版评估(ParamSharing)失败: {e}")
        spec_shared = None

    # 4) Compare baseline vs. compressed and persist the reports.
    drop = (best_iou - iou_after) * 100
    size_after, sparsity = compute_compressed_size(compressed_model)
    results = {
        "Baseline": (best_iou, 0.0, get_model_size(model), 0.0),
        "ParamSharing": (iou_after, drop, size_after, sparsity)
    }

    # Dump the spec-style evaluation reports as JSON.
    photo_spec_report = {"original": spec_orig, "param_sharing": spec_shared}
    spec_path = os.path.join(args.output_dir, "photo_spec_report.json")
    with open(spec_path, "w", encoding="utf-8") as f:
        json.dump(photo_spec_report, f, indent=2, ensure_ascii=False)
    print("照片版评估已保存到:", spec_path)

    # Console comparison table.
    print("\n" + "=" * 40 + " 最终对比 " + "=" * 40)
    print("方法           |  IoU   | 跌幅   | 大小(MB)  | 稀疏率   | 结论")
    print("-" * 60)
    for name, (iou, d, size, sp) in results.items():
        flag = "✅" if d < 2 else ("⚠️" if d < 10 else "❌")
        print(f"{name:14s} | {iou:6.4f} | {d:6.2f}% | {size:8.2f} | {sp:8.2%} | {flag}")

# Script entry point.
if __name__ == "__main__":
    main()
