#!/usr/bin/env python
"""
卫星变化检测模型训练和压缩示例 - 改进版
基于Siamese UNet架构的变化检测模型
实现完整的训练流程和改进的模型压缩策略
"""
from gettext import install
from PIL import Image
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision import transforms
import copy
import psutil, gc, time
import numpy as np
import json
import gzip
import io
import matplotlib.pyplot as plt
from tqdm import tqdm

class LowRankConv2d(nn.Module):
    """Low-rank replacement for a 3x3 convolution, built from a one-shot SVD.

    The original [in_ch -> out_ch] 3x3 convolution is split into two stages:
      conv1: 3x3, [in_ch -> rank], no bias (carries the spatial kernel)
      conv2: 1x1, [rank -> out_ch], carries the original bias (if any)
    """

    def __init__(self, in_ch, out_ch, rank, padding, bias):
        super().__init__()
        # Stage 1: spatial 3x3 projection down to `rank` channels.
        self.conv1 = nn.Conv2d(in_ch, rank, kernel_size=3,
                               padding=padding, bias=False)
        # Stage 2: pointwise 1x1 expansion back to `out_ch` channels.
        self.conv2 = nn.Conv2d(rank, out_ch, kernel_size=1,
                               padding=0, bias=bias)

    def forward(self, x):
        # Apply the two factorized stages in sequence.
        return self.conv2(self.conv1(x))


def svd_decomposition(weight: torch.Tensor, rank_ratio: float):
    """One-shot truncated SVD of a convolution weight.

    The weight [out_ch, in_ch, kh, kw] is flattened to [out_ch, in_ch*kh*kw],
    decomposed as U @ diag(S) @ Vh, and truncated to the top
    r = max(1, floor(num_singular_values * rank_ratio)) components.

    Args:
        weight: convolution kernel of shape [out_ch, in_ch, kh, kw]
                (square or rectangular spatial size).
        rank_ratio: fraction (0~1] of singular values to keep.

    Returns:
        w1: [out_ch, r, 1, 1] pointwise factor (U_r).
        w2: [r, in_ch, kh, kw] spatial factor (diag(S_r) @ Vh_r).
        r:  the retained rank (always >= 1).
    """
    out_ch, in_ch, kh, kw = weight.shape
    flat = weight.reshape(out_ch, -1)           # [out_ch, in_ch*kh*kw]
    U, S, Vh = torch.linalg.svd(flat, full_matrices=False)
    r = max(1, int(S.size(0) * rank_ratio))     # keep at least rank 1
    U_r = U[:, :r]                              # [out_ch, r]
    # Scale the rows of Vh by the singular values directly;
    # cheaper than materializing diag(S_r) and doing a matmul.
    SVh = S[:r].unsqueeze(1) * Vh[:r, :]        # [r, in_ch*kh*kw]
    w1 = U_r.unsqueeze(-1).unsqueeze(-1)        # [out_ch, r, 1, 1]
    w2 = SVh.view(r, in_ch, kh, kw)             # [r, in_ch, kh, kw]
    return w1, w2, r


def apply_lowrank(module: nn.Module, rank_ratio: float):
    """Recursively replace eligible 3x3 Conv2d layers with low-rank pairs.

    Each eligible conv is factorized once via SVD (see svd_decomposition) and
    swapped in place for nn.Sequential(conv3x3[in->r], conv1x1[r->out]).

    Only convs with stride 1, dilation 1 and groups == 1 are replaced: the
    low-rank layer hard-codes those defaults, so replacing e.g. a strided
    conv would silently change the network's output shape/behavior.

    Args:
        module: model (mutated in place).
        rank_ratio: fraction of singular values to keep per conv.
    """
    for name, child in list(module.named_children()):
        # Match plain 3x3 convs only (see guard rationale in the docstring).
        if (isinstance(child, nn.Conv2d)
                and child.kernel_size == (3, 3)
                and child.stride == (1, 1)
                and child.dilation == (1, 1)
                and child.groups == 1):
            # 1) Copy the original weight and bias.
            W = child.weight.data.clone()  # [out_ch, in_ch, 3, 3]
            B = child.bias.data.clone() if child.bias is not None else None

            # 2) One-shot SVD factorization.
            W1, W2, r = svd_decomposition(W, rank_ratio)
            # W2: [r, in_ch, 3, 3], W1: [out_ch, r, 1, 1]

            # 3) Build the low-rank replacement and load the factors.
            lr = LowRankConv2d(
                in_ch=child.in_channels,
                out_ch=child.out_channels,
                rank=r,
                padding=child.padding[0],
                bias=(B is not None)
            )
            lr.conv1.weight.data.copy_(W2)
            lr.conv2.weight.data.copy_(W1)
            if B is not None:
                lr.conv2.bias.data.copy_(B)

            # 4) Swap the factorized pair in place of the original conv.
            setattr(module, name, nn.Sequential(lr.conv1, lr.conv2))

        else:
            # Recurse into composite children.
            apply_lowrank(child, rank_ratio)



# SimpleSiameseUNet模型 - 改进版
# SimpleSiameseUNet model - improved version
class SimpleSiameseUNet(nn.Module):
    """Siamese UNet for bi-temporal change detection.

    Both input images pass through a shared encoder; the bottleneck features
    are concatenated and fused, then decoded with absolute-difference skip
    connections. Output is a per-pixel logit map of `num_classes` channels.
    """

    @staticmethod
    def _double_conv(in_ch, out_ch):
        """Two (conv3x3 -> BN -> ReLU) stages; module order keeps checkpoint keys stable."""
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def __init__(self, in_channels=3, num_classes=2):
        super().__init__()

        # Shared encoder (applied to both time points).
        self.encoder1 = self._double_conv(in_channels, 64)
        self.pool1 = nn.MaxPool2d(2)
        self.encoder2 = self._double_conv(64, 128)
        self.pool2 = nn.MaxPool2d(2)
        self.encoder3 = self._double_conv(128, 256)

        # Fuses the concatenated bottleneck features of both time points.
        self.fusion = self._double_conv(512, 256)

        # Decoder with transposed-conv upsampling.
        self.upconv1 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.decoder1 = self._double_conv(256, 128)
        self.upconv2 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.decoder2 = self._double_conv(128, 64)

        # Per-pixel class logits.
        self.output = nn.Conv2d(64, num_classes, 1)

        # Regularization applied to the fused bottleneck features.
        self.dropout = nn.Dropout2d(0.3)

    def forward(self, x1, x2):
        # Encode time point 1 with the shared encoder.
        e1a = self.encoder1(x1)
        e1b = self.encoder2(self.pool1(e1a))
        e1c = self.encoder3(self.pool2(e1b))

        # Encode time point 2 with the same weights.
        e2a = self.encoder1(x2)
        e2b = self.encoder2(self.pool1(e2a))
        e2c = self.encoder3(self.pool2(e2b))

        # Fuse both bottlenecks and regularize.
        fused = self.dropout(self.fusion(torch.cat([e1c, e2c], dim=1)))

        # Decode; skip connections carry absolute feature differences.
        up1 = self.upconv1(fused)
        d1 = self.decoder1(torch.cat([up1, torch.abs(e1b - e2b)], dim=1))

        up2 = self.upconv2(d1)
        d2 = self.decoder2(torch.cat([up2, torch.abs(e1a - e2a)], dim=1))

        # Per-pixel class logits.
        return self.output(d2)


# 改进的变化检测数据集
# Improved change-detection dataset
class CDDataset(Dataset):
    """Bi-temporal change-detection dataset.

    Expects root_dir/<split>/ containing:
      A/      - first-date images
      B/      - second-date images (same filenames as A/)
      label/  - binary change masks (optional; e.g. absent for the val split)

    __getitem__ returns (img1, img2, mask):
      img1, img2: float tensors in [0, 1], shape (3, H, W)
      mask:       int64 tensor, shape (1, H, W); all zeros when there is no
                  label directory (so batches collate uniformly across splits).
    """
    def __init__(self, root_dir, split='train', transforms=None, img_size=None):
        super().__init__()
        self.dir_A     = os.path.join(root_dir, split, 'A')
        self.dir_B     = os.path.join(root_dir, split, 'B')
        self.dir_label = os.path.join(root_dir, split, 'label')
        # Sample ids = all image filenames found in A/ (png/jpg/tif).
        self.ids = sorted([
            fname for fname in os.listdir(self.dir_A)
            if fname.lower().endswith(('.png', '.jpg', '.tif'))
        ])
        # Splits without a label/ directory (e.g. val) yield all-zero masks.
        if not os.path.isdir(self.dir_label):
            self.dir_label = None
        # Accepted for API compatibility; NOT applied in __getitem__.
        self.transforms = transforms
        self.img_size = img_size

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        fname = self.ids[idx]
        # Load both dates as RGB.
        img1 = Image.open(os.path.join(self.dir_A, fname)).convert('RGB')
        img2 = Image.open(os.path.join(self.dir_B, fname)).convert('RGB')

        # Optional resize to a square img_size x img_size.
        if self.img_size:
            img1 = img1.resize((self.img_size, self.img_size), Image.BILINEAR)
            img2 = img2.resize((self.img_size, self.img_size), Image.BILINEAR)

        # Load the change mask if available; otherwise produce an all-zero one.
        if self.dir_label:
            mask_img = Image.open(os.path.join(self.dir_label, fname)).convert('L')
            if self.img_size:
                # Nearest-neighbor keeps the mask strictly binary.
                mask_img = mask_img.resize((self.img_size, self.img_size), Image.NEAREST)
            mask_np = np.array(mask_img) > 0
            mask = torch.from_numpy(mask_np.astype(np.int64)).unsqueeze(0)  # shape (1, H, W)
        else:
            # Fixed: match the labeled branch's (1, H, W) shape (was (H, W)),
            # and size from the image so img_size=None cannot crash here.
            mask = torch.zeros((1, img1.height, img1.width), dtype=torch.int64)

        # PIL -> float tensor in [0, 1].
        to_tensor = transforms.ToTensor()
        img1 = to_tensor(img1)
        img2 = to_tensor(img2)

        return img1, img2, mask

def create_dataloaders(root_dir, batch_size=8, img_size=256):
    """Build train/test/val DataLoaders for the CD dataset layout.

    root_dir must contain train/, test/ and val/; train/test carry A/B/label,
    val only A/B (CDDataset then yields all-zero masks).

    Note: a previous revision defined an unused `my_transforms` helper here;
    resizing and tensor conversion are handled inside CDDataset via img_size.
    """
    print("准备真实数据集 ...")

    train_ds = CDDataset(root_dir, split='train', img_size=img_size)
    test_ds = CDDataset(root_dir, split='test', img_size=img_size)
    val_ds = CDDataset(root_dir, split='val', img_size=img_size)

    # num_workers=0 / pin_memory=False keeps loading portable (e.g. Windows).
    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True,
                              num_workers=0, pin_memory=False)
    test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=False,
                             num_workers=0, pin_memory=False)
    val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False,
                            num_workers=0, pin_memory=False)

    print(f" train: {len(train_ds)}  test: {len(test_ds)}  val: {len(val_ds)}")
    return train_loader, test_loader, val_loader


def get_args():
    """Parse command-line arguments.

    Covers data/output paths, core training hyper-parameters and the
    low-rank (one-shot SVD) compression settings. The default data root is
    ../CD relative to this script, overridable by the CD_DATA_ROOT
    environment variable or the --data-root flag.
    """
    parser = argparse.ArgumentParser(
        description="卫星变化检测模型（Low-Rank 版）—— 基于一次性 SVD 低秩分解"
    )

    # Default data root: ../CD relative to this script's directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(
        os.path.join(script_dir, "..", "CD")
    )

    # CD_DATA_ROOT overrides the relative default when it points to a real dir.
    env_root = os.environ.get("CD_DATA_ROOT")
    if env_root and os.path.isdir(env_root):
        default_data_root = env_root

    # Data & output.
    parser.add_argument("--data-root", type=str, default=default_data_root,
                        help="数据根目录，应包含 train/A, train/B, train/label 以及 test/、val/ 等子文件夹")
    parser.add_argument("--output-dir", "-o", default="./output", help="模型和结果输出目录")
    parser.add_argument("--img-size", type=int, default=256, help="输入图像的尺寸 (默认: 256)")
    parser.add_argument("--num-classes", type=int, default=2, help="变化检测类别数 (默认: 2)")

    # Training hyper-parameters.
    parser.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数 (默认: 25)")
    parser.add_argument("--batch-size", "-b", type=int, default=8, help="批大小 (默认: 8)")
    parser.add_argument("--learning-rate", "-lr", type=float, default=1e-3, help="学习率 (默认: 0.001)")
    parser.add_argument("--weight-decay", type=float, default=1e-4, help="权重衰减 (默认: 1e-4)")

    # Low-rank hyper-parameters.
    parser.add_argument("--rank-ratio", type=float, default=0.5,
                        help="SVD 低秩分解时保留的奇异值比例 (0~1)")
    parser.add_argument("--finetune-epochs", type=int, default=3,
                        help="低秩替换后微调轮数")
    parser.add_argument("--finetune-lr", type=float, default=1e-4,
                        help="低秩替换后微调学习率")

    args = parser.parse_args()

    # Friendly hint when the data directory is missing.
    if not os.path.isdir(args.data_root):
        print(f"[提示] 未找到数据目录: {args.data_root}")
        print("你可以：")
        # Fixed: the hinted relative path now matches the actual ../CD default
        # computed above (the old message pointed to a different directory).
        print('  1) 将数据放在脚本相对路径 ../CD')
        print('  2) 或用命令行覆盖:  python main.py --data-root "../../你的/CD/路径"')
        print('  3) 或设置环境变量:  Windows:  set CD_DATA_ROOT=D:\\path\\to\\CD')
        print('                         Linux/macOS:  export CD_DATA_ROOT=/path/to/CD')
    return args

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss on the change-class probability map.

    pred:   logits [B, 2, H, W]; target: labels [B, H, W] (0/1).
    smooth: additive term guarding against division by zero.
    """
    change_prob = F.softmax(pred, dim=1)[:, 1]      # [B, H, W]
    gt = target.float()                             # [B, H, W]
    overlap = torch.sum(change_prob * gt)
    denom = torch.sum(change_prob) + torch.sum(gt)
    return 1 - (2 * overlap + smooth) / (denom + smooth)


def combined_loss(pred, target, alpha=0.7):
    """Weighted sum of cross-entropy and Dice loss.

    pred:   logits [B, 2, H, W].
    target: labels [B, H, W] or [B, 1, H, W].
    alpha:  weight of the CE term; (1 - alpha) weights the Dice term.
    """
    # Drop a singleton channel dim so cross_entropy sees [B, H, W] labels.
    if target.dim() == 4 and target.size(1) == 1:
        target = target.squeeze(1)
    ce_term = F.cross_entropy(pred, target)
    dice_term = dice_loss(pred, target)
    return alpha * ce_term + (1 - alpha) * dice_term


def calculate_iou(pred, target):
    """IoU (intersection over union) of the predicted change mask.

    pred: logits [B, 2, H, W]; target: labels [B, 1, H, W] or [B, H, W].
    Returns a Python float.
    """
    # Threshold the change-class probability at 0.5.
    change_mask = (torch.softmax(pred, dim=1)[:, 1, :, :] > 0.5).float()
    gt = target.float().squeeze(1)

    inter = (change_mask * gt).sum()
    union = change_mask.sum() + gt.sum() - inter

    # Empty prediction AND empty ground truth count as a perfect match.
    if union == 0:
        return 1.0 if inter == 0 else 0.0
    return (inter / union).item()


def train_model(model, train_loader, test_loader, device, args):
    """Train the Siamese UNet change-detection model.

    Runs `args.epochs` epochs of Adam training (StepLR decay) with the
    combined CE/Dice loss, evaluates mean IoU on `test_loader` after every
    epoch, keeps the weights with the best test IoU, and saves the loss/IoU
    curves to `args.output_dir`.

    Returns:
        (model, best_test_iou, history) — `model` carries the best weights,
        `history` holds the per-epoch losses and IoUs.
    """
    print("开始训练Siamese UNet模型...")

    # Optimizer and LR schedule (decay x0.1 every 10 epochs).
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # Per-epoch history for plotting.
    train_losses = []
    train_ious = []
    test_ious = []

    best_test_iou = 0.0
    best_model_state = None

    # Main training loop.
    for epoch in range(args.epochs):
        # Training phase.
        model.train()
        running_loss = 0.0
        running_iou = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (img1, img2, masks) in enumerate(pbar):
            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(img1, img2)
            loss = combined_loss(outputs, masks)

            # Backward pass.
            loss.backward()
            optimizer.step()

            # Batch IoU (thresholded change-class probability).
            iou = calculate_iou(outputs, masks)

            # Accumulate running statistics.
            running_loss += loss.item()
            running_iou += iou
            num_batches += 1

            # Progress-bar readout.
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'IoU': f'{iou:.4f}'
            })

        # Epoch means of loss and IoU.
        avg_train_loss = running_loss / num_batches
        train_iou = running_iou / num_batches

        # Evaluation phase on the test split.
        test_iou = evaluate_model(model, test_loader, device)

        # Record history.
        train_losses.append(avg_train_loss)
        train_ious.append(train_iou)
        test_ious.append(test_iou)

        # Track the best checkpoint by test IoU.
        if test_iou > best_test_iou:
            best_test_iou = test_iou
            best_model_state = copy.deepcopy(model.state_dict())

        # Step the LR schedule once per epoch.
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {train_iou:.4f}, '
              f'Test IoU: {test_iou:.4f}')

    # Restore the best weights before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试IoU: {best_test_iou:.4f}')

    # Save the loss/IoU curves as a PNG.
    plot_training_curves(train_losses, train_ious, test_ious, args.output_dir)

    return model, best_test_iou, {
        'train_losses': train_losses,
        'train_ious': train_ious,
        'test_ious': test_ious,
        'best_test_iou': best_test_iou
    }


def finetune_model(model, train_loader, test_loader, device, epochs=5, lr=0.0001):
    """Fine-tune a (compressed) model for a few epochs at a small LR.

    Evaluates test IoU before training, keeps the best-scoring weights across
    epochs (including the initial state), restores them at the end and
    returns (model_with_best_weights, final_test_iou).
    """
    print(f"开始微调模型 ({epochs} 轮)...")

    # Small LR and light weight decay: the goal is only to recover accuracy.
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)

    initial_iou = evaluate_model(model, test_loader, device)
    print(f"微调前IoU: {initial_iou:.4f}")

    # The incoming weights are the best known state so far.
    best_iou = initial_iou
    best_state = copy.deepcopy(model.state_dict())

    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Finetune {epoch + 1}/{epochs}')
        for img1, img2, masks in pbar:
            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)

            optimizer.zero_grad()
            outputs = model(img1, img2)
            loss = combined_loss(outputs, masks)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            num_batches += 1

            pbar.set_postfix({'Loss': f'{loss.item():.4f}'})

        # Evaluate after each fine-tuning epoch.
        test_iou = evaluate_model(model, test_loader, device)
        avg_loss = running_loss / num_batches

        print(f'Finetune Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.4f}, IoU: {test_iou:.4f}')

        # Keep the best checkpoint.
        if test_iou > best_iou:
            best_iou = test_iou
            best_state = copy.deepcopy(model.state_dict())

    # Restore and re-score the best fine-tuned weights.
    model.load_state_dict(best_state)
    final_iou = evaluate_model(model, test_loader, device)

    print(f"微调完成! 最终IoU: {final_iou:.4f} (提升: {final_iou - initial_iou:.4f})")

    return model, final_iou


def evaluate_model(model, dataloader, device):
    """Mean per-batch IoU of the model over `dataloader` (0 if it is empty)."""
    model.eval()
    iou_sum = 0.0
    batches = 0

    with torch.no_grad():
        for img1, img2, masks in dataloader:
            outputs = model(img1.to(device), img2.to(device))
            iou_sum += calculate_iou(outputs, masks.to(device))
            batches += 1

    return iou_sum / batches if batches > 0 else 0


def plot_training_curves(train_losses, train_ious, test_ious, output_dir):
    """Save the loss and IoU curves as training_curves.png under output_dir."""
    xs = range(1, len(train_losses) + 1)
    save_path = os.path.join(output_dir, 'training_curves.png')

    plt.figure(figsize=(12, 4))

    # Left panel: training loss.
    plt.subplot(1, 2, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: train vs. test IoU.
    plt.subplot(1, 2, 2)
    plt.plot(xs, train_ious, 'b-', label='Training IoU')
    plt.plot(xs, test_ious, 'r-', label='Test IoU')
    plt.title('Training and Test IoU')
    plt.xlabel('Epoch')
    plt.ylabel('IoU')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {save_path}")


def visualize_predictions(model, dataloader, device, output_dir, num_samples=4):
    """Save a grid comparing (time 1, time 2, ground truth, prediction).

    Takes the first sample from each of the first `num_samples` batches of
    `dataloader` and writes the figure to predictions_visualization.png.
    """
    model.eval()

    # squeeze=False keeps `axes` 2-D even when num_samples == 1, so the
    # axes[i, j] indexing below cannot fail for a single-row figure.
    fig, axes = plt.subplots(num_samples, 4, figsize=(16, 4 * num_samples), squeeze=False)
    fig.suptitle('Change Detection Results', fontsize=16)

    with torch.no_grad():
        for i, (img1, img2, masks) in enumerate(dataloader):
            if i >= num_samples:
                break

            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)
            outputs = model(img1, img2)
            # Binary change map: change-class probability > 0.5.
            pred = torch.softmax(outputs, dim=1)[:, 1, :, :] > 0.5

            # Visualize only the first sample of each batch.
            img1_show = img1[0].cpu().permute(1, 2, 0).numpy()
            img2_show = img2[0].cpu().permute(1, 2, 0).numpy()
            mask_true = masks[0, 0].cpu().numpy()
            mask_pred = pred[0].cpu().numpy()

            # Clamp to [0, 1] for display.
            img1_show = np.clip(img1_show, 0, 1)
            img2_show = np.clip(img2_show, 0, 1)

            axes[i, 0].imshow(img1_show)
            axes[i, 0].set_title('Time 1')
            axes[i, 0].axis('off')

            axes[i, 1].imshow(img2_show)
            axes[i, 1].set_title('Time 2')
            axes[i, 1].axis('off')

            axes[i, 2].imshow(mask_true, cmap='gray')
            axes[i, 2].set_title('Ground Truth')
            axes[i, 2].axis('off')

            axes[i, 3].imshow(mask_pred, cmap='gray')
            axes[i, 3].set_title('Prediction')
            axes[i, 3].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'predictions_visualization.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"预测可视化已保存到: {os.path.join(output_dir, 'predictions_visualization.png')}")


def get_model_size(model):
    """Return the in-memory size of the model's parameters in MB."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def compute_compressed_size(model, bits=4, sparsity=0.7):
    """Theoretical compressed storage size of `model` in MB.

    Weights are assumed quantized to `bits` bits. For each weight tensor the
    cheaper of a sparse layout (4-byte index + quantized value per nonzero)
    and a dense layout is charged; biases stay float32. A 5% metadata
    overhead is added on top.

    Returns:
        (size_mb, zero_fraction) where zero_fraction counts zero-valued
        weight entries over ALL parameters (biases included in the total).
    """
    byte_total = 0.0
    param_total = 0
    weight_nonzeros = 0

    for name, p in model.named_parameters():
        n = p.numel()
        param_total += n

        if 'weight' in name:
            nz = torch.count_nonzero(p).item()
            weight_nonzeros += nz
            # Sparse: per-nonzero 4-byte index + bits/8-byte quantized value.
            sparse_bytes = nz * (4 + bits / 8)
            # Dense: quantized value for every element.
            dense_bytes = n * (bits / 8)
            if sparsity > 0 and sparse_bytes < dense_bytes:
                byte_total += sparse_bytes
            else:
                byte_total += dense_bytes
        elif 'bias' in name:
            # Biases kept at full float32 precision.
            byte_total += n * 4

    # 5% bookkeeping/metadata overhead.
    size_mb = (byte_total + byte_total * 0.05) / (1024 * 1024)
    zero_fraction = 1.0 - (weight_nonzeros / param_total) if param_total > 0 else 0
    return size_mb, zero_fraction

def compress_model(model, rank_ratio: float):
    """Compress `model` in place using only one-shot SVD low-rank factorization.

    Args:
        model: network whose 3x3 convs get factorized (mutated in place).
        rank_ratio: fraction of singular values to keep (0~1).

    Returns:
        The same (now factorized) model, for call-chaining convenience.
    """
    print(f"🔧 应用 Low-Rank 分解 (保留 {rank_ratio*100:.1f}% 奇异值)…")
    apply_lowrank(model, rank_ratio)
    return model

def save_original_model(model, path):
    """Persist the model's state_dict to `path`; return the file size in bytes."""
    state = model.state_dict()
    torch.save(state, path)
    return os.path.getsize(path)


def save_compressed_model_simple(model, path):
    """Save the compressed model as a plain PyTorch checkpoint.

    The checkpoint carries the state_dict plus small metadata fields;
    returns the resulting file size in bytes.
    """
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'model_class': 'SimpleSiameseUNet',
        'compression_applied': True,
    }
    torch.save(checkpoint, path)
    return os.path.getsize(path)


def load_compressed_model_simple(path, device=None):
    """Load a checkpoint saved by save_compressed_model_simple.

    Returns a SimpleSiameseUNet on success, or None on any failure.
    """
    try:
        map_loc = device if device else 'cpu'
        checkpoint = torch.load(path, map_location=map_loc)
        model = SimpleSiameseUNet()
        model.load_state_dict(checkpoint['model_state_dict'])
        return model.to(device) if device else model
    except Exception as e:
        print(f"加载模型失败: {e}")
        return None


def load_compressed_model(path, device=None):
    """Load a model saved in the custom compressed format.

    Tries, in order:
      1. a pickle file at `path` with '.pth' replaced by '_compressed.pkl';
      2. a gzip-compressed JSON file at `path`.
    The payload must carry a 'compressed_layers' dict mapping parameter names
    to either dense bias values or sparse (indices, values) entries with an
    optional (scale, zero_point) quantization pair.

    Returns a SimpleSiameseUNet with the restored weights, or None on load
    failure. NOTE(review): parameters absent from the payload keep their
    fresh random initialization — confirm that is intended.
    """
    try:
        # Prefer the pickle sidecar if it exists.
        pickle_path = path.replace('.pth', '_compressed.pkl')
        if os.path.exists(pickle_path):
            import pickle
            # NOTE(review): pickle.load can execute arbitrary code — only
            # load files from trusted sources.
            with open(pickle_path, 'rb') as f:
                compressed_info = pickle.load(f)
        else:
            # Fall back to gzip-compressed JSON.
            with gzip.open(path, 'rb') as f:
                json_str = f.read().decode('utf-8')
            compressed_info = json.loads(json_str)
    except Exception as e:
        print(f"加载压缩模型失败: {e}")
        return None

    model = SimpleSiameseUNet()

    # Rebuild each parameter tensor that the payload covers.
    for name, param in model.named_parameters():
        if name in compressed_info['compressed_layers']:
            info = compressed_info['compressed_layers'][name]

            # Biases are stored densely as a plain value list.
            if 'is_bias' in info and info['is_bias']:
                param.data = torch.tensor(info['values'], dtype=torch.float32)
                continue

            # Weights start from zeros; only the stored entries are filled in.
            shape = info['shape']
            param.data = torch.zeros(shape, dtype=torch.float32)

            if 'indices' in info and 'values' in info:
                try:
                    for indices, value in zip(info['indices'], info['values']):
                        # Skip malformed entries whose index rank mismatches the tensor.
                        if len(indices) == len(shape):
                            index = tuple(indices)
                            # De-quantize when scale/zero_point metadata is present.
                            if 'zero_point' in info and 'scale' in info:
                                dequantized_val = (float(value) - info['zero_point']) * info['scale']
                            else:
                                dequantized_val = float(value)
                            param.data[index] = dequantized_val
                except Exception as e:
                    print(f"恢复权重 {name} 时出错: {e}")
                    continue

    if device:
        model = model.to(device)

    return model

# ======== 照片版评估：性能(accuracy/precision) + 效率(params/size/memory) ========
def _binarize_pred_seg(logits, threshold=0.5):
    probs = torch.softmax(logits, dim=1)[:, 1, :, :]
    return (probs > threshold).to(torch.float32)

def evaluate_performance_spec_seg(model, dataloader, device, threshold=0.5):
    """Pixel-level binary classification metrics over `dataloader`.

    Accumulates a confusion matrix across all batches and returns
    {"accuracy": ..., "precision": ...} as plain floats.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0
    with torch.no_grad():
        for img1, img2, masks in dataloader:
            logits = model(img1.to(device), img2.to(device))
            preds = _binarize_pred_seg(logits, threshold)
            masks = masks.to(device)
            # Drop a singleton channel so masks align with [B, H, W] preds.
            if masks.dim() == 4 and masks.size(1) == 1:
                masks = masks[:, 0, :, :]
            p = preds.reshape(-1).float()
            t = masks.reshape(-1).float()
            tp += float((p * t).sum().item())
            fp += float((p * (1 - t)).sum().item())
            fn += float(((1 - p) * t).sum().item())
            tn += float(((1 - p) * (1 - t)).sum().item())
    return {
        "accuracy": float((tp + tn) / (tp + tn + fp + fn + eps)),
        "precision": float(tp / (tp + fp + eps)),
    }

def count_parameters(model):
    """Number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

def _model_file_size_mb(model):
    """用临时保存得到真实文件大小（失败时退化为估算）"""
    try:
        import tempfile, os
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pth") as tmp:
            torch.save(model.state_dict(), tmp.name)
            size_mb = os.path.getsize(tmp.name) / (1024 * 1024)
        try: os.unlink(tmp.name)
        except: pass
        return float(size_mb)
    except Exception:
        # 估算：float32 * 参数个数 * 1.2(一点额外开销)
        return float(count_parameters(model) * 4 / (1024 * 1024) * 1.2)

def _measure_inference_memory_mb(model, sample_img1, sample_img2, device):
    """Incremental memory (MB) of a single forward pass.

    CUDA devices: delta between allocated VRAM before the forward and the
    peak during it. CPU: process RSS delta via psutil (returns 0.0 when
    psutil is unavailable or measurement fails).
    """
    model.eval()
    with torch.no_grad():
        _ = model(sample_img1, sample_img2)  # warm-up pass (allocator caches etc.)

    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        start = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            _ = model(sample_img1, sample_img2)
        peak = torch.cuda.max_memory_allocated(device)
        # Clamp at 0 in case the peak never exceeded the starting allocation.
        delta = max(peak - start, 0)
        torch.cuda.empty_cache()
        return float(delta / (1024 * 1024))

    try:
        import psutil, gc, time, os
        proc = psutil.Process(os.getpid())
        # Let the allocator settle before sampling RSS.
        gc.collect(); time.sleep(0.02)
        rss_before = proc.memory_info().rss
        with torch.no_grad():
            _ = model(sample_img1, sample_img2)
        gc.collect(); time.sleep(0.02)
        rss_after = proc.memory_info().rss
        delta = max(rss_after - rss_before, 0)
        return float(delta / (1024 * 1024))
    except Exception:
        return 0.0  # psutil not installed (or measurement failed)

def evaluate_efficiency_spec(model, dataloader, device):
    """Efficiency metrics: parameter count, real file size (MB), inference memory (MB).

    Uses the first batch of `dataloader` as the sample input; raises
    RuntimeError when the loader yields nothing.
    """
    sample = None
    for img1, img2, _ in dataloader:
        sample = (img1.to(device), img2.to(device))
        break
    if sample is None:
        raise RuntimeError("dataloader 为空，无法评估效率指标。")

    return {
        "params": int(count_parameters(model)),
        "model_size_mb": float(_model_file_size_mb(model)),
        "memory_mb": float(_measure_inference_memory_mb(model, sample[0], sample[1], device)),
    }

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Combine the performance and efficiency metrics into one report dict."""
    perf = evaluate_performance_spec_seg(model, dataloader, device, threshold)
    eff = evaluate_efficiency_spec(model, dataloader, device)
    report = {
        "模型性能": {"准确率": perf["accuracy"], "精度": perf["precision"]},
        "模型效率": {
            "参数数量": eff["params"],
            "模型大小(MB)": eff["model_size_mb"],
            "内存占用(MB)": eff["memory_mb"],
        },
    }
    return report
# ========================================================================
def main():
    """Entry point: train, apply one-shot SVD low-rank replacement, fine-tune,
    run the 'photo-spec' evaluation, and summarize/save all results."""
    args = get_args()

    # 1) Output directory & compute device.
    os.makedirs(args.output_dir, exist_ok=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # 2) Data loaders.
    train_loader, test_loader, val_loader = create_dataloaders(
        root_dir=args.data_root,
        batch_size=args.batch_size,
        img_size=args.img_size
    )

    # 3) Train the original (uncompressed) model.
    model = SimpleSiameseUNet(num_classes=args.num_classes).to(device)
    model, best_iou, history = train_model(model, train_loader, test_loader, device, args)
    print(f"原始模型最佳测试 IoU: {best_iou:.4f}")

    # 4) Save the original model.
    orig_path = os.path.join(args.output_dir, "siamese_unet_trained.pth")
    torch.save(model.state_dict(), orig_path)
    print(f"训练好的模型已保存到: {orig_path}")

    # 5) Photo-spec evaluation of the original model (best-effort).
    try:
        spec_orig = build_eval_report_spec(model, test_loader, device, threshold=0.5)
        print("【原始模型-照片版评估】", spec_orig)
    except Exception as e:
        print(f"照片版评估(原始)失败: {e}")
        spec_orig = None

    # 6) One-shot SVD low-rank replacement (skipped when rank_ratio >= 1).
    if args.rank_ratio < 1.0:
        low_model = copy.deepcopy(model)
        print(f"🔧 应用一次性 SVD 低秩分解: 保留 {args.rank_ratio*100:.1f}% 奇异值 …")
        apply_lowrank(low_model, args.rank_ratio)
    else:
        low_model = copy.deepcopy(model)

    # 7) Fine-tune the low-rank model to recover accuracy.
    print(f"🛠 低秩模型微调 {args.finetune_epochs} 轮，lr={args.finetune_lr} …")
    low_model, low_iou = finetune_model(
        low_model, train_loader, test_loader, device,
        epochs=args.finetune_epochs, lr=args.finetune_lr
    )
    print(f"低秩模型微调后 IoU: {low_iou:.4f}")

    # 8) Photo-spec evaluation of the fine-tuned low-rank model (best-effort).
    try:
        spec_low = build_eval_report_spec(low_model, test_loader, device, threshold=0.5)
        print("【低秩模型-照片版评估】", spec_low)
    except Exception as e:
        print(f"照片版评估(低秩)失败: {e}")
        spec_low = None

    # 9) Summarize and grade the compression quality by IoU drop.
    iou_drop_percent = (best_iou - low_iou) * 100
    print(f"IoU 下降: {iou_drop_percent:.2f}%")
    if iou_drop_percent < 2:
        grade = "✅ 压缩效果: 优秀 (IoU下降 < 2%)"
    elif iou_drop_percent < 10:
        grade = "✅ 压缩效果: 良好 (IoU下降 < 10%)"
    elif iou_drop_percent < 20:
        grade = "⚠️ 压缩效果: 可接受 (IoU下降 < 20%)"
    else:
        grade = "❌ 压缩效果: 需要调整 (IoU下降过大)"
    print(grade)

    # 10) Save the low-rank model.
    low_path = os.path.join(args.output_dir, "siamese_unet_lowrank.pth")
    torch.save(low_model.state_dict(), low_path)
    print(f"低秩模型已保存到: {low_path}")

    # 11) Dump the photo-spec evaluation report as JSON.
    report = {
        "original": spec_orig,
        "low_rank": spec_low,
        "iou": {"original": float(best_iou), "low_rank": float(low_iou), "drop_percent": float(iou_drop_percent)},
        "conclusion": grade
    }
    report_path = os.path.join(args.output_dir, "photo_spec_report.json")
    with open(report_path, "w", encoding="utf-8") as f:
        json.dump(report, f, indent=2, ensure_ascii=False)
    print("照片版评估已保存到:", report_path)

    # 12) Terminal comparison table
    #     (file sizes are real on-disk sizes; params come from the photo-spec report).
    try:
        params_orig = spec_orig["模型效率"]["参数数量"] if spec_orig else count_parameters(model)
        params_low  = spec_low["模型效率"]["参数数量"] if spec_low  else count_parameters(low_model)
        size_orig   = spec_orig["模型效率"]["模型大小(MB)"] if spec_orig else _model_file_size_mb(model)
        size_low    = spec_low["模型效率"]["模型大小(MB)"]  if spec_low  else _model_file_size_mb(low_model)
    except Exception:
        params_orig = count_parameters(model); params_low = count_parameters(low_model)
        size_orig = _model_file_size_mb(model); size_low = _model_file_size_mb(low_model)

    print("\n" + "=" * 46 + " 最终对比 " + "=" * 46)
    print("版本          |  IoU   | 跌幅     | 参数量     | 文件大小(MB)")
    print("-" * 90)
    print(f"原始          | {best_iou:6.4f} | {'-':>7} | {params_orig:9d} | {size_orig:12.2f}")
    print(f"低秩(微调后)  | {low_iou:6.4f} | {iou_drop_percent:6.2f}% | {params_low:9d} | {size_low:12.2f}")
    print("=" * 90)

    # 13) Visualize predictions of the final (low-rank) model.
    visualize_predictions(low_model, test_loader, device, args.output_dir)

# Script entry point.
if __name__ == "__main__":
    main()
