#!/usr/bin/env python
"""
卫星变化检测模型训练和压缩示例 - 改进版
基于Siamese UNet架构的变化检测模型
实现完整的训练流程和改进的模型压缩策略
"""
from gettext import install
from PIL import Image
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision import transforms
import copy
import time
import numpy as np
import json
import gzip
import matplotlib.pyplot as plt
from tqdm import tqdm
import psutil, gc

# SimpleSiameseUNet模型 - 改进版
class SimpleSiameseUNet(nn.Module):
    """Siamese U-Net for bi-temporal change detection.

    Both input images run through the *same* (weight-tied) encoder. The
    bottleneck features of the two dates are concatenated and fused, and the
    decoder uses absolute-difference skip connections to localize changes.
    Output is a 2-class logit map at the input resolution.
    """

    def __init__(self, in_channels=3, num_classes=2):
        super(SimpleSiameseUNet, self).__init__()

        # --- shared encoder (feature extraction) ---
        self.encoder1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )

        self.pool1 = nn.MaxPool2d(2)

        self.encoder2 = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )

        self.pool2 = nn.MaxPool2d(2)

        self.encoder3 = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )

        # --- fusion of the two dates' bottleneck features ---
        self.fusion = nn.Sequential(
            nn.Conv2d(512, 256, 3, padding=1),  # 256 + 256 channels concatenated
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )

        # --- decoder ---
        self.upconv1 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.decoder1 = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),  # 128 (up) + 128 (skip)
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )

        self.upconv2 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.decoder2 = nn.Sequential(
            nn.Conv2d(128, 64, 3, padding=1),  # 64 (up) + 64 (skip)
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )

        # 1x1 projection to class logits.
        self.output = nn.Conv2d(64, num_classes, 1)

        # Regularization at the bottleneck.
        self.dropout = nn.Dropout2d(0.3)

    def _encode(self, x):
        """Run the shared encoder; return features at each scale (fine→coarse)."""
        f1 = self.encoder1(x)
        f2 = self.encoder2(self.pool1(f1))
        f3 = self.encoder3(self.pool2(f2))
        return f1, f2, f3

    def forward(self, x1, x2):
        """Predict change logits [B, num_classes, H, W] for an image pair."""
        # Weight-tied encoding of both acquisition dates.
        a1, a2, a3 = self._encode(x1)
        b1, b2, b3 = self._encode(x2)

        # Fuse the bottleneck features, then regularize.
        bottleneck = self.dropout(self.fusion(torch.cat([a3, b3], dim=1)))

        # Decoder with absolute-difference skip connections.
        up1 = self.upconv1(bottleneck)
        d1 = self.decoder1(torch.cat([up1, (a2 - b2).abs()], dim=1))

        up2 = self.upconv2(d1)
        d2 = self.decoder2(torch.cat([up2, (a1 - b1).abs()], dim=1))

        return self.output(d2)


# 改进的变化检测数据集
class CDDataset(Dataset):
    """Bi-temporal change-detection dataset.

    Expects ``root_dir/<split>/A`` and ``root_dir/<split>/B`` containing
    identically named image pairs. The ``train``/``test`` splits also have a
    ``label`` folder with binary change masks; ``val`` has none, so an
    all-zero mask is returned for it.

    Args:
        root_dir: dataset root containing train/ test/ val/.
        split: one of 'train', 'test', 'val'.
        transforms: unused; kept only for backward interface compatibility.
        img_size: if given, images (and masks) are resized to a square of
            this side length.

    Fixes vs. the original version:
        * masks now always have shape (1, H, W) regardless of split (the
          unlabeled path previously returned (H, W), breaking batch collation
          consistency);
        * the unlabeled path no longer crashes when ``img_size`` is None (the
          dummy mask now follows the actual image size);
        * removed a dead, commented-out assignment.
    """

    def __init__(self, root_dir, split='train', transforms=None, img_size=None):
        super().__init__()
        self.dir_A = os.path.join(root_dir, split, 'A')
        self.dir_B = os.path.join(root_dir, split, 'B')
        self.dir_label = os.path.join(root_dir, split, 'label')
        # File names under A/ serve as sample ids (same names expected in B/ and label/).
        self.ids = sorted(
            fname for fname in os.listdir(self.dir_A)
            if fname.lower().endswith(('.png', '.jpg', '.tif'))
        )
        # Splits without annotations (e.g. val) have no label directory.
        if not os.path.isdir(self.dir_label):
            self.dir_label = None
        self.img_size = img_size

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        fname = self.ids[idx]
        # Load the two acquisition dates as RGB.
        img1 = Image.open(os.path.join(self.dir_A, fname)).convert('RGB')
        img2 = Image.open(os.path.join(self.dir_B, fname)).convert('RGB')

        # Optional resize to a fixed square.
        if self.img_size:
            img1 = img1.resize((self.img_size, self.img_size), Image.BILINEAR)
            img2 = img2.resize((self.img_size, self.img_size), Image.BILINEAR)

        # Load the change mask when available; otherwise emit an all-zero
        # mask with the same (1, H, W) layout so every split is uniform.
        if self.dir_label:
            mask_img = Image.open(os.path.join(self.dir_label, fname)).convert('L')
            if self.img_size:
                mask_img = mask_img.resize((self.img_size, self.img_size), Image.NEAREST)
            mask_np = np.array(mask_img) > 0  # binarize: any nonzero pixel = change
            mask = torch.from_numpy(mask_np.astype(np.int64)).unsqueeze(0)  # (1, H, W)
        else:
            w, h = img1.size  # PIL reports (width, height)
            mask = torch.zeros((1, h, w), dtype=torch.int64)

        # PIL -> float tensor scaled to [0, 1].
        to_tensor = transforms.ToTensor()
        img1 = to_tensor(img1)
        img2 = to_tensor(img2)

        return img1, img2, mask

def create_dataloaders(root_dir, batch_size=8, img_size=256):
    """Build train/test/val dataloaders over a CD-style dataset tree.

    *root_dir* must contain train/, test/ and val/; train and test have
    A/B/label subfolders, val only A/B (its masks are all-zero dummies).

    Fix: removed an unused inner transform function that was defined but
    never wired into any dataset.

    Args:
        root_dir: dataset root directory.
        batch_size: batch size for all three loaders.
        img_size: square side length images are resized to.

    Returns:
        (train_loader, test_loader, val_loader)
    """
    print("准备真实数据集 ...")

    train_ds = CDDataset(root_dir, split='train', img_size=img_size)
    test_ds = CDDataset(root_dir, split='test', img_size=img_size)
    val_ds = CDDataset(root_dir, split='val', img_size=img_size)

    # Single-process loading without pinned memory; only the training split is shuffled.
    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True,
                              num_workers=0, pin_memory=False)
    test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=False,
                             num_workers=0, pin_memory=False)
    val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False,
                            num_workers=0, pin_memory=False)

    print(f" train: {len(train_ds)}  test: {len(test_ds)}  val: {len(val_ds)}")
    return train_loader, test_loader, val_loader

def get_args():
    """Parse command-line arguments for the training/compression pipeline.

    Returns:
        argparse.Namespace with data, training, compression, mode and
        evaluation settings.
    """
    parser = argparse.ArgumentParser(description="卫星变化检测模型训练和压缩示例（Siamese UNet + 照片版评估）")

    # Resolve the default dataset root relative to this script: ../../实习数据集/CD
    script_dir = os.path.dirname(os.path.abspath(__file__))
    default_data_root = os.path.normpath(os.path.join(script_dir, "..", "..", "实习数据集", "CD"))

    # Data and output
    parser.add_argument(
        "--data-root",
        type=str,
        default=default_data_root,
        help="数据集根目录，需包含 train/、test/、val/ 三个子目录"
    )
    parser.add_argument("--output-dir", "-o", default="./output", help="模型和结果的输出目录")
    parser.add_argument("--num-classes", type=int, default=2, help="类别数量 (默认: 2 - 变化/无变化)")
    parser.add_argument("--img-size", type=int, default=256, help="Resize 到的尺寸 (默认: 256)")

    # Training hyperparameters
    parser.add_argument("--epochs", "-e", type=int, default=25, help="训练轮数 (默认: 25)")
    parser.add_argument("--batch-size", "-b", type=int, default=8, help="批次大小 (默认: 8)")
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.001, help="学习率 (默认: 0.001)")
    parser.add_argument("--weight-decay", type=float, default=1e-4, help="权重衰减 (默认: 1e-4)")

    # Compression settings
    parser.add_argument("--compression-level", choices=['conservative', 'moderate', 'aggressive'],
                        default='conservative', help="压缩级别")
    parser.add_argument("--bits", type=int, default=None, help="量化位数 (覆盖 compression-level)")
    parser.add_argument("--sparsity", type=float, default=None, help="剪枝稀疏度 (覆盖 compression-level)")
    # Fix: the help text previously claimed the default was 1; the actual default is 3.
    parser.add_argument("--finetune-epochs", type=int, default=3, help="压缩后微调轮数 (默认: 3)")
    parser.add_argument("--progressive-compression", action='store_true', help="使用渐进式压缩")

    # Run mode
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    parser.add_argument("--pretrained-path", type=str, default=None, help="预训练模型路径 (仅压缩模式可用)")

    # Photo-style evaluation
    parser.add_argument("--threshold", type=float, default=0.5, help="分割阈值（照片版评估用）")

    return parser.parse_args()

def get_compression_params(level):
    """Return the quantization bits and pruning sparsity for a preset level."""
    bits, sparsity = {
        'conservative': (8, 0.3),
        'moderate': (6, 0.5),
        'aggressive': (4, 0.7),
    }[level]
    return {'bits': bits, 'sparsity': sparsity}

def dice_loss(pred, target, smooth=1e-5):
    """Soft Dice loss computed on the foreground (class-1) probability map.

    pred: [B, 2, H, W] logits; target: [B, H, W] binary labels.
    """
    fg_prob = F.softmax(pred, dim=1)[:, 1]          # [B, H, W]
    tgt = target.float()
    overlap = torch.sum(fg_prob * tgt)
    denom = torch.sum(fg_prob) + torch.sum(tgt)
    return 1 - (2 * overlap + smooth) / (denom + smooth)


def combined_loss(pred, target, alpha=0.7):
    """Weighted sum of cross-entropy and Dice loss.

    pred: [B, 2, H, W] logits; target: [B, H, W] (a [B, 1, H, W] target is
    squeezed first). alpha weights cross-entropy, (1 - alpha) weights Dice.
    """
    # Accept channel-first masks by dropping the singleton channel.
    if target.dim() == 4 and target.size(1) == 1:
        target = target.squeeze(1)
    ce_term = F.cross_entropy(pred, target)
    dice_term = dice_loss(pred, target)
    return alpha * ce_term + (1 - alpha) * dice_term


def calculate_iou(pred, target):
    """Binary IoU between thresholded class-1 predictions and the mask.

    pred: [B, 2, H, W] logits; target: [B, 1, H, W] binary mask.
    Returns a Python float; 1.0 when both prediction and mask are empty.
    """
    fg = (torch.softmax(pred, dim=1)[:, 1, :, :] > 0.5).float()
    gt = target.float().squeeze(1)

    inter = torch.sum(fg * gt)
    union = torch.sum(fg) + torch.sum(gt) - inter

    # Both empty counts as a perfect match by convention.
    if union == 0:
        return 1.0 if inter == 0 else 0.0
    return (inter / union).item()


def train_model(model, train_loader, test_loader, device, args):
    """Train the Siamese UNet and keep the weights of the best test-IoU epoch.

    Args:
        model: SimpleSiameseUNet instance (already on *device*).
        train_loader / test_loader: dataloaders yielding (img1, img2, mask).
        device: torch device to train on.
        args: parsed CLI namespace; uses epochs, learning_rate, weight_decay
            and output_dir.

    Returns:
        (model, best_test_iou, history_dict). *model* comes back loaded with
        the state_dict of the best-performing epoch; history_dict carries
        per-epoch losses and IoUs plus best_test_iou.
    """
    print("开始训练Siamese UNet模型...")

    # Adam with L2 regularization; LR decays 10x every 10 epochs.
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # Per-epoch history used for the training curves.
    train_losses = []
    train_ious = []
    test_ious = []

    best_test_iou = 0.0
    best_model_state = None

    # Main training loop.
    for epoch in range(args.epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        running_iou = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (img1, img2, masks) in enumerate(pbar):
            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(img1, img2)
            loss = combined_loss(outputs, masks)

            # Backward pass + parameter update.
            loss.backward()
            optimizer.step()

            # Batch IoU on the training outputs.
            iou = calculate_iou(outputs, masks)

            # Running statistics for the epoch averages.
            running_loss += loss.item()
            running_iou += iou
            num_batches += 1

            # Live progress-bar readout.
            pbar.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'IoU': f'{iou:.4f}'
            })

        # Epoch averages over all training batches.
        avg_train_loss = running_loss / num_batches
        train_iou = running_iou / num_batches

        # --- evaluation phase ---
        test_iou = evaluate_model(model, test_loader, device)

        # Record history.
        train_losses.append(avg_train_loss)
        train_ious.append(train_iou)
        test_ious.append(test_iou)

        # Snapshot the best model by test IoU.
        if test_iou > best_test_iou:
            best_test_iou = test_iou
            best_model_state = copy.deepcopy(model.state_dict())

        # Step the LR schedule once per epoch.
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train IoU: {train_iou:.4f}, '
              f'Test IoU: {test_iou:.4f}')

    # Restore the best snapshot before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试IoU: {best_test_iou:.4f}')

    # Persist the loss/IoU curves to output_dir.
    plot_training_curves(train_losses, train_ious, test_ious, args.output_dir)

    return model, best_test_iou, {
        'train_losses': train_losses,
        'train_ious': train_ious,
        'test_ious': test_ious,
        'best_test_iou': best_test_iou
    }


def finetune_model(model, train_loader, test_loader, device, epochs=5, lr=0.0001):
    """Briefly fine-tune a (compressed) model, keeping the best test-IoU weights.

    Returns:
        (model, final_iou) with the best state_dict loaded into *model*.
    """
    print(f"开始微调模型 ({epochs} 轮)...")

    # Small learning rate: we only want to recover accuracy lost to compression.
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)

    initial_iou = evaluate_model(model, test_loader, device)
    print(f"微调前IoU: {initial_iou:.4f}")

    best_iou = initial_iou
    best_state = copy.deepcopy(model.state_dict())

    for epoch in range(epochs):
        model.train()
        epoch_loss = 0.0
        batch_count = 0

        progress = tqdm(train_loader, desc=f'Finetune {epoch + 1}/{epochs}')
        for img1, img2, masks in progress:
            img1 = img1.to(device)
            img2 = img2.to(device)
            masks = masks.to(device)

            optimizer.zero_grad()
            outputs = model(img1, img2)
            loss = combined_loss(outputs, masks)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            batch_count += 1
            progress.set_postfix({'Loss': f'{loss.item():.4f}'})

        # Evaluate after each fine-tuning epoch.
        test_iou = evaluate_model(model, test_loader, device)
        avg_loss = epoch_loss / batch_count

        print(f'Finetune Epoch [{epoch + 1}/{epochs}] - Loss: {avg_loss:.4f}, IoU: {test_iou:.4f}')

        # Track the best-performing snapshot.
        if test_iou > best_iou:
            best_iou = test_iou
            best_state = copy.deepcopy(model.state_dict())

    # Restore the best snapshot and re-measure.
    model.load_state_dict(best_state)
    final_iou = evaluate_model(model, test_loader, device)

    print(f"微调完成! 最终IoU: {final_iou:.4f} (提升: {final_iou - initial_iou:.4f})")

    return model, final_iou


def evaluate_model(model, dataloader, device):
    """Return the mean per-batch IoU of *model* over *dataloader*."""
    model.eval()
    iou_sum = 0.0
    batches = 0

    with torch.no_grad():
        for img1, img2, masks in dataloader:
            img1 = img1.to(device)
            img2 = img2.to(device)
            masks = masks.to(device)
            iou_sum += calculate_iou(model(img1, img2), masks)
            batches += 1

    # Empty loader yields 0 rather than dividing by zero.
    return iou_sum / batches if batches else 0


def plot_training_curves(train_losses, train_ious, test_ious, output_dir):
    """Save a two-panel figure (loss, IoU) of the training history as PNG."""
    xs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(12, 4))

    # Left panel: training loss.
    plt.subplot(1, 2, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: train vs. test IoU.
    plt.subplot(1, 2, 2)
    plt.plot(xs, train_ious, 'b-', label='Training IoU')
    plt.plot(xs, test_ious, 'r-', label='Test IoU')
    plt.title('Training and Test IoU')
    plt.xlabel('Epoch')
    plt.ylabel('IoU')
    plt.legend()
    plt.grid(True)

    save_path = os.path.join(output_dir, 'training_curves.png')
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {save_path}")


def visualize_predictions(model, dataloader, device, output_dir, num_samples=4):
    """Save a grid of (time1, time2, ground truth, prediction) rows as PNG.

    Takes the first sample of each of the first *num_samples* batches.

    Fix: with num_samples == 1, plt.subplots returns a 1-D axes array and the
    axes[i, j] indexing below used to raise; the array is now forced to 2-D.
    """
    model.eval()

    fig, axes = plt.subplots(num_samples, 4, figsize=(16, 4 * num_samples))
    axes = np.atleast_2d(axes)  # guarantee axes[row, col] indexing works
    fig.suptitle('Change Detection Results', fontsize=16)

    with torch.no_grad():
        for i, (img1, img2, masks) in enumerate(dataloader):
            if i >= num_samples:
                break

            img1, img2, masks = img1.to(device), img2.to(device), masks.to(device)
            outputs = model(img1, img2)
            pred = torch.softmax(outputs, dim=1)[:, 1, :, :] > 0.5

            # Only the first sample of each batch is shown.
            img1_show = img1[0].cpu().permute(1, 2, 0).numpy()
            img2_show = img2[0].cpu().permute(1, 2, 0).numpy()
            mask_true = masks[0, 0].cpu().numpy()
            mask_pred = pred[0].cpu().numpy()

            # Clamp to [0, 1] for display.
            img1_show = np.clip(img1_show, 0, 1)
            img2_show = np.clip(img2_show, 0, 1)

            axes[i, 0].imshow(img1_show)
            axes[i, 0].set_title('Time 1')
            axes[i, 0].axis('off')

            axes[i, 1].imshow(img2_show)
            axes[i, 1].set_title('Time 2')
            axes[i, 1].axis('off')

            axes[i, 2].imshow(mask_true, cmap='gray')
            axes[i, 2].set_title('Ground Truth')
            axes[i, 2].axis('off')

            axes[i, 3].imshow(mask_pred, cmap='gray')
            axes[i, 3].set_title('Prediction')
            axes[i, 3].axis('off')

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'predictions_visualization.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"预测可视化已保存到: {os.path.join(output_dir, 'predictions_visualization.png')}")


def get_model_size(model):
    """Return the total in-memory size of *model*'s parameters in megabytes."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def compute_compressed_size(model, bits=4, sparsity=0.7):
    """Theoretical storage footprint (MB) of a pruned + quantized model.

    Each weight tensor is costed either as a sparse listing (4-byte index +
    bits/8-byte value per non-zero entry) or a dense low-bit array, whichever
    is smaller; biases stay float32. A flat 5% metadata overhead is added.

    Returns:
        (size_mb, actual_sparsity) where actual_sparsity is the measured
        zero fraction (weight non-zeros over *all* parameters).
    """
    total_bytes = 0.0
    param_total = 0
    weight_nonzeros = 0

    for name, tensor in model.named_parameters():
        count = tensor.numel()
        param_total += count

        if 'weight' in name:
            nonzero = torch.count_nonzero(tensor).item()
            weight_nonzeros += nonzero
            sparse_bytes = nonzero * (4 + bits / 8)
            dense_bytes = count * (bits / 8)
            # Sparse encoding only pays off when pruning made it smaller.
            if sparsity > 0 and sparse_bytes < dense_bytes:
                total_bytes += sparse_bytes
            else:
                total_bytes += dense_bytes
        elif 'bias' in name:
            total_bytes += count * 4  # biases kept in float32

    size_mb = total_bytes * 1.05 / (1024 * 1024)  # +5% metadata overhead
    measured_sparsity = 1.0 - (weight_nonzeros / param_total) if param_total > 0 else 0

    return size_mb, measured_sparsity


def quantize_weights(weight, bits=8):
    """Fake-quantize a tensor to *bits* bits.

    Returns (dequantized, scale, zero_point, quantized). For bits >= 16 the
    tensor is returned untouched (the other fields are None); the same
    happens for a (near-)constant tensor at sub-8-bit widths.
    """
    if bits >= 16:
        # Half precision or better: treated as lossless by this pipeline.
        return weight.clone(), None, None, None

    if bits == 8:
        # Symmetric int8 over [-127, 127]; clamp scale away from zero.
        scale = max(weight.abs().max(), 1e-8) / 127.0
        quantized = torch.clamp(torch.round(weight / scale), -127, 127)
        return quantized * scale, scale, torch.tensor(0.0), quantized

    # Sub-8-bit: asymmetric (affine) quantization over [qmin, qmax].
    qmin = -(2 ** (bits - 1))
    qmax = 2 ** (bits - 1) - 1
    lo, hi = weight.min(), weight.max()

    if abs(hi - lo) < 1e-8:
        # Constant tensor: nothing meaningful to quantize.
        return weight.clone(), None, None, None

    scale = (hi - lo) / (qmax - qmin)
    zero_point = qmin - lo / scale

    quantized = torch.clamp(torch.round(weight / scale + zero_point), qmin, qmax)
    return (quantized - zero_point) * scale, scale, zero_point, quantized


def prune_weights(weight, sparsity=0.3):
    """Magnitude-based pruning.

    4-D (conv) tensors get a hybrid scheme: the least-important output
    filters (by L1 mass) are zeroed entirely, then mild unstructured pruning
    runs inside the surviving filters. Other tensors get plain global
    magnitude pruning.

    Fix: the non-conv branch used to call torch.kthvalue with k == 0 when
    ``int(numel * sparsity)`` truncated to zero (small tensors / low
    sparsity), which raises; that case now prunes nothing.

    Returns:
        (pruned_weight, bool_mask); mask is None when sparsity <= 0.
    """
    if sparsity <= 0:
        return weight.clone(), None

    weight_abs = weight.abs()

    if weight.dim() == 4:  # convolutional kernel: (out, in, kh, kw)
        # L1 importance per output filter.
        filter_importance = weight_abs.sum(dim=(1, 2, 3))
        # The structured part removes at most half the requested sparsity in filters.
        num_keep = max(1, int(weight.shape[0] * (1 - sparsity * 0.5)))
        _, keep_idx = torch.topk(filter_importance, num_keep)

        mask = torch.zeros_like(weight).bool()
        mask[keep_idx] = True

        # Unstructured pruning inside the kept filters, at reduced strength.
        kept = weight[keep_idx]
        kept_abs = kept.abs().flatten()
        k = int(kept_abs.numel() * sparsity * 0.5)
        if 0 < k < kept_abs.numel():
            threshold = torch.kthvalue(kept_abs, k).values
            mask[keep_idx] = kept.abs() >= threshold
    else:
        flat = weight_abs.flatten()
        k = int(flat.numel() * sparsity)
        if k >= flat.numel():
            # Requested sparsity wipes out every entry.
            return torch.zeros_like(weight), torch.zeros_like(weight).bool()
        if k <= 0:
            # Tensor too small for this sparsity; keep everything.
            return weight.clone(), torch.ones_like(weight).bool()
        threshold = torch.kthvalue(flat, k).values
        mask = weight_abs >= threshold

    return weight * mask.float(), mask


def progressive_compress_model(model, target_bits=4, target_sparsity=0.7, steps=3):
    """Compress a deep-copied model in several gradual prune+quantize passes.

    Bit width shrinks from 32 toward *target_bits* while sparsity grows
    toward *target_sparsity*, one increment per step.
    """
    print(f"开始渐进式压缩 (目标: {target_bits}位, {target_sparsity}稀疏度, {steps}步)")

    start_bits = 32
    start_sparsity = 0.0
    bits_per_step = (start_bits - target_bits) / steps
    sparsity_per_step = target_sparsity / steps

    result = copy.deepcopy(model)

    for step in range(steps):
        # Clamp each step's schedule to the final targets.
        step_bits = max(target_bits, int(start_bits - bits_per_step * (step + 1)))
        step_sparsity = min(target_sparsity, start_sparsity + sparsity_per_step * (step + 1))

        print(f"步骤 {step + 1}: {step_bits}位量化, {step_sparsity:.2f}稀疏度")

        # Prune, then quantize, every weight tensor at this step's strength.
        with torch.no_grad():
            for name, param in result.named_parameters():
                if 'weight' in name:
                    pruned, _ = prune_weights(param.data, step_sparsity)
                    dequantized, _, _, _ = quantize_weights(pruned, step_bits)
                    param.copy_(dequantized)

    return result


def compress_model(model, bits=8, sparsity=0.3, progressive=False):
    """Prune and fake-quantize every weight tensor of a deep-copied model.

    Args:
        model: source model (left untouched; a deepcopy is modified).
        bits: quantization bit width; >= 16 skips quantization.
        sparsity: pruning sparsity in [0, 1); 0 disables pruning.
        progressive: if True with aggressive settings, delegates to
            progressive_compress_model.

    Returns:
        (compressed_model, model_info, overall_sparsity). model_info holds
        per-layer sparse records (indices, quantized values, scale,
        zero_point) for external serialization.
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")

    if progressive and (bits < 8 or sparsity > 0.5):
        # NOTE(review): this branch returns a bare model, not the
        # (model, info, sparsity) tuple returned below — callers that unpack
        # three values would fail here. Confirm intended contract.
        return progressive_compress_model(model, bits, sparsity)

    compressed_model = copy.deepcopy(model)
    compression_info = {}
    # model_info aliases compression_info under 'compressed_layers', so the
    # loop below fills both at once.
    model_info = {
        'model_type': 'SimpleSiameseUNet',
        'bits': bits,
        'sparsity': sparsity,
        'compressed_layers': compression_info
    }

    total_elements = 0
    zero_elements = 0

    for name, param in compressed_model.named_parameters():
        if 'weight' in name and param.numel() > 1:  # only weight tensors; skip scalars
            with torch.no_grad():
                original_param = param.data.clone()
                total_elements += param.numel()

                # Prune first ...
                if sparsity > 0:
                    pruned_weight, mask = prune_weights(original_param, sparsity)
                    zero_elements += param.numel() - torch.count_nonzero(pruned_weight).item()
                else:
                    pruned_weight = original_param
                    mask = torch.ones_like(original_param).bool()

                # ... then quantize the surviving weights.
                if bits < 16:
                    quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, bits)
                    if quantized_weight is not None:
                        param.copy_(quantized_weight)
                    else:
                        param.copy_(pruned_weight)
                        scale, zero_point, q_weight = None, None, None
                else:
                    param.copy_(pruned_weight)
                    scale, zero_point, q_weight = None, None, None

                # Record a sparse (indices + values) representation per layer.
                if mask is not None:
                    nonzero_mask = mask & (param.data.abs() > 1e-8)  # keep only truly non-zero entries
                    if torch.any(nonzero_mask):
                        nonzero_indices = nonzero_mask.nonzero(as_tuple=False).cpu().numpy().tolist()
                        # Prefer the integer quantized values when available so
                        # the loader can dequantize with scale/zero_point.
                        if q_weight is not None:
                            nonzero_values = q_weight[nonzero_mask].cpu().numpy().astype(np.float32).tolist()
                        else:
                            nonzero_values = param.data[nonzero_mask].cpu().numpy().astype(np.float32).tolist()

                        compression_info[name] = {
                            'shape': list(param.shape),
                            'indices': nonzero_indices,
                            'values': nonzero_values,
                            'scale': float(scale) if scale is not None else 1.0,
                            'zero_point': float(zero_point) if zero_point is not None else 0.0,
                            'bits': bits,
                            'original_dtype': str(original_param.dtype)
                        }
                    else:
                        # Layer fully pruned: store an empty sparse record.
                        compression_info[name] = {
                            'shape': list(param.shape),
                            'indices': [],
                            'values': [],
                            'scale': 1.0,
                            'zero_point': 0.0,
                            'bits': bits,
                            'original_dtype': str(original_param.dtype)
                        }
        elif 'bias' in name:
            # Biases are not compressed; stored densely as float32.
            compression_info[name] = {
                'shape': list(param.shape),
                'values': param.data.cpu().numpy().astype(np.float32).tolist(),
                'is_bias': True,
                'original_dtype': str(param.dtype)
            }

    overall_sparsity = zero_elements / total_elements if total_elements > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    return compressed_model, model_info, overall_sparsity


def save_original_model(model, path):
    """Serialize *model*'s state_dict to *path* and return the file size in bytes."""
    state = model.state_dict()
    torch.save(state, path)
    return os.path.getsize(path)


def save_compressed_model_simple(model, path):
    """Save the compressed model as a plain PyTorch checkpoint.

    Returns the resulting file size in bytes.
    """
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'model_class': 'SimpleSiameseUNet',
        'compression_applied': True,
    }
    torch.save(checkpoint, path)
    return os.path.getsize(path)


def load_compressed_model_simple(path, device=None):
    """Load a checkpoint written by save_compressed_model_simple.

    Returns the reconstructed SimpleSiameseUNet, or None on any failure.
    """
    try:
        checkpoint = torch.load(path, map_location=device if device else 'cpu')
        model = SimpleSiameseUNet()
        model.load_state_dict(checkpoint['model_state_dict'])
        return model.to(device) if device else model
    except Exception as e:
        print(f"加载模型失败: {e}")
        return None


def load_compressed_model(path, device=None):
    """Rebuild a SimpleSiameseUNet from a sparse compressed checkpoint.

    Tries a pickle sidecar ('<path with .pth -> _compressed.pkl>') first,
    then a gzip'd JSON file at *path*. Weights are restored entry-by-entry
    from the stored (indices, values, scale, zero_point) records produced by
    compress_model. Returns None when loading fails.
    """
    try:
        # Prefer the pickle sidecar when present.
        pickle_path = path.replace('.pth', '_compressed.pkl')
        if os.path.exists(pickle_path):
            import pickle
            with open(pickle_path, 'rb') as f:
                compressed_info = pickle.load(f)
        else:
            # Fall back to the gzip-compressed JSON format.
            with gzip.open(path, 'rb') as f:
                json_str = f.read().decode('utf-8')
            compressed_info = json.loads(json_str)
    except Exception as e:
        print(f"加载压缩模型失败: {e}")
        return None

    model = SimpleSiameseUNet()

    for name, param in model.named_parameters():
        if name in compressed_info['compressed_layers']:
            info = compressed_info['compressed_layers'][name]

            # Biases were stored densely; restore them verbatim.
            if 'is_bias' in info and info['is_bias']:
                param.data = torch.tensor(info['values'], dtype=torch.float32)
                continue

            # Weights start from zeros; only stored entries are filled in.
            shape = info['shape']
            param.data = torch.zeros(shape, dtype=torch.float32)

            if 'indices' in info and 'values' in info:
                try:
                    for indices, value in zip(info['indices'], info['values']):
                        if len(indices) == len(shape):
                            index = tuple(indices)
                            # Dequantize: real = (q - zero_point) * scale.
                            if 'zero_point' in info and 'scale' in info:
                                dequantized_val = (float(value) - info['zero_point']) * info['scale']
                            else:
                                dequantized_val = float(value)
                            param.data[index] = dequantized_val
                except Exception as e:
                    print(f"恢复权重 {name} 时出错: {e}")
                    continue

    if device:
        model = model.to(device)

    return model


def compare_compression_levels(model, test_loader, device, output_dir):
    """Sweep all preset compression levels; evaluate each and persist results as JSON."""
    print("比较不同压缩级别...")

    results = {}
    # The uncompressed size is the same for every sweep iteration.
    original_size = get_model_size(model)

    for level in ['conservative', 'moderate', 'aggressive']:
        params = get_compression_params(level)
        print(f"\n测试 {level} 压缩级别 (bits={params['bits']}, sparsity={params['sparsity']})...")

        # Compress and move onto the evaluation device.
        compressed_model, _, actual_sparsity = compress_model(
            model, params['bits'], params['sparsity']
        )
        compressed_model = compressed_model.to(device)

        # Accuracy after compression.
        compressed_iou = evaluate_model(compressed_model, test_loader, device)

        # In-memory sizes and the resulting ratio.
        compressed_size = get_model_size(compressed_model)
        compression_ratio = original_size / compressed_size

        results[level] = {
            'bits': params['bits'],
            'sparsity': params['sparsity'],
            'actual_sparsity': actual_sparsity,
            'iou': compressed_iou,
            'size_mb': compressed_size,
            'compression_ratio': compression_ratio,
        }

        print(f"{level}: IoU={compressed_iou:.4f}, 压缩率={compression_ratio:.2f}x")

    # Persist the sweep for later inspection.
    comparison_path = os.path.join(output_dir, "compression_comparison.json")
    with open(comparison_path, 'w') as f:
        json.dump(results, f, indent=2)

    return results

def _binarize_pred(logits, threshold=0.5):
    # logits: [B,2,H,W] -> prob of class-1 -> binary [B,H,W]
    probs = torch.softmax(logits, dim=1)[:, 1, :, :]
    return (probs > threshold).to(torch.float32)

def evaluate_performance_spec_seg(model, dataloader, device, threshold=0.5):
    """Pixel-level accuracy and precision for binary change segmentation.

    accuracy = (TP+TN)/all, precision = TP/(TP+FP), accumulated over every
    pixel in *dataloader*. Returns {"accuracy": float, "precision": float}.
    """
    model.eval()
    eps = 1e-8
    tp = fp = fn = tn = 0.0

    with torch.no_grad():
        for img1, img2, masks in dataloader:
            logits = model(img1.to(device), img2.to(device))
            pred = _binarize_pred(logits, threshold=threshold).reshape(-1).float()

            # Masks may arrive as [B,1,H,W] or [B,H,W]; flatten either way.
            target = masks.to(device)
            if target.dim() == 4 and target.size(1) == 1:
                target = target[:, 0, :, :]
            target = target.reshape(-1).float()

            tp += float(torch.sum(pred * target).item())
            fp += float(torch.sum(pred * (1 - target)).item())
            fn += float(torch.sum((1 - pred) * target).item())
            tn += float(torch.sum((1 - pred) * (1 - target)).item())

    accuracy = (tp + tn) / (tp + tn + fp + fn + eps)
    precision = tp / (tp + fp + eps)
    return {"accuracy": float(accuracy), "precision": float(precision)}

def _measure_inference_memory_mb(model, sample_batch, device):
    """
    Incremental memory (MB) consumed by one forward pass.

    - CUDA: peak-allocation delta reported by the caching allocator.
    - CPU: resident-set-size (RSS) growth of this process (needs psutil);
      note this is inherently noisy compared to the CUDA statistics.
    """
    model.eval()
    # Warm-up pass so one-time allocations (lazy init, workspaces) do not
    # pollute the measurement.
    with torch.no_grad():
        model(*sample_batch)

    if device.type == "cuda":
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats(device)
        baseline = torch.cuda.memory_allocated(device)
        with torch.no_grad():
            model(*sample_batch)
        peak = torch.cuda.max_memory_allocated(device)
        torch.cuda.empty_cache()
        return float(max(peak - baseline, 0) / (1024 * 1024))

    # CPU path: sample RSS before/after the forward pass, giving the GC a
    # short moment to settle on both sides.
    proc = psutil.Process(os.getpid())
    gc.collect()
    time.sleep(0.02)
    rss_before = proc.memory_info().rss
    with torch.no_grad():
        model(*sample_batch)
    gc.collect()
    time.sleep(0.02)
    rss_after = proc.memory_info().rss
    return float(max(rss_after - rss_before, 0) / (1024 * 1024))

def evaluate_efficiency_spec(model, dataloader, device):
    """
    Model efficiency ("photo spec"): parameter count, model size (MB) and
    inference memory footprint (MB).

    Args:
        model: network under evaluation.
        dataloader: yields (img1, img2, masks); only the first batch is used
            to drive the memory measurement.
        device: torch.device the sample batch is moved to.

    Returns:
        dict with keys "params", "model_size_mb", "memory_mb".

    Raises:
        RuntimeError: if the dataloader yields no batches.
    """
    # Grab a single batch (idiomatic next(iter(...)) instead of a one-pass loop).
    try:
        img1, img2, _ = next(iter(dataloader))
    except StopIteration:
        raise RuntimeError("dataloader 为空，无法评估效率指标。") from None
    sample = (img1.to(device), img2.to(device))

    # Trainable parameter count.
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    # Model size: serializing the state_dict to a temp file is more accurate
    # than a parameter-count estimate (includes serialization overhead).
    try:
        import tempfile
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pth') as tmp:
            torch.save(model.state_dict(), tmp.name)
            size_mb = os.path.getsize(tmp.name) / (1024 * 1024)
        try:
            os.unlink(tmp.name)
        except OSError:
            # Best-effort cleanup only; a leftover temp file is harmless.
            # (Narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
    except Exception:
        # Fallback: estimate from parameter count (float32, +20% overhead).
        size_mb = params * 4 / (1024 * 1024) * 1.2

    # Incremental memory of one forward pass on the sample batch.
    mem_mb = _measure_inference_memory_mb(model, sample, device)

    return {"params": int(params), "model_size_mb": float(size_mb), "memory_mb": float(mem_mb)}

def build_eval_report_spec(model, dataloader, device, threshold=0.5):
    """Assemble the two-section "photo spec" report: performance + efficiency."""
    performance = evaluate_performance_spec_seg(model, dataloader, device, threshold=threshold)
    efficiency = evaluate_efficiency_spec(model, dataloader, device)
    report = {
        "模型性能": {
            "准确率": performance["accuracy"],
            "精度":   performance["precision"],
        },
        "模型效率": {
            "参数数量":     efficiency["params"],
            "模型大小(MB)": efficiency["model_size_mb"],
            "内存占用(MB)": efficiency["memory_mb"],
        }
    }
    return report

def main():
    """Entry point: train and/or compress the change-detection model.

    Behavior is driven by CLI arguments (see get_args()): `mode` selects
    'train', 'compress' or 'both'; evaluation uses the "photo spec"
    report and the dataset root directory comes from `--data_root`.
    All artifacts (weights, histories, JSON reports) go to `args.output_dir`.
    """
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Resolve compression parameters: explicit --bits/--sparsity override
    # the named compression level.
    if args.bits is not None and args.sparsity is not None:
        compression_params = {'bits': args.bits, 'sparsity': args.sparsity}
        level_used = 'custom'
    else:
        compression_params = get_compression_params(args.compression_level)
        level_used = args.compression_level

    print(f"压缩级别: {level_used}")
    print(f"压缩参数: {compression_params['bits']} 位, 稀疏度 {compression_params['sparsity']}")

    # Build the model (fresh weights; may be overwritten by a checkpoint below).
    model = SimpleSiameseUNet(num_classes=args.num_classes).to(device)

    trained_model_path = os.path.join(args.output_dir, "siamese_unet_trained.pth")
    training_history = None

    # -------- Train or Both --------
    if args.mode in ['train', 'both']:
        print("=" * 50); print("开始训练阶段"); print("=" * 50)

        train_loader, test_loader, val_loader = create_dataloaders(
            root_dir=args.data_root, batch_size=args.batch_size, img_size=args.img_size
        )

        model, best_iou, training_history = train_model(
            model, train_loader, test_loader, device, args
        )
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Visualize predictions on the test split.
        visualize_predictions(model, test_loader, device, args.output_dir)

        # Persist the training history as JSON.
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

        # "Photo spec" evaluation of the original (uncompressed) model.
        # getattr guards against an args namespace without a `threshold` field.
        try:
            spec_orig = build_eval_report_spec(model, test_loader, device, threshold=getattr(args, "threshold", 0.5))
            print("【原始模型-照片版评估】", spec_orig)
        except Exception as e:
            print(f"照片版评估(原始)失败: {e}")
            spec_orig = None

    # -------- Compress-only needs a model --------
    elif args.mode == 'compress':
        # Prefer an explicit --pretrained_path, then a previously trained
        # checkpoint in output_dir; otherwise fall back to random weights.
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 未找到预训练模型，将使用随机初始化权重进行压缩")

    # -------- Compression Stage (for both/compress) --------
    if args.mode in ['compress', 'both']:
        print("=" * 50); print("开始压缩阶段"); print("=" * 50)

        # Only the test loader is needed for compression-stage evaluation.
        _, test_loader, _ = create_dataloaders(
            root_dir=args.data_root, batch_size=args.batch_size, img_size=args.img_size
        )

        # Baseline performance and size of the uncompressed model.
        print("评估原始模型...")
        original_iou = evaluate_model(model, test_loader, device)
        original_size_mem = get_model_size(model)
        orig_path = os.path.join(args.output_dir, "siamese_unet_original.pth")
        orig_file_bytes = save_original_model(model, orig_path)
        orig_size_file = orig_file_bytes / (1024 * 1024)

        print(f"原始模型IoU: {original_iou:.4f}")
        print(f"原始模型内存大小: {original_size_mem:.2f} MB")
        print(f"原始模型文件大小: {orig_size_file:.2f} MB")

        # (Optional) side-by-side comparison of several compression levels.
        if level_used in ['conservative', 'moderate']:
            _ = compare_compression_levels(model, test_loader, device, args.output_dir)

        # Run the actual compression and time it.
        t0 = time.time()
        compressed_model, compressed_info, actual_sparsity = compress_model(
            model,
            compression_params['bits'],
            compression_params['sparsity'],
            args.progressive_compression
        )
        comp_time = time.time() - t0
        print(f"压缩完成，耗时: {comp_time:.2f} 秒")

        # Evaluate the compressed model.
        compressed_model = compressed_model.to(device)
        compressed_iou = evaluate_model(compressed_model, test_loader, device)
        print(f"压缩后模型IoU: {compressed_iou:.4f} (下降 {(original_iou - compressed_iou)*100:.2f}%)")

        # "Photo spec" evaluation of the compressed model.
        try:
            spec_compressed = build_eval_report_spec(compressed_model, test_loader, device, threshold=getattr(args, "threshold", 0.5))
            print("【压缩后模型-照片版评估】", spec_compressed)
        except Exception as e:
            print(f"照片版评估(压缩后)失败: {e}")
            spec_compressed = None

        # Fine-tune only when the IoU drop exceeds 0.1 and fine-tuning is enabled.
        # NOTE(review): this rebuilds the dataloaders, replacing test_loader
        # for all evaluations below.
        if (original_iou - compressed_iou) > 0.1 and args.finetune_epochs > 0:
            print("性能下降较大，开始微调...")
            train_loader, test_loader, val_loader = create_dataloaders(
                root_dir=args.data_root, batch_size=args.batch_size, img_size=args.img_size
            )
            compressed_model, finetuned_iou = finetune_model(
                compressed_model, train_loader, test_loader, device, args.finetune_epochs
            )
        else:
            finetuned_iou = compressed_iou

        # "Photo spec" evaluation after (possible) fine-tuning.
        try:
            spec_ft = build_eval_report_spec(compressed_model, test_loader, device, threshold=getattr(args, "threshold", 0.5))
            print("【微调后模型-照片版评估】", spec_ft)
        except Exception as e:
            print(f"照片版评估(微调后)失败: {e}")
            spec_ft = None

        # Size accounting and saving of the compressed model.
        compressed_size_mem = get_model_size(compressed_model)
        comp_path = os.path.join(args.output_dir, "siamese_unet_compressed.pth")
        comp_file_bytes = save_compressed_model_simple(compressed_model, comp_path)
        comp_size_file = comp_file_bytes / (1024 * 1024)
        file_compression_ratio = (orig_size_file / comp_size_file) if comp_size_file > 0 else float('inf')

        print(f"压缩后模型内存大小: {compressed_size_mem:.2f} MB")
        print(f"压缩后模型文件大小: {comp_size_file:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # Round-trip check: reload the compressed model and re-evaluate.
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model_simple(comp_path, device)
        if loaded_model is not None:
            loaded_iou = evaluate_model(loaded_model, test_loader, device)
            print(f"加载后模型IoU: {loaded_iou:.4f}")
        else:
            print("加载压缩模型失败")
            loaded_iou = finetuned_iou

        # Aggregate everything into a single results JSON.
        results = {
            'model': 'SimpleSiameseUNet',
            'mode': args.mode,
            'task': 'change_detection',
            'num_classes': args.num_classes,
            'compression_level_used': level_used,
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'bits': compression_params['bits'],
                'target_sparsity': compression_params['sparsity'],
                'actual_sparsity': float(actual_sparsity),
                'progressive': args.progressive_compression,
                'finetune_epochs': args.finetune_epochs
            },
            'model_sizes': {
                'original_memory_mb': float(original_size_mem),
                'original_file_mb': float(orig_size_file),
                'compressed_memory_mb': float(compressed_size_mem),
                'compressed_file_mb': float(comp_size_file),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original_iou': float(original_iou),
                'compressed_iou': float(compressed_iou),
                'finetuned_iou': float(finetuned_iou),
                'loaded_iou': float(loaded_iou),
                'iou_drop_before_finetune': float(original_iou - compressed_iou),
                'iou_drop_after_finetune': float(original_iou - finetuned_iou)
            },
            # spec_orig only exists when mode included training, hence the
            # locals() guards.
            'photo_spec_report': {
                'original':   spec_orig if 'spec_orig' in locals() else None,
                'compressed': spec_compressed if 'spec_compressed' in locals() else None,
                'fine_tuned': spec_ft if 'spec_ft' in locals() else None
            },
            'training_history': training_history
        }
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)
        print(f"结果已保存到: {results_path}")

        # Final summary printout.
        print("\n" + "=" * 60)
        print("最终结果统计")
        print("=" * 60)
        if training_history:
            print(f"训练最佳IoU: {training_history.get('best_test_iou', 0.0):.4f}")
        print(f"原始模型IoU: {original_iou:.4f}")
        print(f"压缩后模型IoU: {compressed_iou:.4f}")
        print(f"微调后模型IoU: {finetuned_iou:.4f}")
        print(f"最终IoU下降: {(original_iou - finetuned_iou) * 100:.2f}%")
        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {orig_size_file:.2f} MB")
        print(f"压缩文件大小: {comp_size_file:.2f} MB")
        print(f"量化位数: {compression_params['bits']}")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

    print("\n卫星变化检测模型训练和压缩完成！")

# Script entry point.
if __name__ == "__main__":
    main()