#!/usr/bin/env python
"""
目标检测模型训练和压缩示例 - 修复版
支持多种检测模型的训练和压缩
确保实际减小保存的模型文件大小
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import time
import copy
import json
import gzip
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from tqdm import tqdm


# Custom synthetic object-detection dataset.
class ObjectDetectionDataset(Dataset):
    """In-memory synthetic detection dataset.

    Each sample is a (3, img_size, img_size) float image in [0, 1] plus a
    target dict: 'boxes' as (N, 4) xyxy pixel coordinates (float32) and
    'labels' as (N,) class indices (long). All samples are generated eagerly
    in __init__ under fixed seeds, so the dataset is reproducible.
    """

    def __init__(self, size=1000, img_size=640, num_classes=5):
        self.size = size
        self.img_size = img_size
        self.num_classes = num_classes

        print(f"生成 {size} 个目标检测样本...")
        self.data = []

        # Fixed seeds so the generated data is reproducible across runs.
        torch.manual_seed(42)
        np.random.seed(42)

        for i in tqdm(range(size)):
            img, targets = self._generate_detection_sample()
            self.data.append((img, targets))

    def _generate_detection_sample(self):
        """Generate one image with a random background and 1-5 drawn objects."""
        # Start from an all-black canvas.
        img = torch.zeros(3, self.img_size, self.img_size)

        # Paint one of four background styles.
        background_type = np.random.choice(['sky', 'ground', 'urban', 'water'])
        if background_type == 'sky':
            # Sky: light-blue vertical gradient.
            for i in range(self.img_size):
                intensity = 0.3 + 0.4 * (i / self.img_size)
                img[0, i, :] = 0.3 * intensity  # R
                img[1, i, :] = 0.6 * intensity  # G
                img[2, i, :] = 0.9 * intensity  # B
        elif background_type == 'ground':
            # Ground: noisy green/brown.
            img[0, :, :] = 0.4 + torch.randn(self.img_size, self.img_size) * 0.1
            img[1, :, :] = 0.6 + torch.randn(self.img_size, self.img_size) * 0.1
            img[2, :, :] = 0.2 + torch.randn(self.img_size, self.img_size) * 0.1
        elif background_type == 'urban':
            # Urban: noisy gray (identical noise on all three channels).
            intensity = 0.5 + torch.randn(self.img_size, self.img_size) * 0.2
            img[0, :, :] = intensity
            img[1, :, :] = intensity
            img[2, :, :] = intensity
        else:  # water
            # Water: noisy blue.
            img[0, :, :] = 0.1 + torch.randn(self.img_size, self.img_size) * 0.1
            img[1, :, :] = 0.3 + torch.randn(self.img_size, self.img_size) * 0.1
            img[2, :, :] = 0.8 + torch.randn(self.img_size, self.img_size) * 0.1

        # Clamp the background into the valid [0, 1] intensity range.
        img = torch.clamp(img, 0, 1)

        # Generate the foreground objects.
        boxes = []
        labels = []
        num_objects = np.random.randint(1, 6)  # 1-5 objects per image

        for _ in range(num_objects):
            # Random class for this object.
            obj_class = np.random.randint(0, self.num_classes)

            # Random position and size, kept fully inside the image.
            obj_w = np.random.randint(30, 120)
            obj_h = np.random.randint(30, 120)
            obj_x = np.random.randint(0, self.img_size - obj_w)
            obj_y = np.random.randint(0, self.img_size - obj_h)

            # Class-dependent color and shape.
            obj_color, obj_shape = self._get_object_appearance(obj_class)

            # Rasterize the object onto the image in place.
            self._draw_object(img, obj_x, obj_y, obj_w, obj_h, obj_color, obj_shape)

            # Record the ground-truth xyxy box and label.
            boxes.append([obj_x, obj_y, obj_x + obj_w, obj_y + obj_h])
            labels.append(obj_class)

        # Convert annotations to tensors.
        boxes = torch.tensor(boxes, dtype=torch.float32)
        labels = torch.tensor(labels, dtype=torch.long)

        targets = {
            'boxes': boxes,
            'labels': labels
        }

        return img, targets

    def _get_object_appearance(self, obj_class):
        """Return the (RGB color, shape name) pair used to draw a class."""
        # Per-class colors and shapes.
        appearances = {
            0: ([0.8, 0.2, 0.2], 'rectangle'),  # vehicle - red rectangle
            1: ([0.2, 0.8, 0.2], 'circle'),  # vegetation - green circle
            2: ([0.2, 0.2, 0.8], 'rectangle'),  # building - blue rectangle
            3: ([0.8, 0.8, 0.2], 'triangle'),  # road sign - yellow triangle
            4: ([0.8, 0.4, 0.8], 'ellipse')  # other - purple ellipse
        }

        if obj_class in appearances:
            return appearances[obj_class]
        else:
            return ([0.5, 0.5, 0.5], 'rectangle')  # fallback: gray rectangle

    def _draw_object(self, img, x, y, w, h, color, shape):
        """Draw a filled shape of the given color onto `img` in place."""
        # Clamp the draw region to the image bounds.
        x = max(0, min(x, self.img_size - 1))
        y = max(0, min(y, self.img_size - 1))
        w = min(w, self.img_size - x)
        h = min(h, self.img_size - y)

        if w <= 0 or h <= 0:
            return

        if shape == 'rectangle':
            # Filled rectangle...
            for c in range(3):
                img[c, y:y + h, x:x + w] = color[c]
            # ...with a darker 2px border.
            border_width = 2
            border_color = [c * 0.7 for c in color]
            for c in range(3):
                if h >= border_width:
                    img[c, y:y + border_width, x:x + w] = border_color[c]
                    img[c, y + h - border_width:y + h, x:x + w] = border_color[c]
                if w >= border_width:
                    img[c, y:y + h, x:x + border_width] = border_color[c]
                    img[c, y:y + h, x + w - border_width:x + w] = border_color[c]

        elif shape == 'circle':
            # Filled circle inscribed in the bounding box.
            center_x, center_y = x + w // 2, y + h // 2
            radius = min(w, h) // 2
            for i in range(max(0, y), min(self.img_size, y + h)):
                for j in range(max(0, x), min(self.img_size, x + w)):
                    if (j - center_x) ** 2 + (i - center_y) ** 2 <= radius ** 2:
                        for c in range(3):
                            img[c, i, j] = color[c]

        elif shape == 'triangle':
            # Filled triangle: row width shrinks linearly from the top row.
            for i in range(h):
                if y + i >= self.img_size:
                    break
                width = max(1, int(w * (1 - i / h)))
                start = x + (w - width) // 2
                end = min(start + width, self.img_size)
                start = max(start, 0)

                if start < end and y + i >= 0:
                    for c in range(3):
                        img[c, y + i, start:end] = color[c]

        elif shape == 'ellipse':
            # Filled ellipse inscribed in the bounding box.
            center_x, center_y = x + w // 2, y + h // 2
            a, b = max(1, w // 2), max(1, h // 2)
            for i in range(max(0, y), min(self.img_size, y + h)):
                for j in range(max(0, x), min(self.img_size, x + w)):
                    if ((j - center_x) / a) ** 2 + ((i - center_y) / b) ** 2 <= 1:
                        for c in range(3):
                            img[c, i, j] = color[c]

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]

    @staticmethod
    def collate_fn(batch):
        """Batch as parallel lists (images, targets) since box counts vary per image."""
        images = [item[0] for item in batch]
        targets = [item[1] for item in batch]
        return images, targets


# Simplified object-detection model (YOLO-like structure).
class SimpleDetector(nn.Module):
    """Single-scale anchor-based detector.

    The backbone downsamples by 32x overall (five MaxPool stages); the head
    outputs, per grid cell, num_anchors * (4 box coords + 1 objectness +
    num_classes class logits) as raw (unactivated) values.
    """

    def __init__(self, num_classes=5, num_anchors=3):
        super(SimpleDetector, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = num_anchors

        # Backbone (spatial sizes in the comments assume a 640x640 input).
        self.backbone = nn.Sequential(
            # Stage 1
            nn.Conv2d(3, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),  # 320x320

            # Stage 2
            nn.Conv2d(32, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),  # 160x160

            # Stage 3
            nn.Conv2d(64, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),  # 80x80

            # Stage 4
            nn.Conv2d(128, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),  # 40x40

            # Stage 5
            nn.Conv2d(256, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),  # 20x20
        )

        # Detection head.
        # Each anchor box predicts: 4 coords + 1 objectness + num_classes class scores.
        self.output_channels = num_anchors * (5 + num_classes)
        self.detection_head = nn.Sequential(
            nn.Conv2d(512, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, self.output_channels, 1)
        )

        # Relative weights of the three loss components.
        self.coord_weight = 5.0
        self.conf_weight = 1.0
        self.class_weight = 1.0

    def forward(self, x):
        """Return raw detection logits of shape (B, output_channels, H/32, W/32)."""
        features = self.backbone(x)
        output = self.detection_head(features)
        return output

    def compute_loss(self, predictions, targets):
        """Simplified but numerically stable YOLO-style loss.

        Args:
            predictions: raw head output, (B, output_channels, S, S).
            targets: list of per-image dicts with 'boxes' and 'labels'.

        Returns:
            (total_loss, coord_loss, conf_loss, class_loss); total is the
            weighted sum of the three components.
        """
        batch_size = predictions.size(0)
        grid_size = predictions.size(2)

        # Reshape to (B, anchors, S, S, 5 + num_classes).
        predictions = predictions.view(batch_size, self.num_anchors,
                                       5 + self.num_classes, grid_size, grid_size)
        predictions = predictions.permute(0, 1, 3, 4, 2).contiguous()

        # Split into coordinate, objectness, and class predictions.
        coord_pred = predictions[..., :4]
        conf_pred = predictions[..., 4]
        class_pred = predictions[..., 5:]

        # Loss accumulators on the prediction device.
        coord_loss = torch.tensor(0.0, device=predictions.device)
        conf_loss = torch.tensor(0.0, device=predictions.device)
        class_loss = torch.tensor(0.0, device=predictions.device)

        # Simplified per-image loss computation.
        num_valid_samples = 0

        for i in range(batch_size):
            if len(targets[i]['boxes']) > 0:
                num_valid_samples += 1

                # Coordinate loss (simplified: L1 shrinkage on raw coord outputs).
                coord_loss += torch.mean(torch.abs(coord_pred[i]))

                # Objectness loss: encourage high confidence where targets exist.
                conf_target = torch.zeros_like(conf_pred[i])
                # Simplification: mark anchor 0 at grid cell (0, 0) as the positive.
                if grid_size > 0:
                    conf_target[0, 0, 0] = 1.0
                conf_loss += nn.BCEWithLogitsLoss()(conf_pred[i], conf_target)

                # Class loss, using only the first target's label.
                target_labels = targets[i]['labels']
                if len(target_labels) > 0:
                    class_target = torch.zeros(self.num_classes, device=predictions.device)
                    class_target[target_labels[0]] = 1.0
                    class_loss += nn.BCEWithLogitsLoss()(class_pred[i, 0, 0, 0], class_target)
            else:
                # No targets: push all confidences toward zero.
                conf_target = torch.zeros_like(conf_pred[i])
                conf_loss += nn.BCEWithLogitsLoss()(conf_pred[i], conf_target)

        # Average the components.
        if num_valid_samples > 0:
            coord_loss = coord_loss / num_valid_samples
            class_loss = class_loss / num_valid_samples
        conf_loss = conf_loss / batch_size

        # Weighted total.
        total_loss = (self.coord_weight * coord_loss +
                      self.conf_weight * conf_loss +
                      self.class_weight * class_loss)

        return total_loss, coord_loss, conf_loss, class_loss


def get_args():
    """Parse command-line options for training and compression runs."""
    p = argparse.ArgumentParser(description="目标检测模型训练和压缩示例")

    # Model selection.
    p.add_argument("--model", "-m", default="simple",
                   choices=["simple"],  # only the simple model is supported for now
                   help="要使用的模型架构")

    # Data and output locations.
    p.add_argument("--data-dir", "-d", default="./data",
                   help="检测数据集目录路径")
    p.add_argument("--output-dir", "-o", default="./output",
                   help="模型和结果的输出目录")
    p.add_argument("--num-classes", type=int, default=5,
                   help="检测类别数量 (默认: 5)")

    # Training hyperparameters.
    p.add_argument("--epochs", "-e", type=int, default=20,
                   help="训练轮数 (默认: 20)")
    p.add_argument("--batch-size", "-b", type=int, default=8,
                   help="批次大小 (默认: 8)")
    p.add_argument("--learning-rate", "-lr", type=float, default=0.001,
                   help="学习率 (默认: 0.001)")
    p.add_argument("--weight-decay", type=float, default=1e-4,
                   help="权重衰减 (默认: 1e-4)")
    p.add_argument("--dataset-size", type=int, default=1000,
                   help="数据集大小 (默认: 1000)")
    p.add_argument("--img-size", type=int, default=320,
                   help="输入图像大小 (默认: 320，降低以加快训练)")

    # Compression options.
    p.add_argument("--bits", type=int, default=8,
                   help="量化位数 (默认: 8位)")
    p.add_argument("--sparsity", type=float, default=0.5,
                   help="剪枝稀疏度 (默认: 0.5)")

    # Run mode.
    p.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                   help="运行模式: train(仅训练), compress(仅压缩), both(训练+压缩)")
    p.add_argument("--pretrained-path", type=str, default=None,
                   help="预训练模型路径 (用于仅压缩模式)")

    return p.parse_args()


def load_detection_model(model_name, num_classes=5):
    """Build and return the requested detector architecture.

    Raises:
        ValueError: if `model_name` is not a supported architecture.
    """
    print(f"加载 {model_name} 模型...")

    # Guard clause: only the simplified detector is implemented.
    if model_name != "simple":
        raise ValueError(f"不支持的模型: {model_name}")

    detector = SimpleDetector(num_classes=num_classes)
    print("使用自定义简化检测模型")
    return detector


def create_dataloaders(data_dir, batch_size=8, dataset_size=1000, img_size=320, num_classes=5):
    """Build train/test DataLoaders over a freshly generated synthetic dataset.

    Note: `data_dir` is accepted for interface compatibility but the data is
    generated in memory, not read from disk.
    """
    print("准备目标检测数据集...")

    # Generate the full synthetic dataset up front.
    dataset = ObjectDetectionDataset(
        size=dataset_size,
        img_size=img_size,
        num_classes=num_classes
    )

    # 80/20 train/test split.
    n_train = int(0.8 * len(dataset))
    n_test = len(dataset) - n_train
    train_set, test_set = random_split(dataset, [n_train, n_test])

    # Shared loader settings; custom collate keeps variable-length targets as lists.
    common = dict(
        batch_size=batch_size,
        num_workers=0,  # single-process loading avoids worker issues
        pin_memory=True,
        collate_fn=ObjectDetectionDataset.collate_fn,
    )
    train_loader = DataLoader(train_set, shuffle=True, **common)
    test_loader = DataLoader(test_set, shuffle=False, **common)

    print(f"训练集大小: {len(train_set)}")
    print(f"测试集大小: {len(test_set)}")

    return train_loader, test_loader


def train_model(model, train_loader, test_loader, device, args):
    """Train the detector with Adam + StepLR, keeping the best-loss weights.

    Args:
        model: detector exposing compute_loss(outputs, targets).
        train_loader: yields (list[images], list[target dicts]) batches.
        test_loader: NOTE(review): currently unused here; evaluation happens elsewhere.
        device: torch device to move model inputs to.
        args: namespace providing epochs, learning_rate, weight_decay, output_dir.

    Returns:
        (model with best weights restored, best training loss, history dict).
    """
    print("开始训练目标检测模型...")

    # Optimizer and stepwise LR decay (halve the LR every 8 epochs).
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.5)

    # Per-epoch training-loss history.
    train_losses = []

    best_loss = float('inf')
    best_model_state = None

    # Training loop.
    for epoch in range(args.epochs):
        # Training phase.
        model.train()
        running_loss = 0.0
        num_batches = 0

        pbar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for batch_idx, (images, targets) in enumerate(pbar):
            try:
                images = [img.to(device) for img in images]
                targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

                optimizer.zero_grad()

                # Images are equal-sized, so they can be stacked into one batch tensor.
                batch_images = torch.stack(images)
                outputs = model(batch_images)
                loss, coord_loss, conf_loss, class_loss = model.compute_loss(outputs, targets)

                # Skip batches that produced NaN/Inf losses.
                if torch.isnan(loss) or torch.isinf(loss):
                    print(f"警告: 检测到无效损失值，跳过此批次")
                    continue

                # Backpropagation.
                loss.backward()
                # Gradient clipping for training stability.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                optimizer.step()

                # Running statistics.
                running_loss += loss.item()
                num_batches += 1

                # Progress-bar readout of the individual loss components.
                pbar.set_postfix({
                    'Loss': f'{loss.item():.4f}',
                    'Coord': f'{coord_loss.item():.4f}',
                    'Conf': f'{conf_loss.item():.4f}',
                    'Class': f'{class_loss.item():.4f}'
                })

            except Exception as e:
                # Best-effort training: log the failure and continue with the next batch.
                print(f"训练批次出错: {e}")
                continue

        # Mean training loss over the batches that succeeded.
        if num_batches > 0:
            avg_train_loss = running_loss / num_batches
        else:
            avg_train_loss = float('inf')

        # Record history.
        train_losses.append(avg_train_loss)

        # Track the weights with the lowest training loss so far.
        if avg_train_loss < best_loss:
            best_loss = avg_train_loss
            best_model_state = copy.deepcopy(model.state_dict())

        # Step the LR schedule once per epoch.
        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - Train Loss: {avg_train_loss:.4f}')

    # Restore the best weights seen during training.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳训练损失: {best_loss:.4f}')

    # Save the training-loss curve plot.
    plot_training_curves(train_losses, args.output_dir)

    return model, best_loss, {
        'train_losses': train_losses,
        'best_loss': best_loss
    }


def evaluate_detection_model(model, test_loader, device, model_name="simple"):
    """Estimate detection quality as 1 / (1 + mean loss) over up to 10 test batches.

    NOTE(review): `model_name` is currently unused in this function.

    Returns:
        A scalar in (0, 1]; higher is better. 0.0 if no valid batches ran.
    """
    print("评估目标检测模型性能...")
    model.eval()

    total_loss = 0.0
    num_batches = 0

    with torch.no_grad():
        for images, targets in test_loader:
            try:
                images = [img.to(device) for img in images]
                targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

                # Forward pass on a stacked batch of equal-sized images.
                batch_images = torch.stack(images)
                outputs = model(batch_images)
                loss, _, _, _ = model.compute_loss(outputs, targets)

                # Only accumulate finite losses.
                if not (torch.isnan(loss) or torch.isinf(loss)):
                    total_loss += loss.item()
                    num_batches += 1

                # Cap evaluation at 10 valid batches to keep it fast.
                if num_batches >= 10:
                    break

            except Exception as e:
                # Best-effort: skip batches that fail.
                print(f"评估批次出错: {e}")
                continue

    avg_loss = total_loss / num_batches if num_batches > 0 else float('inf')
    # Map loss into a (0, 1] "performance" score: lower loss -> higher score.
    performance = 1.0 / (1.0 + avg_loss) if avg_loss < float('inf') else 0.0

    print(f"平均损失: {avg_loss:.4f}, 性能指标: {performance:.4f}")
    return performance


def plot_training_curves(train_losses, output_dir):
    """Save a PNG plot of the per-epoch training loss to `output_dir`."""
    if not train_losses:
        # Nothing to plot.
        return

    epoch_axis = range(1, len(train_losses) + 1)

    plt.figure(figsize=(8, 6))
    plt.plot(epoch_axis, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss Curve')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()

    out_path = os.path.join(output_dir, 'training_curves.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_detections(model, dataloader, device, output_dir, model_name="simple", num_samples=4):
    """Save a 2x2 grid of test images with ground-truth boxes overlaid.

    NOTE(review): despite the name, only ground-truth boxes are drawn; model
    predictions are not decoded or overlaid. `model_name` is unused.
    """
    model.eval()

    fig, axes = plt.subplots(2, 2, figsize=(12, 12))
    fig.suptitle('Object Detection Results', fontsize=16)  # English title avoids CJK font issues
    axes = axes.flatten()

    class_names = ['Vehicle', 'Vegetation', 'Building', 'Sign', 'Other']  # English labels avoid font issues
    colors = ['red', 'green', 'blue', 'yellow', 'purple']

    with torch.no_grad():
        sample_count = 0
        for i, (images, targets) in enumerate(dataloader):
            if sample_count >= num_samples:
                break

            try:
                images = [img.to(device) for img in images]

                # Show the first image of the batch (CHW -> HWC for matplotlib).
                img_show = images[0].cpu().permute(1, 2, 0).numpy()
                img_show = np.clip(img_show, 0, 1)

                axes[sample_count].imshow(img_show)
                axes[sample_count].set_title(f'Sample {sample_count + 1}')  # English title
                axes[sample_count].axis('off')

                # Overlay ground-truth boxes (dashed red).
                if len(targets) > 0 and 'boxes' in targets[0]:
                    true_boxes = targets[0]['boxes'].cpu().numpy()
                    true_labels = targets[0]['labels'].cpu().numpy()

                    for box, label in zip(true_boxes, true_labels):
                        x1, y1, x2, y2 = box
                        rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                                 linewidth=2, edgecolor='red',
                                                 facecolor='none', linestyle='--')
                        axes[sample_count].add_patch(rect)
                        if label < len(class_names):
                            axes[sample_count].text(x1, y1 - 5, f'GT: {class_names[label]}',
                                                    color='red', fontsize=8, weight='bold')

                sample_count += 1

            except Exception as e:
                # Best-effort: skip samples that fail to render.
                print(f"可视化出错: {e}")
                continue

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'detection_results.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"检测结果已保存到: {os.path.join(output_dir, 'detection_results.png')}")


def visualize_dataset_samples(dataloader, output_dir, num_samples=8):
    """Save a 2x4 grid of dataset images with their ground-truth boxes drawn."""
    fig, axes = plt.subplots(2, 4, figsize=(16, 8))
    fig.suptitle('Object Detection Dataset Samples', fontsize=16)  # English title avoids CJK font issues
    axes = axes.flatten()

    class_names = ['Vehicle', 'Vegetation', 'Building', 'Sign', 'Other']  # English labels avoid font issues
    colors = ['red', 'green', 'blue', 'yellow', 'purple']

    sample_count = 0
    for i, (images, targets) in enumerate(dataloader):
        if sample_count >= num_samples:
            break

        try:
            # Show the first image of the batch (CHW -> HWC for matplotlib).
            img_show = images[0].permute(1, 2, 0).numpy()
            img_show = np.clip(img_show, 0, 1)

            axes[sample_count].imshow(img_show)
            axes[sample_count].set_title(f'Sample {sample_count + 1}')  # English title
            axes[sample_count].axis('off')

            # Overlay ground-truth boxes and class labels.
            if 'boxes' in targets[0] and len(targets[0]['boxes']) > 0:
                boxes = targets[0]['boxes'].numpy()
                labels = targets[0]['labels'].numpy()

                for box, label in zip(boxes, labels):
                    x1, y1, x2, y2 = box
                    color = colors[label % len(colors)]
                    rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                             linewidth=2, edgecolor=color, facecolor='none')
                    axes[sample_count].add_patch(rect)
                    if label < len(class_names):
                        axes[sample_count].text(x1, y1 - 5, class_names[label],
                                                color=color, fontsize=8, weight='bold')

            sample_count += 1

        except Exception as e:
            # Best-effort: skip samples that fail to render.
            print(f"数据集可视化出错: {e}")
            continue

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'dataset_samples.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据集样本已保存到: {os.path.join(output_dir, 'dataset_samples.png')}")


def get_model_size(model):
    """Return the in-memory size of the model's state in megabytes.

    Counts parameters AND buffers. Bug fix: the previous version counted only
    parameters, undercounting models with BatchNorm layers whose running
    statistics (running_mean / running_var / num_batches_tracked) are part of
    the saved state_dict.
    """
    total_bytes = 0
    for param in model.parameters():
        total_bytes += param.nelement() * param.element_size()
    # Buffers (e.g. BatchNorm running stats) are serialized too.
    for buf in model.buffers():
        total_bytes += buf.nelement() * buf.element_size()
    return total_bytes / (1024 * 1024)


def compute_compressed_size(model, bits=8, sparsity=0.5):
    """Estimate the stored size (MB) of the compressed model and its weight sparsity.

    Weights are assumed quantized to `bits` bits and stored either densely or
    as sparse (index, value) pairs, whichever is smaller; biases stay float32.

    Returns:
        (total_size_mb, actual_sparsity): estimated size in MB, and the
        measured fraction of zero entries among weight tensors.

    Bug fix: the sparsity denominator previously included bias parameters
    while the numerator counted only weight nonzeros, so a fully dense model
    reported nonzero sparsity. It is now computed over weight elements only.
    """
    total_bytes = 0
    weight_elements = 0   # elements in 'weight' tensors (the prunable set)
    nonzero_params = 0    # nonzero elements among those weights

    for name, param in model.named_parameters():
        param_count = param.numel()

        if 'weight' in name:
            weight_elements += param_count
            nz_count = torch.count_nonzero(param).item()
            nonzero_params += nz_count
            # Sparse storage: a 4-byte index plus one quantized value per nonzero.
            sparse_size = nz_count * (4 + bits / 8)
            # Dense storage: every element quantized.
            dense_size = param_count * (bits / 8)

            # Choose whichever representation is smaller.
            if sparsity > 0 and sparse_size < dense_size:
                total_bytes += sparse_size
            else:
                total_bytes += dense_size
        elif 'bias' in name:
            # Biases are kept uncompressed as float32.
            total_bytes += param_count * 4

    # 5% overhead for metadata (shapes, scales, zero points, ...).
    overhead_bytes = total_bytes * 0.05
    total_size_mb = (total_bytes + overhead_bytes) / (1024 * 1024)
    # Sparsity measured over prunable (weight) elements only.
    actual_sparsity = 1.0 - (nonzero_params / weight_elements) if weight_elements > 0 else 0

    return total_size_mb, actual_sparsity


def quantize_weights(weight, bits=8):
    """Uniform affine quantize/dequantize `weight` to `bits` bits, keeping zeros at zero.

    Returns:
        (dequantized_weight, scale, zero_point, quantized_codes). The last
        three are None when no quantization is applied (bits >= 32, or a
        constant tensor that cannot be scaled).
    """
    if bits >= 32:
        # Full precision requested: nothing to quantize.
        return weight.clone(), None, None, None

    # Remember which entries are (numerically) zero so pruning survives round-trip.
    zero_mask = weight.abs() < 1e-8

    level_lo, level_hi = 0, (1 << bits) - 1
    w_min, w_max = weight.min(), weight.max()

    # A constant tensor has zero range and cannot be affinely quantized.
    if w_min == w_max:
        return weight.clone(), None, None, None

    scale = (w_max - w_min) / (level_hi - level_lo)
    zero_point = level_lo - w_min / scale

    codes = torch.clamp(torch.round(weight / scale + zero_point), level_lo, level_hi)
    restored = (codes - zero_point) * scale

    # Force previously-zero entries back to exact zero.
    restored[zero_mask] = 0.0

    return restored, scale, zero_point, codes


def prune_weights(weight, sparsity=0.5):
    """Magnitude-prune `weight` so that exactly floor(numel * sparsity) entries become zero.

    Returns:
        (pruned_weight, keep_mask): the pruned tensor and a bool mask that is
        True for kept entries, or (copy, None) when sparsity <= 0.

    Bug fixes vs. the previous version:
    - The kthvalue-threshold + `>=` comparison pruned fewer than k elements
      when magnitudes tied at the threshold (a constant tensor was not pruned
      at all). Ranking indices with topk guarantees the exact count.
    - The full-prune branch (k >= numel) returned an all-True mask alongside
      an all-zero tensor; it now returns an all-False mask, consistent with
      "True means kept".
    """
    if sparsity <= 0:
        return weight.clone(), None

    total_elements = weight.numel()
    k = int(total_elements * sparsity)

    if k >= total_elements:
        # Everything pruned.
        return torch.zeros_like(weight), torch.zeros_like(weight, dtype=torch.bool)

    mask = torch.ones_like(weight, dtype=torch.bool)
    if k > 0:
        # Indices of the k smallest-magnitude entries; topk breaks ties
        # deterministically, so exactly k elements are dropped.
        _, drop_idx = torch.topk(weight.abs().flatten(), k, largest=False)
        mask.view(-1)[drop_idx] = False

    pruned = weight * mask.float()
    return pruned, mask


def compress_model(model, bits=8, sparsity=0.5):
    """Prune and quantize every module weight; return (model, info, sparsity).

    Works on a deep copy of `model`, writing the pruned+quantized weights back
    into the copy. `model_info['compressed_layers']` maps dotted parameter
    paths to a sparse, quantized representation (nonzero indices + integer
    codes + scale/zero_point) that save_compressed_model /
    load_compressed_model can round-trip. Biases are recorded uncompressed.
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")

    # Work on a deep copy so the caller's model is untouched.
    compressed_model = copy.deepcopy(model)

    # Per-parameter compressed representations, keyed by dotted path.
    compressed_layers = {}

    # Walk the module tree depth-first and compress each weight in place.
    def process_module(module, module_path=''):
        for name, child in module.named_children():
            child_path = f"{module_path}.{name}" if module_path else name
            process_module(child, child_path)

        # Compress this module's own weight, if it has one.
        if hasattr(module, 'weight') and module.weight is not None:
            # Prune first...
            original_weight = module.weight.data.clone()
            pruned_weight, mask = prune_weights(original_weight, sparsity)

            if mask is not None:
                # ...then quantize the pruned weights.
                quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, bits)

                # Write the compressed weights back into the module.
                module.weight.data.copy_(quantized_weight)

                # Record the sparse quantized form for serialization.
                if scale is not None and torch.any(mask):
                    # Collect indices and integer codes of the nonzero entries.
                    nonzero_mask = (quantized_weight != 0)
                    if torch.any(nonzero_mask):
                        nonzero_indices = nonzero_mask.nonzero(as_tuple=False).cpu().numpy()
                        nonzero_q_values = q_weight[nonzero_mask].cpu().numpy()

                        compressed_layers[module_path + '.weight'] = {
                            'shape': list(module.weight.shape),
                            'indices': nonzero_indices.tolist(),
                            'values': nonzero_q_values.astype(int).tolist(),
                            'scale': float(scale.item()),
                            'zero_point': float(zero_point.item()),
                            'bits': bits,
                            'sparsity': float(sparsity)
                        }

        # Biases are stored uncompressed as float32 values.
        if hasattr(module, 'bias') and module.bias is not None:
            compressed_layers[module_path + '.bias'] = {
                'shape': list(module.bias.shape),
                'values': module.bias.data.cpu().numpy().tolist(),
                'is_bias': True
            }

    # Run the compression pass.
    process_module(compressed_model)

    # Measure the achieved sparsity, with a per-layer diagnostic dump.
    total_elements = 0
    zero_elements = 0
    layer_stats = []

    print("\n=== 层级稀疏度诊断 ===")
    for name, param in compressed_model.named_parameters():
        if 'weight' in name:  # only weight tensors count toward sparsity
            layer_total = param.numel()
            layer_zeros = (param.abs() < 1e-8).sum().item()  # near-zero threshold catches float noise
            layer_sparsity = layer_zeros / layer_total if layer_total > 0 else 0

            total_elements += layer_total
            zero_elements += layer_zeros

            layer_stats.append({
                'name': name,
                'total': layer_total,
                'zeros': layer_zeros,
                'sparsity': layer_sparsity
            })

            print(f"{name}: {layer_zeros}/{layer_total} = {layer_sparsity:.4f}")

    overall_sparsity = zero_elements / total_elements if total_elements > 0 else 0
    print(f"\n整体统计: {zero_elements}/{total_elements} = {overall_sparsity:.4f}")
    print("=" * 40)

    # Metadata bundle used by save/load_compressed_model.
    model_info = {
        'model_type': str(type(model).__name__),
        'compressed_layers': compressed_layers
    }

    return compressed_model, model_info, overall_sparsity


def save_original_model(model, path):
    """Serialize the model's state_dict to `path`; return the file size in bytes."""
    state = model.state_dict()
    torch.save(state, path)
    return os.path.getsize(path)


def save_compressed_model(compressed_info, path):
    """Write the compression metadata as gzip-compressed JSON; return the file size in bytes."""
    payload = json.dumps(compressed_info).encode('utf-8')
    with gzip.open(path, 'wb', compresslevel=9) as fh:
        fh.write(payload)
    return os.path.getsize(path)


def load_compressed_model(path, model_name="simple", num_classes=5, device=None):
    """Rebuild a model from a gzip-JSON file written by save_compressed_model.

    Recorded weights are restored by dequantizing the stored integer codes
    into a zero-initialized tensor; biases are restored verbatim. Parameters
    absent from the file keep their fresh-initialization values.
    """
    with gzip.open(path, 'rb') as f:
        json_str = f.read().decode('utf-8')

    compressed_info = json.loads(json_str)

    # Fresh model of the same architecture.
    model = load_detection_model(model_name, num_classes)

    # Walk the module tree and restore each recorded parameter in place.
    def restore_compressed_params(module, module_path=''):
        for name, child in module.named_children():
            child_path = f"{module_path}.{name}" if module_path else name
            restore_compressed_params(child, child_path)

        # Restore this module's weight, if recorded.
        weight_path = module_path + '.weight'
        if hasattr(module, 'weight') and weight_path in compressed_info['compressed_layers']:
            info = compressed_info['compressed_layers'][weight_path]
            shape = info['shape']

            # Start from all zeros so pruned entries stay zero.
            module.weight.data = torch.zeros(shape, dtype=torch.float32)

            # Scatter the stored nonzero entries back in.
            if 'indices' in info and 'values' in info:
                indices_array = torch.tensor(info['indices'], dtype=torch.long)
                values_array = torch.tensor(info['values'], dtype=torch.float32)

                # Dequantize: value = (code - zero_point) * scale.
                scale = info['scale']
                zero_point = info['zero_point']
                dequantized_values = (values_array - zero_point) * scale

                # Write the values via advanced indexing.
                if len(indices_array) > 0:
                    # (nnz, ndim) index matrix -> tuple of per-dimension index vectors.
                    if indices_array.dim() == 2:
                        indices_tuple = tuple(indices_array.t())
                        module.weight.data[indices_tuple] = dequantized_values
                    else:
                        # Flat (1-D) indices into the flattened weight.
                        flat_weight = module.weight.data.view(-1)
                        flat_weight[indices_array] = dequantized_values
                        module.weight.data = flat_weight.view(shape)

        # Restore this module's bias, if recorded.
        bias_path = module_path + '.bias'
        if (hasattr(module, 'bias') and module.bias is not None and
                bias_path in compressed_info['compressed_layers']):
            info = compressed_info['compressed_layers'][bias_path]
            if 'is_bias' in info and info['is_bias']:
                module.bias.data = torch.tensor(info['values'], dtype=torch.float32)

    # Run the restore pass.
    restore_compressed_params(model)

    if device:
        model = model.to(device)

    return model


def _run_training(model, device, args, trained_model_path):
    """Training phase: build dataloaders, train, persist weights and history.

    Returns (model, training_history, test_loader); the test loader is
    reused by the compression phase when mode == 'both'.
    """
    print("=" * 50)
    print("开始目标检测训练阶段")
    print("=" * 50)

    # Build train/test loaders over the synthetic detection dataset.
    train_loader, test_loader = create_dataloaders(
        args.data_dir, args.batch_size, args.dataset_size,
        args.img_size, args.num_classes
    )

    # Dump a few dataset samples for visual sanity checking.
    visualize_dataset_samples(train_loader, args.output_dir)

    # Train; the best-loss scalar is also recorded in training_history.
    model, _best_loss, training_history = train_model(
        model, train_loader, test_loader, device, args
    )

    # Persist the trained weights.
    torch.save(model.state_dict(), trained_model_path)
    print(f"训练好的模型已保存到: {trained_model_path}")

    # Render qualitative detection results on the test split.
    visualize_detections(model, test_loader, device, args.output_dir, args.model)

    # Save the training history alongside the model, if any was recorded.
    if training_history:
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    return model, training_history, test_loader


def _load_pretrained_weights(model, args, trained_model_path, device):
    """Load weights for compress-only mode (in place on `model`).

    Prefers --pretrained_path, falls back to the default trained-model
    path, and warns (keeping random init) when neither exists.
    """
    # NOTE(review): torch.load unpickles arbitrary objects; only load
    # checkpoints from trusted sources (weights_only=True would be safer
    # on torch >= 1.13).
    if args.pretrained_path and os.path.exists(args.pretrained_path):
        model.load_state_dict(torch.load(args.pretrained_path, map_location=device))
        print(f"已加载预训练模型: {args.pretrained_path}")
    elif os.path.exists(trained_model_path):
        model.load_state_dict(torch.load(trained_model_path, map_location=device))
        print(f"已加载训练好的模型: {trained_model_path}")
    else:
        print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")


def _run_compression(model, test_loader, device, args, training_history):
    """Compression phase: evaluate, compress, save, reload and report.

    `test_loader` may be None in compress-only mode; it is then created
    here so dataset-generation output appears after the phase banner,
    as before.
    """
    print("=" * 50)
    print("开始压缩阶段")
    print("=" * 50)

    # Compress-only mode has no loader from the training phase yet.
    if args.mode == 'compress':
        _, test_loader = create_dataloaders(
            args.data_dir, args.batch_size, args.dataset_size,
            args.img_size, args.num_classes
        )

    # Baseline: performance and sizes of the uncompressed model.
    print("评估原始模型...")
    original_performance = evaluate_detection_model(model, test_loader, device, args.model)
    original_size = get_model_size(model)

    # Save the original model and convert its file size to MB.
    original_path = os.path.join(args.output_dir, f"{args.model}_detection_original.pth")
    original_file_size = save_original_model(model, original_path) / (1024 * 1024)

    print(f"原始模型性能: {original_performance:.4f}")
    print(f"原始模型内存大小: {original_size:.2f} MB")
    print(f"原始模型文件大小: {original_file_size:.2f} MB")

    # Compress (quantize + sparsify) and time the operation.
    start_time = time.time()
    compressed_model, compressed_info, actual_sparsity = compress_model(
        model, args.bits, args.sparsity
    )
    compression_time = time.time() - start_time
    print(f"压缩完成，耗时: {compression_time:.2f} 秒")

    # Evaluate the compressed model and measure its sizes.
    print("评估压缩后的模型...")
    compressed_model = compressed_model.to(device)
    compressed_performance = evaluate_detection_model(compressed_model, test_loader, device, args.model)
    compressed_size = get_model_size(compressed_model)

    # Save the compressed artifact and convert its file size to MB.
    compressed_path = os.path.join(args.output_dir, f"{args.model}_detection_compressed.gz")
    compressed_file_size = save_compressed_model(compressed_info, compressed_path) / (1024 * 1024)

    print(f"压缩后模型性能: {compressed_performance:.4f}")
    print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
    print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
    print(f"实际稀疏度: {actual_sparsity:.4f}")

    # On-disk compression ratio; guard against a zero-size compressed file.
    file_compression_ratio = original_file_size / compressed_file_size if compressed_file_size > 0 else 1

    # Round-trip check: reload the compressed artifact and re-evaluate.
    print("测试加载压缩模型...")
    try:
        loaded_model = load_compressed_model(compressed_path, args.model, args.num_classes, device)
        loaded_performance = evaluate_detection_model(loaded_model, test_loader, device, args.model)
        print(f"加载后模型性能: {loaded_performance:.4f}")
    except Exception as e:
        # Best-effort: report the failure but keep the summary going.
        print(f"加载压缩模型失败: {e}")
        loaded_performance = 0.0

    # Full experiment record, mirrored to JSON for later analysis.
    results = {
        'model': args.model,
        'mode': args.mode,
        'task': 'object_detection',
        'num_classes': args.num_classes,
        'training_params': {
            'epochs': args.epochs,
            'batch_size': args.batch_size,
            'learning_rate': args.learning_rate,
            'dataset_size': args.dataset_size,
            'img_size': args.img_size
        } if args.mode in ['train', 'both'] else None,
        'compression_params': {
            'bits': args.bits,
            'target_sparsity': args.sparsity,
            'actual_sparsity': float(actual_sparsity)
        },
        'model_sizes': {
            'original_memory_mb': float(original_size),
            'original_file_mb': float(original_file_size),
            'compressed_memory_mb': float(compressed_size),
            'compressed_file_mb': float(compressed_file_size),
            'file_compression_ratio': float(file_compression_ratio)
        },
        'performance_metrics': {
            'original_performance': float(original_performance),
            'compressed_performance': float(compressed_performance),
            'loaded_performance': float(loaded_performance),
            'performance_change': float(compressed_performance - original_performance)
        },
        'training_history': training_history
    }

    results_path = os.path.join(args.output_dir, "complete_results.json")
    with open(results_path, 'w') as f:
        json.dump(results, f, indent=2)

    # Final human-readable summary.
    print("\n" + "=" * 60)
    print("目标检测最终结果统计")
    print("=" * 60)
    if training_history:
        print(f"训练最佳损失: {training_history['best_loss']:.4f}")
    print(f"原始模型性能: {original_performance:.4f}")
    print(f"压缩后模型性能: {compressed_performance:.4f}")
    print(f"性能变化: {(compressed_performance - original_performance) * 100:.2f}%")
    print(f"文件压缩率: {file_compression_ratio:.2f}倍")
    print(f"原始文件大小: {original_file_size:.2f} MB")
    print(f"压缩文件大小: {compressed_file_size:.2f} MB")
    print(f"量化位数: {args.bits}")
    print(f"实际稀疏度: {actual_sparsity:.4f}")
    print(f"结果已保存到: {results_path}")


def main():
    """Entry point: train and/or compress a detection model per CLI args.

    args.mode selects the phases: 'train', 'compress', or 'both'.
    """
    args = get_args()

    # Make sure the output directory exists before anything is saved.
    os.makedirs(args.output_dir, exist_ok=True)

    # Prefer GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the detection model for the requested architecture/classes.
    model = load_detection_model(args.model, args.num_classes)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, f"{args.model}_detection_trained.pth")
    training_history = None
    test_loader = None

    if args.mode in ['train', 'both']:
        model, training_history, test_loader = _run_training(
            model, device, args, trained_model_path
        )
    elif args.mode == 'compress':
        # Compress-only: load existing weights instead of training.
        _load_pretrained_weights(model, args, trained_model_path, device)

    if args.mode in ['compress', 'both']:
        _run_compression(model, test_loader, device, args, training_history)

    print("\n目标检测模型训练和压缩完成!")


# Script entry point: run the full train/compress pipeline.
if __name__ == "__main__":
    main()