#!/usr/bin/env python
"""
卫星航迹识别模型训练和智能压缩示例 - 改进版
支持Social-LSTM航迹预测模型的训练和智能压缩
采用渐进式压缩策略，确保性能损失在可接受范围内
"""

import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
import numpy as np
import copy
import time
import json
import math
import gzip
import matplotlib.pyplot as plt
from tqdm import tqdm
import random


# Improved Social-LSTM model definition
class SocialLSTM(nn.Module):
    def __init__(self, input_size=2, embedding_size=64, hidden_size=128, num_layers=2, dropout=0.1):
        """
        Social-LSTM model for satellite trajectory prediction.
        Args:
            input_size: input feature dimension, usually 2 (x, y coordinates)
            embedding_size: dimension of the per-point embedding
            hidden_size: LSTM hidden state size
            num_layers: number of stacked LSTM layers
            dropout: dropout probability
        """
        super(SocialLSTM, self).__init__()

        self.input_size = input_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Embedding for individual trajectory points
        self.embedding = nn.Sequential(
            nn.Linear(input_size, embedding_size),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Encoder LSTM for the main (ego) trajectory
        self.encoder = nn.LSTM(
            embedding_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,  # nn.LSTM warns if dropout>0 with a single layer
            bidirectional=False
        )

        # Single-layer encoder for neighbor (social) trajectories
        self.social_encoder = nn.LSTM(
            embedding_size,
            hidden_size // 2,
            1,
            batch_first=True
        )

        # Attention over encoded neighbors
        self.attention = nn.MultiheadAttention(
            embed_dim=hidden_size // 2,
            num_heads=4,
            dropout=dropout,
            batch_first=True
        )

        # Fuses personal and social features back to hidden_size
        self.social_fusion = nn.Sequential(
            nn.Linear(hidden_size + hidden_size // 2, hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Autoregressive LSTM decoder for future-step prediction
        self.decoder = nn.LSTM(
            embedding_size + hidden_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0
        )

        # Output head projecting decoder state back to coordinates
        self.output = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_size // 2, input_size)
        )

        # Initialize weights (Xavier matrices, zero biases)
        self._init_weights()

    def _init_weights(self):
        """Initialize network weights: Xavier-uniform for weight matrices, zeros for biases."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.LSTM):
                for name, param in module.named_parameters():
                    if 'weight' in name:
                        nn.init.xavier_uniform_(param)
                    elif 'bias' in name:
                        nn.init.zeros_(param)

    def forward(self, trajectory, neighbors=None, future_steps=3):
        """
        Forward pass.
        Args:
            trajectory: [batch_size, seq_len, 2] observed history
            neighbors: [batch_size, num_neighbors, seq_len, 2] neighbor tracks (optional)
            future_steps: number of future steps to predict
        Returns:
            pred_trajectory: [batch_size, future_steps, 2] predicted trajectory
        """
        batch_size, seq_len, _ = trajectory.shape

        # Embed the observed trajectory
        embedded = self.embedding(trajectory)

        # Encode the main trajectory
        encoder_output, (h_n, c_n) = self.encoder(embedded)

        # Social context defaults to zeros when no neighbors are provided
        social_context = torch.zeros(batch_size, self.hidden_size // 2, device=trajectory.device)

        if neighbors is not None and neighbors.numel() > 0:
            # Fold neighbors into the batch dimension for a single encoder pass
            num_neighbors = neighbors.shape[1]
            neighbors_flat = neighbors.view(batch_size * num_neighbors, seq_len, self.input_size)

            # Embed neighbor trajectories
            neighbors_embedded = self.embedding(neighbors_flat)

            # Encode each neighbor track; keep the final hidden state
            _, (neighbors_h, _) = self.social_encoder(neighbors_embedded)
            neighbors_h = neighbors_h[-1].view(batch_size, num_neighbors, self.hidden_size // 2)

            # Build the attention query from the ego track's final hidden state.
            # NOTE(review): this slices the first hidden_size//2 features rather
            # than using a learned projection — confirm that is intended.
            query = h_n[-1][:, :self.hidden_size // 2].unsqueeze(1)

            # Attend over the neighbor encodings
            attended_neighbors, attention_weights = self.attention(
                query, neighbors_h, neighbors_h
            )

            # Drop the singleton query/time dimension
            social_context = attended_neighbors.squeeze(1)

        # Fuse personal and social features
        personal_features = h_n[-1]
        combined_features = torch.cat([personal_features, social_context], dim=-1)
        fused_context = self.social_fusion(combined_features)

        # Autoregressively decode the future trajectory
        predictions = []
        decoder_input = embedded[:, -1:, :]
        decoder_h, decoder_c = h_n, c_n

        for step in range(future_steps):
            # Broadcast the fused context along the time dimension
            context_expanded = fused_context.unsqueeze(1)

            # Concatenate embedded input with the context
            decoder_input_combined = torch.cat([decoder_input, context_expanded], dim=-1)

            # One LSTM decoding step
            decoder_output, (decoder_h, decoder_c) = self.decoder(
                decoder_input_combined, (decoder_h, decoder_c)
            )

            # Predict the next position
            next_pos = self.output(decoder_output)
            predictions.append(next_pos)

            # Feed the predicted position back as the next input
            decoder_input = self.embedding(next_pos)

        # Stack step predictions along the time axis
        pred_trajectory = torch.cat(predictions, dim=1)

        return pred_trajectory


# Synthetic satellite trajectory dataset generator
class SatelliteTrajectoryDataset(Dataset):
    def __init__(self, size=1000, seq_len=10, pred_len=3, num_neighbors=5, map_size=100):
        """Eagerly generate *size* synthetic (history, future, neighbors) samples.

        Args:
            size: number of samples to generate.
            seq_len: number of observed (history) points per sample.
            pred_len: number of future points per sample.
            num_neighbors: neighbor tracks generated around each sample.
            map_size: side length of the square region positions are drawn from.
        """
        self.size = size
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.num_neighbors = num_neighbors
        self.map_size = map_size

        print(f"生成 {size} 个卫星航迹样本...")
        self.data = []

        # All samples are generated up front so __getitem__ is a plain lookup.
        for i in tqdm(range(size)):
            trajectory_data = self._generate_trajectory_sample()
            self.data.append(trajectory_data)

    def _generate_trajectory_sample(self):
        """Generate a single (history, future, neighbors) sample."""
        # Pick one of five motion patterns at random
        trajectory_type = random.choice(['linear', 'curved', 'zigzag', 'circular', 'random_walk'])

        if trajectory_type == 'linear':
            history, future = self._generate_linear_trajectory()
        elif trajectory_type == 'curved':
            history, future = self._generate_curved_trajectory()
        elif trajectory_type == 'zigzag':
            history, future = self._generate_zigzag_trajectory()
        elif trajectory_type == 'circular':
            history, future = self._generate_circular_trajectory()
        else:
            history, future = self._generate_random_walk_trajectory()

        # Neighbor tracks are sampled around the main trajectory
        neighbors = self._generate_neighbor_trajectories(history)

        return history, future, neighbors

    def _generate_linear_trajectory(self):
        """Generate a constant-velocity straight-line trajectory."""
        # Random start point, kept away from the map border
        start_x = random.uniform(10, self.map_size - 10)
        start_y = random.uniform(10, self.map_size - 10)

        # Random heading and speed
        angle = random.uniform(0, 2 * math.pi)
        speed = random.uniform(0.5, 2.0)

        dx = speed * math.cos(angle)
        dy = speed * math.sin(angle)

        # Observed history
        history = []
        for i in range(self.seq_len):
            x = start_x + i * dx
            y = start_y + i * dy
            history.append([x, y])

        # Future continuation of the same line
        future = []
        for i in range(self.pred_len):
            x = start_x + (self.seq_len + i) * dx
            y = start_y + (self.seq_len + i) * dy
            future.append([x, y])

        return torch.tensor(history, dtype=torch.float32), torch.tensor(future, dtype=torch.float32)

    def _generate_curved_trajectory(self):
        """Generate a trajectory with a sinusoidal lateral offset."""
        # Random start point
        start_x = random.uniform(20, self.map_size - 20)
        start_y = random.uniform(20, self.map_size - 20)

        # Random curvature parameters
        amplitude = random.uniform(5, 15)
        frequency = random.uniform(0.1, 0.3)
        base_speed = random.uniform(0.8, 1.5)  # NOTE(review): drawn but never used below
        direction = random.uniform(0, 2 * math.pi)

        # History: forward motion plus a sinusoidal offset perpendicular to the heading
        history = []
        for i in range(self.seq_len):
            t = i * 0.5
            curve_offset = amplitude * math.sin(frequency * t)

            x = start_x + t * math.cos(direction) + curve_offset * math.cos(direction + math.pi / 2)
            y = start_y + t * math.sin(direction) + curve_offset * math.sin(direction + math.pi / 2)
            history.append([x, y])

        future = []
        for i in range(self.pred_len):
            t = (self.seq_len + i) * 0.5
            curve_offset = amplitude * math.sin(frequency * t)

            x = start_x + t * math.cos(direction) + curve_offset * math.cos(direction + math.pi / 2)
            y = start_y + t * math.sin(direction) + curve_offset * math.sin(direction + math.pi / 2)
            future.append([x, y])

        return torch.tensor(history, dtype=torch.float32), torch.tensor(future, dtype=torch.float32)

    def _generate_zigzag_trajectory(self):
        """Generate a zigzag trajectory with random turns at segment ends."""
        start_x = random.uniform(20, self.map_size - 20)
        start_y = random.uniform(20, self.map_size - 20)

        # Zigzag parameters: segment length, turn size, initial heading
        segment_length = random.uniform(3, 6)
        turn_angle = random.uniform(math.pi / 4, math.pi / 2)
        base_direction = random.uniform(0, 2 * math.pi)

        history = []
        current_x, current_y = start_x, start_y
        current_direction = base_direction
        step_count = 0

        for i in range(self.seq_len):
            history.append([current_x, current_y])

            # Unit-speed step along the current heading
            current_x += math.cos(current_direction)
            current_y += math.sin(current_direction)
            step_count += 1

            # Turn left or right once a segment is completed
            if step_count >= segment_length:
                current_direction += turn_angle * (1 if random.random() > 0.5 else -1)
                step_count = 0

        # Future: continue the same zigzag process (state carries over)
        future = []
        for i in range(self.pred_len):
            future.append([current_x, current_y])

            current_x += math.cos(current_direction)
            current_y += math.sin(current_direction)
            step_count += 1

            if step_count >= segment_length:
                current_direction += turn_angle * (1 if random.random() > 0.5 else -1)
                step_count = 0

        return torch.tensor(history, dtype=torch.float32), torch.tensor(future, dtype=torch.float32)

    def _generate_circular_trajectory(self):
        """Generate a constant-angular-speed circular trajectory."""
        # Circle center
        center_x = random.uniform(30, self.map_size - 30)
        center_y = random.uniform(30, self.map_size - 30)

        # Radius, angular speed and phase
        radius = random.uniform(10, 25)
        angular_speed = random.uniform(0.1, 0.3)
        start_angle = random.uniform(0, 2 * math.pi)

        history = []
        for i in range(self.seq_len):
            angle = start_angle + i * angular_speed
            x = center_x + radius * math.cos(angle)
            y = center_y + radius * math.sin(angle)
            history.append([x, y])

        future = []
        for i in range(self.pred_len):
            angle = start_angle + (self.seq_len + i) * angular_speed
            x = center_x + radius * math.cos(angle)
            y = center_y + radius * math.sin(angle)
            future.append([x, y])

        return torch.tensor(history, dtype=torch.float32), torch.tensor(future, dtype=torch.float32)

    def _generate_random_walk_trajectory(self):
        """Generate a bounded random-walk trajectory."""
        start_x = random.uniform(20, self.map_size - 20)
        start_y = random.uniform(20, self.map_size - 20)

        history = []
        current_x, current_y = start_x, start_y

        for i in range(self.seq_len):
            history.append([current_x, current_y])

            # Random step size and direction
            step_size = random.uniform(0.5, 2.0)
            direction = random.uniform(0, 2 * math.pi)

            current_x += step_size * math.cos(direction)
            current_y += step_size * math.sin(direction)

            # Clamp to the map with a 5-unit margin
            current_x = max(5, min(self.map_size - 5, current_x))
            current_y = max(5, min(self.map_size - 5, current_y))

        future = []
        for i in range(self.pred_len):
            step_size = random.uniform(0.5, 2.0)
            direction = random.uniform(0, 2 * math.pi)

            current_x += step_size * math.cos(direction)
            current_y += step_size * math.sin(direction)

            current_x = max(5, min(self.map_size - 5, current_x))
            current_y = max(5, min(self.map_size - 5, current_y))

            future.append([current_x, current_y])

        return torch.tensor(history, dtype=torch.float32), torch.tensor(future, dtype=torch.float32)

    def _generate_neighbor_trajectories(self, main_trajectory):
        """Generate neighbor tracks as randomly offset copies of the main track.

        Args:
            main_trajectory: [seq_len, 2] tensor of the main trajectory.

        Returns:
            [num_neighbors, seq_len, 2] float32 tensor.
        """
        neighbors = []

        for _ in range(self.num_neighbors):
            # Each neighbor shadows the main track with independent per-step jitter
            neighbor = []

            for i in range(self.seq_len):
                # Main-track position at this step (a 1-D tensor)
                main_pos = main_trajectory[i]

                # Random offset around the main position
                offset_x = random.uniform(-15, 15)
                offset_y = random.uniform(-15, 15)

                # NOTE(review): main_pos elements are 0-dim tensors, so these
                # coordinates are tensors too; torch.tensor below still converts
                # the nested list, but it may be slow/warn on newer torch.
                neighbor_x = main_pos[0] + offset_x
                neighbor_y = main_pos[1] + offset_y

                # Clamp into the map
                neighbor_x = max(0, min(self.map_size, neighbor_x))
                neighbor_y = max(0, min(self.map_size, neighbor_y))

                neighbor.append([neighbor_x, neighbor_y])

            neighbors.append(neighbor)

        return torch.tensor(neighbors, dtype=torch.float32)

    def __len__(self):
        """Number of pre-generated samples."""
        return self.size

    def __getitem__(self, idx):
        """Return the (history, future, neighbors) tuple at *idx*."""
        return self.data[idx]


def get_args():
    """Parse command-line arguments for training and smart compression.

    Returns:
        argparse.Namespace with model, data, training and compression options.
    """
    parser = argparse.ArgumentParser(description="卫星航迹识别模型训练和智能压缩示例")

    # Model parameters
    parser.add_argument("--model", "-m", default="social_lstm",
                        choices=["social_lstm"],
                        help="要使用的模型架构")
    parser.add_argument("--hidden-size", type=int, default=128,
                        help="LSTM隐藏层大小")
    parser.add_argument("--num-layers", type=int, default=2,
                        help="LSTM层数")
    parser.add_argument("--embedding-size", type=int, default=64,
                        help="嵌入层维度")

    # Data parameters
    parser.add_argument("--output-dir", "-o", default="./output",
                        help="模型和结果的输出目录")
    parser.add_argument("--seq-len", type=int, default=10,
                        help="输入序列长度")
    parser.add_argument("--pred-len", type=int, default=3,
                        help="预测序列长度")
    parser.add_argument("--num-neighbors", type=int, default=5,
                        help="邻居数量")

    # Training parameters
    parser.add_argument("--epochs", "-e", type=int, default=50,
                        help="训练轮数")
    parser.add_argument("--batch-size", "-b", type=int, default=32,
                        help="批次大小")
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.001,
                        help="学习率")
    parser.add_argument("--weight-decay", type=float, default=1e-4,
                        help="权重衰减")
    parser.add_argument("--dataset-size", type=int, default=1000,
                        help="数据集大小")

    # Smart-compression parameters.
    # FIX: '%' must be escaped as '%%' in argparse help strings — argparse
    # %-formats help text, and a lone '%' made `--help` raise ValueError.
    parser.add_argument("--max-performance-loss", type=float, default=0.25,
                        help="最大允许的性能损失比例 (默认25%%)")
    parser.add_argument("--target-compression-ratio", type=float, default=4.0,
                        help="目标压缩比 (默认4倍)")
    parser.add_argument("--compression-strategy", choices=['conservative', 'balanced', 'aggressive'],
                        default='balanced', help="压缩策略")

    # Legacy compression parameters (manual mode)
    parser.add_argument("--bits", type=int, default=8,
                        help="量化位数（智能模式下会自动调整）")
    parser.add_argument("--sparsity", type=float, default=0.3,
                        help="剪枝稀疏度（智能模式下会自动调整）")

    # Mode selection
    parser.add_argument("--mode", choices=['train', 'compress', 'both'], default='both',
                        help="运行模式")
    parser.add_argument("--compression-mode", choices=['smart', 'manual'], default='smart',
                        help="压缩模式：smart=智能压缩，manual=手动参数")
    parser.add_argument("--pretrained-path", type=str, default=None,
                        help="预训练模型路径")

    return parser.parse_args()


def load_trajectory_model(args):
    """Build the Social-LSTM trajectory model from CLI arguments.

    Args:
        args: parsed argparse namespace providing embedding_size,
            hidden_size and num_layers.

    Returns:
        A freshly initialized SocialLSTM instance (2-D input, dropout 0.1).
    """
    print(f"加载 Social-LSTM 航迹识别模型...")

    return SocialLSTM(
        input_size=2,
        embedding_size=args.embedding_size,
        hidden_size=args.hidden_size,
        num_layers=args.num_layers,
        dropout=0.1,
    )


def create_dataloaders(args):
    """Build train/test DataLoaders over a synthetic trajectory dataset.

    The generated dataset is split 80/20 into train and test subsets.

    Args:
        args: parsed namespace with dataset_size, seq_len, pred_len,
            num_neighbors and batch_size.

    Returns:
        (train_loader, test_loader) tuple of DataLoader objects.
    """
    print(f"准备航迹识别数据集...")

    dataset = SatelliteTrajectoryDataset(
        size=args.dataset_size,
        seq_len=args.seq_len,
        pred_len=args.pred_len,
        num_neighbors=args.num_neighbors,
    )

    # 80/20 train/test split.
    n_train = int(0.8 * len(dataset))
    train_dataset, test_dataset = random_split(dataset, [n_train, len(dataset) - n_train])

    # Shared loader settings; only shuffling differs between the splits.
    common = dict(batch_size=args.batch_size, num_workers=2, pin_memory=True)
    train_loader = DataLoader(train_dataset, shuffle=True, **common)
    test_loader = DataLoader(test_dataset, shuffle=False, **common)

    print(f"训练集大小: {len(train_dataset)}")
    print(f"测试集大小: {len(test_dataset)}")

    return train_loader, test_loader


def calculate_ade(pred, target):
    """Average Displacement Error: mean Euclidean distance over all predicted points."""
    squared_dist = (pred - target).pow(2).sum(dim=2)
    return squared_dist.sqrt().mean()


def calculate_fde(pred, target):
    """Final Displacement Error: mean Euclidean distance at the last time step."""
    final_gap = pred[:, -1] - target[:, -1]
    return final_gap.pow(2).sum(dim=1).sqrt().mean()


def train_model(model, train_loader, test_loader, device, args):
    """Train the trajectory-prediction model and keep the best checkpoint.

    Runs args.epochs of MSE training with Adam, gradient clipping and a
    StepLR schedule, evaluating ADE/FDE on the test set after every epoch.
    The weights with the lowest test ADE are restored before returning.

    Args:
        model: network accepting (history, neighbors, future_steps=...).
        train_loader: DataLoader of (history, future, neighbors) batches.
        test_loader: DataLoader used for per-epoch evaluation.
        device: torch device to run on.
        args: namespace with epochs, learning_rate, weight_decay,
            pred_len and output_dir.

    Returns:
        (model, best_test_ade, history) where history is a dict of
        per-epoch loss/ADE/FDE curves plus 'best_test_ade'.
    """
    print("开始训练卫星航迹识别模型...")

    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    # Halve the learning rate every 15 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.5)

    # Per-epoch training history.
    history = {
        'train_losses': [],
        'train_ades': [],
        'test_ades': [],
        'test_fdes': [],
    }

    best_test_ade = float('inf')
    best_model_state = None

    for epoch in range(args.epochs):
        # ---- training phase ----
        model.train()
        loss_sum = 0.0
        ade_sum = 0.0
        batches = 0

        progress = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{args.epochs}')
        for hist, future, neighbors in progress:
            hist = hist.to(device)
            future = future.to(device)
            neighbors = neighbors.to(device)

            optimizer.zero_grad()
            pred = model(hist, neighbors, future_steps=args.pred_len)
            loss = criterion(pred, future)
            loss.backward()

            # Clip gradients to stabilize LSTM training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            # Track ADE without building a graph.
            with torch.no_grad():
                ade = calculate_ade(pred, future)

            loss_sum += loss.item()
            ade_sum += ade.item()
            batches += 1

            progress.set_postfix({
                'Loss': f'{loss.item():.4f}',
                'ADE': f'{ade.item():.4f}'
            })

        avg_train_loss = loss_sum / batches
        train_ade = ade_sum / batches

        # ---- evaluation phase ----
        test_ade, test_fde = evaluate_trajectory_model(model, test_loader, device)

        history['train_losses'].append(avg_train_loss)
        history['train_ades'].append(train_ade)
        history['test_ades'].append(test_ade)
        history['test_fdes'].append(test_fde)

        # Snapshot the best weights by test ADE.
        if test_ade < best_test_ade:
            best_test_ade = test_ade
            best_model_state = copy.deepcopy(model.state_dict())

        scheduler.step()

        print(f'Epoch [{epoch + 1}/{args.epochs}] - '
              f'Train Loss: {avg_train_loss:.4f}, '
              f'Train ADE: {train_ade:.4f}, '
              f'Test ADE: {test_ade:.4f}, '
              f'Test FDE: {test_fde:.4f}')

    # Restore the best checkpoint seen during training.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f'训练完成! 最佳测试ADE: {best_test_ade:.4f}')

    plot_training_curves(history['train_losses'], history['train_ades'],
                         history['test_ades'], history['test_fdes'], args.output_dir)

    history['best_test_ade'] = best_test_ade
    return model, best_test_ade, history


def evaluate_trajectory_model(model, dataloader, device):
    """Compute mean ADE/FDE of *model* over every batch of *dataloader*.

    Args:
        model: network accepting (history, neighbors, future_steps=...).
        dataloader: iterable of (history, future, neighbors) batches.
        device: torch device for evaluation.

    Returns:
        (avg_ade, avg_fde); both 0 when the loader yields no batches.
    """
    model.eval()
    ade_total = 0.0
    fde_total = 0.0
    batches = 0

    with torch.no_grad():
        for history, future, neighbors in dataloader:
            history = history.to(device)
            future = future.to(device)
            neighbors = neighbors.to(device)

            pred = model(history, neighbors, future_steps=future.shape[1])

            ade_total += calculate_ade(pred, future).item()
            fde_total += calculate_fde(pred, future).item()
            batches += 1

    if batches == 0:
        return 0, 0
    return ade_total / batches, fde_total / batches


def plot_training_curves(train_losses, train_ades, test_ades, test_fdes, output_dir):
    """Save loss/ADE/FDE curves as training_curves.png under *output_dir*.

    Args:
        train_losses / train_ades / test_ades / test_fdes: per-epoch lists.
        output_dir: directory the PNG is written into (must exist).
    """
    xs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(15, 5))

    # Panel 1: MSE training loss.
    plt.subplot(1, 3, 1)
    plt.plot(xs, train_losses, 'b-', label='Training Loss')
    plt.title('Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('MSE Loss')
    plt.legend()
    plt.grid(True)

    # Panel 2: train vs. test ADE.
    plt.subplot(1, 3, 2)
    plt.plot(xs, train_ades, 'b-', label='Training ADE')
    plt.plot(xs, test_ades, 'r-', label='Test ADE')
    plt.title('Average Displacement Error')
    plt.xlabel('Epoch')
    plt.ylabel('ADE')
    plt.legend()
    plt.grid(True)

    # Panel 3: test FDE.
    plt.subplot(1, 3, 3)
    plt.plot(xs, test_fdes, 'g-', label='Test FDE')
    plt.title('Final Displacement Error')
    plt.xlabel('Epoch')
    plt.ylabel('FDE')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'training_curves.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"训练曲线已保存到: {out_path}")


def visualize_trajectory_predictions(model, dataloader, device, output_dir, num_samples=6):
    """Plot predicted vs. ground-truth trajectories for a few test batches.

    Draws the first sample of up to *num_samples* batches into a 2x3 grid
    and saves it as trajectory_predictions.png under *output_dir*.
    """
    model.eval()

    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    fig.suptitle('Satellite Trajectory Prediction Results', fontsize=16)
    axes = axes.flatten()

    with torch.no_grad():
        for idx, (history, future, neighbors) in enumerate(dataloader):
            if idx >= num_samples:
                break

            history = history.to(device)
            future = future.to(device)
            neighbors = neighbors.to(device)
            pred = model(history, neighbors, future_steps=future.shape[1])

            # Visualize only the first sample of the batch.
            hist_np = history[0].cpu().numpy()
            gt_np = future[0].cpu().numpy()
            pred_np = pred[0].cpu().numpy()
            nbr_np = neighbors[0].cpu().numpy()

            # Per-sample error metrics for the subplot title.
            ade = calculate_ade(pred[0:1], future[0:1]).item()
            fde = calculate_fde(pred[0:1], future[0:1]).item()

            ax = axes[idx]

            # Neighbor tracks in a faint color for context.
            for nbr in nbr_np:
                ax.plot(nbr[:, 0], nbr[:, 1], 'lightgray', alpha=0.5, linewidth=1)

            # History, ground truth and prediction.
            ax.plot(hist_np[:, 0], hist_np[:, 1], 'b-o', label='History', linewidth=2, markersize=4)
            ax.plot(gt_np[:, 0], gt_np[:, 1], 'g-o', label='Ground Truth', linewidth=2, markersize=4)
            ax.plot(pred_np[:, 0], pred_np[:, 1], 'r-o', label='Prediction', linewidth=2, markersize=4)

            # Dashed connectors from the last observed point.
            if len(hist_np) > 0 and len(gt_np) > 0:
                ax.plot([hist_np[-1, 0], gt_np[0, 0]],
                        [hist_np[-1, 1], gt_np[0, 1]], 'g--', alpha=0.5)
                ax.plot([hist_np[-1, 0], pred_np[0, 0]],
                        [hist_np[-1, 1], pred_np[0, 1]], 'r--', alpha=0.5)

            ax.set_title(f'Sample {idx + 1}\nADE: {ade:.3f}, FDE: {fde:.3f}')
            ax.set_xlabel('X Position')
            ax.set_ylabel('Y Position')
            ax.legend()
            ax.grid(True, alpha=0.3)
            ax.set_aspect('equal')

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'trajectory_predictions.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"轨迹预测结果已保存到: {out_path}")


def visualize_dataset_samples(dataloader, output_dir, num_samples=8):
    """Plot the first sample of up to *num_samples* batches in a 2x4 grid.

    Saves the figure as dataset_samples.png under *output_dir*.
    """
    fig, axes = plt.subplots(2, 4, figsize=(16, 8))
    fig.suptitle('Satellite Trajectory Dataset Samples', fontsize=16)
    axes = axes.flatten()

    for idx, (history, future, neighbors) in enumerate(dataloader):
        if idx >= num_samples:
            break

        # First sample of the batch only.
        hist_np = history[0].numpy()
        future_np = future[0].numpy()
        nbr_np = neighbors[0].numpy()

        ax = axes[idx]

        # Faint neighbor tracks for context.
        for nbr in nbr_np:
            ax.plot(nbr[:, 0], nbr[:, 1], 'lightgray', alpha=0.6, linewidth=1)

        # Main trajectory: observed history and true future.
        ax.plot(hist_np[:, 0], hist_np[:, 1], 'b-o', label='History', linewidth=2, markersize=3)
        ax.plot(future_np[:, 0], future_np[:, 1], 'r-o', label='Future', linewidth=2, markersize=3)

        # Dashed connector between history and future.
        if len(hist_np) > 0 and len(future_np) > 0:
            ax.plot([hist_np[-1, 0], future_np[0, 0]],
                    [hist_np[-1, 1], future_np[0, 1]], 'k--', alpha=0.5)

        ax.set_title(f'Sample {idx + 1}')
        ax.set_xlabel('X Position')
        ax.set_ylabel('Y Position')
        ax.legend()
        ax.grid(True, alpha=0.3)
        ax.set_aspect('equal')

    plt.tight_layout()
    out_path = os.path.join(output_dir, 'dataset_samples.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"数据集样本已保存到: {out_path}")


def get_model_size(model):
    """Return the total parameter memory footprint of *model* in megabytes."""
    total_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    return total_bytes / (1024 * 1024)


def get_layer_importance_simple(model):
    """Score each weight tensor by its L2 norm (fallback importance metric).

    Args:
        model: any nn.Module.

    Returns:
        Dict mapping parameter names containing 'weight' to their L2 norm.
    """
    return {
        name: param.data.norm(p=2).item()
        for name, param in model.named_parameters()
        if param.requires_grad and 'weight' in name
    }


def get_layer_importance(model, test_loader, device, sample_size=100):
    """Estimate per-parameter importance from accumulated gradient magnitude.

    Runs forward/backward passes over up to *sample_size* samples from
    *test_loader*, accumulating |grad| per trainable parameter; a
    parameter's importance is its mean accumulated gradient magnitude.
    Falls back to the simple weight-norm heuristic if gradient
    computation fails.

    Args:
        model: network accepting (history, neighbors, future_steps=...).
        test_loader: iterable of (history, future, neighbors) batches.
        device: torch device for evaluation.
        sample_size: maximum number of samples to process.

    Returns:
        Dict mapping parameter names to importance scores.
    """
    # FIX: capture the mode BEFORE the try block — previously it was assigned
    # inside the try, so a failure on that very line made the except handler
    # itself raise NameError. Restoring in `finally` also guarantees the
    # caller's train/eval mode survives both success and fallback paths.
    original_mode = model.training
    try:
        # Train mode so gradients flow as they would during training.
        model.train()

        # Per-parameter |grad| accumulator.
        layer_gradients = {
            name: torch.zeros_like(param)
            for name, param in model.named_parameters()
            if param.requires_grad
        }

        sample_count = 0
        with torch.enable_grad():
            for history, future, neighbors in test_loader:
                if sample_count >= sample_size:
                    break

                history = history.to(device)
                future = future.to(device)
                neighbors = neighbors.to(device)

                model.zero_grad()
                pred = model(history, neighbors, future_steps=future.shape[1])
                loss = nn.MSELoss()(pred, future)
                loss.backward()

                # Accumulate absolute gradients.
                for name, param in model.named_parameters():
                    if param.grad is not None:
                        layer_gradients[name] += param.grad.abs()

                sample_count += history.shape[0]

        # Mean accumulated gradient magnitude per parameter.
        return {name: grads.mean().item() for name, grads in layer_gradients.items()}

    except Exception as e:
        print(f"梯度计算方法失败: {e}")
        print("使用简单的权重范数方法计算层重要性...")
        return get_layer_importance_simple(model)
    finally:
        # Always restore the caller's train/eval mode.
        model.train(original_mode)


def adaptive_layer_compression(model, layer_importance, global_sparsity, global_bits):
    """Compress a copy of *model* layer by layer, scaled by layer importance.

    Layers in the top importance quartile are compressed conservatively,
    the bottom quartile aggressively, and the rest with the global
    settings. Each selected weight tensor is pruned, then quantized, in
    place on a deep copy of the model; the original is left untouched.

    Args:
        model: source network.
        layer_importance: dict of parameter name -> importance score.
        global_sparsity: baseline pruning sparsity.
        global_bits: baseline quantization bit-width.

    Returns:
        (compressed_model, compression_stats) where compression_stats is
        a list of per-layer dicts describing the applied settings.
    """
    # Importance quartile cut-offs.
    scores = list(layer_importance.values())
    hi_cut = np.percentile(scores, 75)
    lo_cut = np.percentile(scores, 25)

    compressed_model = copy.deepcopy(model)
    compression_stats = []

    for name, param in compressed_model.named_parameters():
        if 'weight' not in name or name not in layer_importance:
            continue

        importance = layer_importance[name]

        # Choose per-layer settings from the importance quartile.
        if importance >= hi_cut:
            # Important layer: prune less, keep more bits.
            layer_sparsity = max(0.1, global_sparsity * 0.5)
            layer_bits = max(8, global_bits + 2)
        elif importance <= lo_cut:
            # Unimportant layer: prune more, use fewer bits.
            layer_sparsity = min(0.8, global_sparsity * 1.5)
            layer_bits = max(4, global_bits - 1)
        else:
            layer_sparsity = global_sparsity
            layer_bits = global_bits

        # Apply pruning then quantization in place.
        with torch.no_grad():
            pruned_weight, mask = prune_weights(param.data, layer_sparsity)
            quantized_weight, scale, zero_point, q_weight = quantize_weights(pruned_weight, layer_bits)
            param.copy_(quantized_weight)

        compression_stats.append({
            'layer': name,
            'importance': importance,
            'sparsity': layer_sparsity,
            'bits': layer_bits,
            'original_params': param.numel(),
            'nonzero_params': torch.count_nonzero(param).item()
        })

    return compressed_model, compression_stats


def smart_compression_search(model, test_loader, device, args):
    """Grid-search pruning/quantization settings for the best compression.

    Evaluates every (sparsity, bits) combination from a strategy-specific
    search space, keeping the configuration with the highest compression
    ratio whose ADE degradation stays within ``args.max_performance_loss``.
    Falls back to the best available trade-off when no configuration
    satisfies both constraints. The full search log is written to
    ``compression_search_results.json`` in ``args.output_dir``.

    Args:
        model: trained model to compress (not modified in place).
        test_loader: DataLoader used to measure ADE/FDE.
        device: torch device used for evaluation.
        args: namespace providing compression_strategy, max_performance_loss,
            target_compression_ratio and output_dir.

    Returns:
        (final_compressed_model, best_config, search_results)
    """
    print(f"开始智能压缩搜索...")
    print(f"目标: 性能损失 < {args.max_performance_loss * 100:.1f}%, 压缩率 > {args.target_compression_ratio:.1f}x")

    # Measure baseline accuracy and memory size of the uncompressed model.
    baseline_ade, baseline_fde = evaluate_trajectory_model(model, test_loader, device)
    baseline_size = get_model_size(model)

    print(f"基线性能: ADE={baseline_ade:.4f}, FDE={baseline_fde:.4f}, 大小={baseline_size:.2f}MB")

    # Per-layer importance scores drive the adaptive compression below.
    print("计算层重要性...")
    layer_importance = get_layer_importance(model, test_loader, device, sample_size=50)

    # Define the search space according to the chosen strategy.
    if args.compression_strategy == 'conservative':
        sparsity_candidates = [0.1, 0.2, 0.3, 0.4]
        bits_candidates = [8, 6]
    elif args.compression_strategy == 'balanced':
        sparsity_candidates = [0.2, 0.3, 0.4, 0.5, 0.6]
        bits_candidates = [8, 6, 5]
    else:  # aggressive
        sparsity_candidates = [0.4, 0.5, 0.6, 0.7, 0.8]
        bits_candidates = [6, 5, 4]

    best_config = None
    best_compression_ratio = 1.0
    search_results = []

    print(f"\n搜索压缩配置...")
    total_combinations = len(sparsity_candidates) * len(bits_candidates)

    with tqdm(total=total_combinations, desc="压缩搜索") as pbar:
        for sparsity in sparsity_candidates:
            for bits in bits_candidates:
                try:
                    # Build a candidate compressed model for this combination.
                    start_time = time.time()
                    compressed_model, compression_stats = adaptive_layer_compression(
                        model, layer_importance, sparsity, bits
                    )
                    compression_time = time.time() - start_time

                    # Evaluate the candidate's accuracy and size.
                    compressed_ade, compressed_fde = evaluate_trajectory_model(
                        compressed_model, test_loader, device
                    )
                    compressed_size = get_model_size(compressed_model)

                    # Relative performance loss and size reduction.
                    ade_loss = (compressed_ade - baseline_ade) / baseline_ade
                    compression_ratio = baseline_size / compressed_size

                    # Record this configuration's outcome.
                    result = {
                        'sparsity': sparsity,
                        'bits': bits,
                        'ade': compressed_ade,
                        'fde': compressed_fde,
                        'ade_loss': ade_loss,
                        'compression_ratio': compression_ratio,
                        'compression_time': compression_time,
                        'size_mb': compressed_size,
                        'compression_stats': compression_stats
                    }
                    search_results.append(result)

                    # Keep the best configuration that satisfies both constraints.
                    if (ade_loss <= args.max_performance_loss and
                            compression_ratio >= args.target_compression_ratio):
                        if compression_ratio > best_compression_ratio:
                            best_config = result
                            best_compression_ratio = compression_ratio

                    pbar.set_postfix({
                        'ADE Loss': f'{ade_loss * 100:.1f}%',
                        'Compression': f'{compression_ratio:.1f}x',
                        'Best': f'{best_compression_ratio:.1f}x' if best_config else 'None'
                    })

                except Exception as e:
                    print(f"压缩配置 (sparsity={sparsity}, bits={bits}) 失败: {e}")

                pbar.update(1)

    # If nothing satisfied every constraint, pick the best trade-off:
    # prefer configs within 1.5x of the allowed loss, else minimize loss.
    if best_config is None:
        print("未找到满足所有约束的配置，选择最佳权衡...")
        valid_results = [r for r in search_results if r['ade_loss'] <= args.max_performance_loss * 1.5]
        if valid_results:
            best_config = max(valid_results, key=lambda x: x['compression_ratio'])
        else:
            # NOTE(review): min() raises ValueError if every configuration
            # failed (empty search_results) — confirm that case cannot occur.
            best_config = min(search_results, key=lambda x: x['ade_loss'])

    # Report the winning configuration.
    print(f"\n智能压缩搜索完成!")
    print(f"最佳配置: 稀疏度={best_config['sparsity']:.1f}, 量化位数={best_config['bits']}")
    print(f"性能: ADE={best_config['ade']:.4f} (损失{best_config['ade_loss'] * 100:+.1f}%)")
    print(f"压缩率: {best_config['compression_ratio']:.1f}x")

    # Rebuild the final compressed model with the winning settings.
    final_compressed_model, final_stats = adaptive_layer_compression(
        model, layer_importance, best_config['sparsity'], best_config['bits']
    )

    # Persist the search log (per-layer stats are dropped: not JSON-serializable).
    search_results_path = os.path.join(args.output_dir, "compression_search_results.json")
    with open(search_results_path, 'w') as f:
        serializable_results = []
        for result in search_results:
            serializable_result = {k: v for k, v in result.items() if k != 'compression_stats'}
            serializable_results.append(serializable_result)
        json.dump({
            'search_results': serializable_results,
            'best_config': {k: v for k, v in best_config.items() if k != 'compression_stats'},
            'baseline_performance': {'ade': baseline_ade, 'fde': baseline_fde}
        }, f, indent=2)

    return final_compressed_model, best_config, search_results


def quantize_weights(weight, bits=8):
    """Fake-quantize a tensor to `bits` bits using an asymmetric per-tensor scheme.

    Args:
        weight: tensor to quantize.
        bits: target bit-width (quantized range is [0, 2**bits - 1]).

    Returns:
        (dequantized, scale, zero_point, quantized). For a constant tensor
        (min == max) the input is returned unchanged with None metadata.
    """
    min_val, max_val = weight.min(), weight.max()
    if min_val == max_val:
        # Degenerate range: nothing to quantize.
        return weight.clone(), None, None, None

    qmin = 0
    qmax = (1 << bits) - 1

    # Affine mapping: real = (q - zero_point) * scale.
    scale = (max_val - min_val) / (qmax - qmin)
    zero_point = qmin - min_val / scale

    quantized = torch.clamp(torch.round(weight / scale + zero_point), qmin, qmax)
    dequantized = (quantized - zero_point) * scale

    return dequantized, scale, zero_point, quantized


def prune_weights(weight, sparsity=0.7):
    """Magnitude-prune `weight`, zeroing the smallest-magnitude entries.

    Roughly a `sparsity` fraction of entries is removed (elements whose
    magnitude ties the threshold are kept, matching the `>=` comparison).

    Args:
        weight: tensor to prune.
        sparsity: fraction of entries to zero out, in [0, 1].

    Returns:
        (pruned_weight, mask) where mask is a bool tensor marking kept
        entries, or None when sparsity <= 0 (no pruning performed).
    """
    if sparsity <= 0:
        return weight.clone(), None

    weight_abs = weight.abs().flatten()
    k = int(weight_abs.numel() * sparsity)

    if k >= weight_abs.numel():
        # Everything pruned.
        return torch.zeros_like(weight), torch.zeros_like(weight).bool()

    if k <= 0:
        # Sparsity too small for this tensor size: torch.kthvalue requires
        # k >= 1 (1-indexed), so keep every weight instead of crashing.
        return weight.clone(), torch.ones_like(weight, dtype=torch.bool)

    # Threshold at the k-th smallest magnitude; keep everything >= it.
    threshold = torch.kthvalue(weight_abs, k).values
    mask = (weight.abs() >= threshold)
    pruned = weight * mask.float()

    return pruned, mask


def compress_model(model, bits=4, sparsity=0.7):
    """Uniform prune+quantize compression used by the manual mode.

    Every weight tensor is pruned at the given sparsity and then
    fake-quantized to `bits` bits.

    Args:
        model: source model (a deep copy is compressed).
        bits: quantization bit-width applied to all weight tensors.
        sparsity: pruning sparsity applied to all weight tensors.

    Returns:
        (compressed_model, overall_sparsity) with the achieved zero fraction.
    """
    print(f"压缩模型（量化位数={bits}, 剪枝稀疏度={sparsity}）...")
    clone = copy.deepcopy(model)

    n_total = 0
    n_zero = 0

    with torch.no_grad():
        for name, tensor in clone.named_parameters():
            if 'weight' not in name:
                continue
            n_total += tensor.numel()

            pruned, _ = prune_weights(tensor.data, sparsity)
            n_zero += tensor.numel() - torch.count_nonzero(pruned).item()

            dequantized, _, _, _ = quantize_weights(pruned, bits)
            tensor.copy_(dequantized)

    overall_sparsity = n_zero / n_total if n_total > 0 else 0
    print(f"整体稀疏度: {overall_sparsity:.4f}")

    return clone, overall_sparsity


def save_compressed_model(model, path, compress=True):
    """Serialize `model`'s state dict to `path`, optionally gzip-compressed.

    The checkpoint is written to a `.temp` file first, then either gzipped
    into `path` (compress=True) or renamed to it (compress=False).

    Args:
        model: model whose state_dict is saved.
        path: destination file path.
        compress: gzip the checkpoint at maximum compression level.

    Returns:
        Final file size in bytes.
    """
    temp_path = path + ".temp"
    torch.save(model.state_dict(), temp_path)

    try:
        if compress:
            # Stream in 1 MiB chunks so large checkpoints are not read
            # into memory all at once.
            with open(temp_path, 'rb') as f_in:
                with gzip.open(path, 'wb', compresslevel=9) as f_out:
                    for chunk in iter(lambda: f_in.read(1 << 20), b''):
                        f_out.write(chunk)
            os.remove(temp_path)
        else:
            os.rename(temp_path, path)
    except Exception:
        # Don't leave a stale temp file behind on failure.
        if os.path.exists(temp_path):
            os.remove(temp_path)
        raise

    return os.path.getsize(path)


def _torch_load_cpu(path):
    """torch.load onto CPU, preferring weights_only when available."""
    try:
        return torch.load(path, map_location='cpu', weights_only=True)
    except TypeError:
        # Older PyTorch without the weights_only argument.
        return torch.load(path, map_location='cpu')


def _build_model_from_state(state_dict, args, device=None):
    """Instantiate a SocialLSTM per `args` and load `state_dict` into it."""
    model = SocialLSTM(
        input_size=2,
        embedding_size=args.embedding_size,
        hidden_size=args.hidden_size,
        num_layers=args.num_layers,
        dropout=0.1
    )
    model.load_state_dict(state_dict)
    if device:
        model = model.to(device)
    return model


def load_compressed_model(path, args, device=None):
    """Load a model saved by save_compressed_model.

    Handles both gzip-compressed and plain torch checkpoints: a gzipped
    file is inflated to a temporary file first; a plain checkpoint
    (gzip.BadGzipFile) is loaded directly.

    Args:
        path: checkpoint file path.
        args: namespace with embedding_size, hidden_size and num_layers.
        device: optional device to move the model to.

    Returns:
        The loaded SocialLSTM model.
    """
    temp_path = path + ".temp"
    try:
        # Inflate the gzip checkpoint to a temp file, then load it.
        with gzip.open(path, 'rb') as f:
            with open(temp_path, 'wb') as temp_f:
                temp_f.write(f.read())
        try:
            state_dict = _torch_load_cpu(temp_path)
        finally:
            # Always remove the inflated copy, even if loading fails.
            os.remove(temp_path)
        return _build_model_from_state(state_dict, args, device)

    except gzip.BadGzipFile:
        # Not gzipped: clean up any partial temp file and load directly.
        if os.path.exists(temp_path):
            os.remove(temp_path)
        return _build_model_from_state(_torch_load_cpu(path), args, device)


def plot_compression_analysis(search_results, output_dir):
    """Render a 2x2 figure summarizing the compression search and save it.

    Produces two scatter plots (performance-loss trade-offs) and two box
    plots (compression ratio grouped by bit-width and by sparsity), written
    to `compression_analysis.png` in `output_dir`. No-op when the search
    result list is empty.
    """
    if not search_results:
        return

    fig, axes = plt.subplots(2, 2, figsize=(12, 10))
    fig.suptitle('Compression Analysis', fontsize=16)

    # Flatten the metrics used across the four panels.
    ade_losses = [100 * r['ade_loss'] for r in search_results]
    ratios = [r['compression_ratio'] for r in search_results]
    sparsity_pct = [100 * r['sparsity'] for r in search_results]
    bit_values = [r['bits'] for r in search_results]

    # Top-left: performance loss vs compression ratio, colored by sparsity.
    ax = axes[0, 0]
    ax.scatter(ratios, ade_losses, c=sparsity_pct, cmap='viridis', alpha=0.7)
    ax.set_xlabel('Compression Ratio')
    ax.set_ylabel('ADE Loss (%)')
    ax.set_title('Performance vs Compression Trade-off')
    ax.grid(True)
    plt.colorbar(ax.collections[0], ax=ax).set_label('Sparsity (%)')

    # Top-right: sparsity vs performance loss, colored by bit-width.
    ax = axes[0, 1]
    ax.scatter(sparsity_pct, ade_losses, c=bit_values, cmap='plasma', alpha=0.7)
    ax.set_xlabel('Sparsity (%)')
    ax.set_ylabel('ADE Loss (%)')
    ax.set_title('Sparsity vs Performance Loss')
    ax.grid(True)
    plt.colorbar(ax.collections[0], ax=ax).set_label('Quantization Bits')

    # Bottom-left: compression ratio distribution per quantization bit-width.
    by_bits = []
    bit_labels = []
    for bit in sorted(set(bit_values)):
        by_bits.append([r['compression_ratio'] for r in search_results if r['bits'] == bit])
        bit_labels.append(f'{bit}-bit')
    ax = axes[1, 0]
    ax.boxplot(by_bits, labels=bit_labels)
    ax.set_xlabel('Quantization Bits')
    ax.set_ylabel('Compression Ratio')
    ax.set_title('Quantization Impact on Compression')
    ax.grid(True)

    # Bottom-right: compression ratio distribution per sparsity level.
    by_sparsity = []
    sparsity_labels = []
    for level in sorted({r['sparsity'] for r in search_results}):
        by_sparsity.append([r['compression_ratio'] for r in search_results if r['sparsity'] == level])
        sparsity_labels.append(f'{level * 100:.0f}%')
    ax = axes[1, 1]
    ax.boxplot(by_sparsity, labels=sparsity_labels)
    ax.set_xlabel('Sparsity')
    ax.set_ylabel('Compression Ratio')
    ax.set_title('Sparsity Impact on Compression')
    ax.grid(True)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'compression_analysis.png'), dpi=300, bbox_inches='tight')
    plt.close()
    print(f"压缩分析图表已保存到: {os.path.join(output_dir, 'compression_analysis.png')}")


def main():
    """Entry point: train and/or compress the Social-LSTM trajectory model.

    The workflow is driven by args.mode ('train', 'compress' or 'both') and
    args.compression_mode ('smart' or manual). All artifacts — checkpoints,
    plots, JSON histories and results — are written to args.output_dir.
    """
    args = get_args()

    # Ensure the output directory exists before writing any artifacts.
    os.makedirs(args.output_dir, exist_ok=True)

    # Select the compute device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Create or load the model architecture.
    model = load_trajectory_model(args)
    model = model.to(device)

    trained_model_path = os.path.join(args.output_dir, f"social_lstm_trained.pth")
    training_history = None

    # Training phase
    if args.mode in ['train', 'both']:
        print("=" * 50)
        print("开始卫星航迹识别训练阶段")
        print("=" * 50)

        # Build the train/test data loaders.
        train_loader, test_loader = create_dataloaders(args)

        # Visualize a few dataset samples for sanity checking.
        visualize_dataset_samples(train_loader, args.output_dir)

        # Train the model.
        model, best_ade, training_history = train_model(
            model, train_loader, test_loader, device, args
        )

        # Persist the trained weights.
        torch.save(model.state_dict(), trained_model_path)
        print(f"训练好的模型已保存到: {trained_model_path}")

        # Visualize predicted trajectories against ground truth.
        visualize_trajectory_predictions(model, test_loader, device, args.output_dir)

        # Persist the training history as JSON.
        history_path = os.path.join(args.output_dir, "training_history.json")
        with open(history_path, 'w') as f:
            json.dump(training_history, f, indent=2)

    # Compression-only mode: load a pretrained checkpoint if available.
    elif args.mode == 'compress':
        if args.pretrained_path and os.path.exists(args.pretrained_path):
            model.load_state_dict(torch.load(args.pretrained_path, map_location=device, weights_only=True))
            print(f"已加载预训练模型: {args.pretrained_path}")
        elif os.path.exists(trained_model_path):
            model.load_state_dict(torch.load(trained_model_path, map_location=device, weights_only=True))
            print(f"已加载训练好的模型: {trained_model_path}")
        else:
            print("警告: 没有找到预训练模型，将使用随机初始化的模型进行压缩")

    # Compression phase
    if args.mode in ['compress', 'both']:
        print("=" * 50)
        print(f"开始{'智能' if args.compression_mode == 'smart' else '手动'}压缩阶段")
        print("=" * 50)

        # Build a test loader for evaluation (rebuilt even in 'both' mode).
        _, test_loader = create_dataloaders(args)

        # Evaluate the uncompressed model.
        print("评估原始模型...")
        original_ade, original_fde = evaluate_trajectory_model(model, test_loader, device)
        original_size = get_model_size(model)

        # Save the uncompressed checkpoint and record its on-disk size.
        original_path = os.path.join(args.output_dir, f"social_lstm_original.pth")
        original_file_size_bytes = save_compressed_model(model, original_path, compress=False)
        original_file_size = original_file_size_bytes / (1024 * 1024)

        print(f"原始模型ADE: {original_ade:.4f}, FDE: {original_fde:.4f}")
        print(f"原始模型内存大小: {original_size:.2f} MB")
        print(f"原始模型文件大小: {original_file_size:.2f} MB")

        # Choose the compression method.
        if args.compression_mode == 'smart':
            # Smart compression: search over (sparsity, bits) configurations.
            compressed_model, best_config, search_results = smart_compression_search(
                model, test_loader, device, args
            )
            actual_sparsity = best_config['sparsity']
            compression_bits = best_config['bits']

            # Plot the compression search analysis.
            plot_compression_analysis(search_results, args.output_dir)

        else:
            # Manual compression with user-specified parameters.
            print(f"使用手动压缩参数: 稀疏度={args.sparsity}, 量化位数={args.bits}")
            start_time = time.time()
            compressed_model, actual_sparsity = compress_model(model, args.bits, args.sparsity)
            compression_time = time.time() - start_time
            print(f"压缩完成，耗时: {compression_time:.2f} 秒")
            compression_bits = args.bits
            best_config = {
                'sparsity': args.sparsity,
                'bits': args.bits,
                'compression_time': compression_time
            }
            search_results = []

        # Evaluate the compressed model.
        print("评估压缩后的模型...")
        compressed_model = compressed_model.to(device)
        compressed_ade, compressed_fde = evaluate_trajectory_model(compressed_model, test_loader, device)

        # In-memory size after compression.
        compressed_size = get_model_size(compressed_model)

        # Save the compressed checkpoint (gzipped) and record its file size.
        compressed_path = os.path.join(args.output_dir, f"social_lstm_compressed.pth")
        compressed_file_size_bytes = save_compressed_model(compressed_model, compressed_path, compress=True)
        compressed_file_size = compressed_file_size_bytes / (1024 * 1024)

        print(f"压缩后模型ADE: {compressed_ade:.4f}, FDE: {compressed_fde:.4f}")
        print(f"压缩后模型内存大小: {compressed_size:.2f} MB")
        print(f"压缩后模型文件大小: {compressed_file_size:.2f} MB")
        print(f"实际稀疏度: {actual_sparsity:.4f}")

        # Compute compression ratio and performance deltas.
        file_compression_ratio = original_file_size / compressed_file_size
        ade_change = compressed_ade - original_ade
        ade_change_percent = (ade_change / original_ade) * 100
        fde_change = compressed_fde - original_fde
        fde_change_percent = (fde_change / original_fde) * 100

        # Round-trip check: reload the compressed checkpoint and re-evaluate.
        print("测试加载压缩模型...")
        loaded_model = load_compressed_model(compressed_path, args, device)
        loaded_ade, loaded_fde = evaluate_trajectory_model(loaded_model, test_loader, device)
        print(f"加载后模型ADE: {loaded_ade:.4f}, FDE: {loaded_fde:.4f}")

        # Assemble the complete results record.
        results = {
            'model': args.model,
            'mode': args.mode,
            'compression_mode': args.compression_mode,
            'task': 'trajectory_prediction',
            'model_params': {
                'hidden_size': args.hidden_size,
                'num_layers': args.num_layers,
                'embedding_size': args.embedding_size,
                'seq_len': args.seq_len,
                'pred_len': args.pred_len
            },
            'training_params': {
                'epochs': args.epochs,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'dataset_size': args.dataset_size
            } if args.mode in ['train', 'both'] else None,
            'compression_params': {
                'final_bits': compression_bits,
                'final_sparsity': float(actual_sparsity),
                'max_performance_loss': args.max_performance_loss,
                'target_compression_ratio': args.target_compression_ratio,
                'compression_strategy': args.compression_strategy
            },
            'model_sizes': {
                'original_memory_mb': float(original_size),
                'original_file_mb': float(original_file_size),
                'compressed_memory_mb': float(compressed_size),
                'compressed_file_mb': float(compressed_file_size),
                'file_compression_ratio': float(file_compression_ratio)
            },
            'performance_metrics': {
                'original_ade': float(original_ade),
                'original_fde': float(original_fde),
                'compressed_ade': float(compressed_ade),
                'compressed_fde': float(compressed_fde),
                'loaded_ade': float(loaded_ade),
                'loaded_fde': float(loaded_fde),
                'ade_change': float(ade_change),
                'ade_change_percent': float(ade_change_percent),
                'fde_change': float(fde_change),
                'fde_change_percent': float(fde_change_percent)
            },
            'training_history': training_history,
            'best_compression_config': best_config
        }

        # Persist the results record.
        results_path = os.path.join(args.output_dir, "complete_results.json")
        with open(results_path, 'w') as f:
            json.dump(results, f, indent=2)

        # Print the final summary.
        print("\n" + "=" * 60)
        print("卫星航迹识别最终结果统计")
        print("=" * 60)
        # NOTE(review): assumes training_history contains 'best_test_ade' —
        # confirm against train_model's return value.
        if training_history:
            print(f"训练最佳ADE: {training_history['best_test_ade']:.4f}")
        print(f"原始模型ADE: {original_ade:.4f}, FDE: {original_fde:.4f}")
        print(f"压缩后模型ADE: {compressed_ade:.4f}, FDE: {compressed_fde:.4f}")
        print(f"ADE变化: {ade_change:+.4f} ({ade_change_percent:+.1f}%)")
        print(f"FDE变化: {fde_change:+.4f} ({fde_change_percent:+.1f}%)")
        print(f"文件压缩率: {file_compression_ratio:.2f}倍")
        print(f"原始文件大小: {original_file_size:.2f} MB")
        print(f"压缩文件大小: {compressed_file_size:.2f} MB")
        print(f"最终量化位数: {compression_bits}")
        print(f"最终稀疏度: {actual_sparsity:.4f}")

        # Qualitative assessment of the compression outcome.
        if ade_change_percent <= args.max_performance_loss * 100:
            print(f"✅ 压缩成功！性能损失在可接受范围内 (<{args.max_performance_loss * 100:.1f}%)")
        else:
            print(f"⚠️  压缩性能损失较大 ({ade_change_percent:.1f}%)，考虑调整压缩策略")

        if file_compression_ratio >= args.target_compression_ratio:
            print(f"✅ 达到目标压缩率 (>{args.target_compression_ratio:.1f}x)")
        else:
            print(f"📊 压缩率 {file_compression_ratio:.1f}x，未达到目标 {args.target_compression_ratio:.1f}x")

        print(f"结果已保存到: {results_path}")

    print("\n卫星航迹识别模型训练和智能压缩完成!")

# Run the CLI workflow when executed as a script.
if __name__ == "__main__":
    main()