import os
import random

import joblib
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset

# 导入配置文件
from config import (
    SEED, FONT_PATHS, COLORMAP, DEVICE, TOTAL_TIME_SEC, ORIGINAL_DT_SEC, NEW_DT_SEC,
    TIME_STEPS_NEW,
    N_STEPS_IN, M_STEPS_OUT, LSTM_UNITS, DROPOUT_RATE, EPOCHS, BATCH_SIZE,
    NUM_COORDINATES, NUM_DRONES, NUM_FEATURES,
    SMOOTHING_WINDOW_SIZE, NOISE_MEAN, NOISE_STD_DEV,
    EARLY_STOPPING_PATIENCE, NUM_DRONES_TO_PLOT_RELATIVE, NUM_DRONES_TO_PLOT_ABSOLUTE,
    LEARNING_RATE, WEIGHT_DECAY, LOSS_FUNCTION,
    TRAIN_DATA_PATH, MODEL_STATE_DICT_PATH, SCALER_PATH, MODEL_ROOT
)

# --- Global RNG seeding (critical for reproducibility) ---
# Seed every RNG the pipeline touches: Python, NumPy, and Torch (CPU + CUDA).
for _seed_fn in (random.seed, np.random.seed, torch.manual_seed):
    _seed_fn(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # Deterministic cuDNN kernels; disables autotuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
print(f"随机种子已固定为: {SEED}")

# --- Matplotlib CJK font configuration ---
def setup_matplotlib_font():
    """Configure Matplotlib to render Chinese text using the first usable font in FONT_PATHS.

    Fix over the previous version: Matplotlib resolves `font.sans-serif`
    entries by font *family name*, not filename, and `FontProperties(fname=...)`
    alone does not register the file with the font manager. We now register
    the file via `fm.fontManager.addfont` and put the real family name
    (from `FontProperties.get_name()`) into rcParams.
    """
    chosen_path = None
    chosen_family = None
    for font_path in FONT_PATHS:
        if not os.path.exists(font_path):
            continue
        try:
            # Register the font file so family-name lookup can find it.
            fm.fontManager.addfont(font_path)
            chosen_family = fm.FontProperties(fname=font_path).get_name()
            chosen_path = font_path
            break
        except Exception:
            # Unreadable/corrupt font file: try the next candidate.
            continue

    if chosen_family:
        plt.rcParams['font.sans-serif'] = [chosen_family]
        # Keep the minus sign renderable with a CJK font active.
        plt.rcParams['axes.unicode_minus'] = False
        print(f"Matplotlib 已配置使用字体: {chosen_family} (来源于 {chosen_path})")
    else:
        print("警告: 未找到常见的中文字体文件。图表中的中文可能显示为方块。")
        print("请手动安装中文字体并更新 'font_paths' 列表，或参考 Matplotlib 官方文档配置字体。")

setup_matplotlib_font()

# Colors for the absolute-trajectory plot.
# `plt.colormaps` is the registry API (Matplotlib >= 3.5); fall back to the
# older `plt.cm.get_cmap` on versions where the registry does not exist.
try:
    ABSOLUTE_COLORS = plt.colormaps['tab20'].colors
except AttributeError:
    ABSOLUTE_COLORS = plt.cm.get_cmap('tab20', 20).colors

# Colormap instance for the relative-trajectory plot (same version fallback).
try:
    cmap_instance = plt.colormaps[COLORMAP]
except AttributeError:
    cmap_instance = plt.get_cmap(COLORMAP)

# Make sure the model output directory exists before any checkpoint is saved.
os.makedirs(MODEL_ROOT, exist_ok=True)

# Euclidean-distance loss
class EuclideanDistanceLoss(nn.Module):
    """Mean Euclidean distance loss for 3-D trajectory prediction.

    Both inputs are flat tensors of shape
    (batch_size, sequence_length * num_coordinates). They are reshaped to
    (batch_size, sequence_length, num_coordinates), the per-step Euclidean
    distance is taken over the coordinate axis, and the mean over all steps
    and batch items is returned as a scalar.
    """

    def __init__(self, num_coordinates=None):
        """
        Args:
            num_coordinates: number of spatial coordinates per time step.
                Defaults to the configured NUM_COORDINATES; the global is
                resolved at forward time, so explicit callers never touch it.
                (Backward compatible with the original zero-argument ctor.)
        """
        super(EuclideanDistanceLoss, self).__init__()
        self.num_coordinates = num_coordinates

    def forward(self, predictions, targets):
        """Return the scalar mean Euclidean distance between predictions and targets."""
        num_coords = self.num_coordinates if self.num_coordinates is not None else NUM_COORDINATES
        batch_size = predictions.size(0)
        pred_reshaped = predictions.view(batch_size, -1, num_coords)
        target_reshaped = targets.view(batch_size, -1, num_coords)

        # Per-step distance: sqrt of summed squared coordinate differences.
        # NOTE: sqrt has an unbounded gradient at exactly zero distance; the
        # original behavior (no epsilon) is preserved deliberately.
        squared_diff = torch.pow(pred_reshaped - target_reshaped, 2)
        euclidean_dist = torch.sqrt(torch.sum(squared_diff, dim=2))  # (batch_size, sequence_length)

        # Mean over batch and sequence.
        return torch.mean(euclidean_dist)

# Select the loss function according to configuration
def get_loss_function(loss_name=None):
    """Return the loss module for the given name.

    Args:
        loss_name: one of 'Euclidean', 'MSE', 'Huber'. Defaults to the
            configured LOSS_FUNCTION when omitted, so the original
            zero-argument call keeps working unchanged.

    Returns:
        An nn.Module loss instance; unknown names fall back to MSE with a warning.
    """
    if loss_name is None:
        loss_name = LOSS_FUNCTION
    if loss_name == 'Euclidean':
        return EuclideanDistanceLoss()
    elif loss_name == 'MSE':
        return nn.MSELoss()
    elif loss_name == 'Huber':
        return nn.HuberLoss()
    else:
        print(f"警告: 未知的损失函数类型 {loss_name}，使用默认的 MSE")
        return nn.MSELoss()


# GPU availability check
print("\n--- 检查 GPU 可用性 ---")
device = torch.device(DEVICE)
print(f"PyTorch 配置为使用: {device}")
if device.type == 'cuda':
    print(f"CUDA 版本: {torch.version.cuda}")
    print(f"GPU 名称: {torch.cuda.get_device_name(0)}")

# Data loading: whitespace-separated table without a header row.
print(f"\n--- 正在加载数据: {TRAIN_DATA_PATH} ---")
try:
    # sep=r'\s+' handles runs of spaces/tabs; the python engine is required
    # for regex separators.
    df_raw = pd.read_csv(TRAIN_DATA_PATH, sep=r'\s+', header=None, engine='python')
    print("原始数据读取成功。")
except FileNotFoundError:
    print(f"\n错误：文件未找到。请检查文件路径是否正确：'{os.path.abspath(TRAIN_DATA_PATH)}'")
    exit()
except Exception as e:
    # Any other parse/IO failure is fatal for this script.
    print(f"\n读取数据文件时发生错误: {e}")
    exit()

if df_raw.empty:
    raise ValueError(f"文件 {TRAIN_DATA_PATH} 为空或无法解析为DataFrame。")

# Derive per-file geometry from the loaded data.
# NOTE: the former `global NUM_DRONES, NUM_FEATURES` statement was removed —
# `global` is a no-op at module scope; the plain assignments below already
# rebind the module-level names that were imported from config.

# Infer the number of drones from the column count:
# column 0 is time, then NUM_COORDINATES columns per drone.
num_cols = df_raw.shape[1]
if (num_cols - 1) % NUM_COORDINATES != 0:
    raise ValueError(
        f"文件 {TRAIN_DATA_PATH} 的数据格式似乎不正确。总列数 ({num_cols}) "
        f"不符合 (1 + N_DRONES * {NUM_COORDINATES}) 的模式。\n"
        f"请检查文件内容或 NUM_COORDINATES 配置。"
    )

# Rebind module-level geometry derived from the file.
NUM_DRONES = (num_cols - 1) // NUM_COORDINATES
NUM_FEATURES = NUM_COORDINATES + NUM_DRONES  # coords + one-hot drone id
TIME_STEPS_ORIGINAL = df_raw.shape[0]

print(f"根据文件列数 ({num_cols}) 推断无人机数量为: {NUM_DRONES} 架")
print(f"数据加载完成，总行数: {df_raw.shape[0]}, 总列数: {df_raw.shape[1]}")
print(f"实际原始时间步数: {TIME_STEPS_ORIGINAL}")
print(f"计算的特征维度: {NUM_FEATURES}")


# Convert the raw wide-format table to long format and assign a drone id to
# each row (wide: time + 3 columns per drone; long: one row per drone per step).
print("--- 正在将原始宽格式数据转换为长格式并分配无人机ID ---")
dfs_per_drone = []
# Column naming: column 0 is time, then each group of 3 columns
# (range, elevation, bearing) belongs to one drone.
col_names = ['time']
for i in range(NUM_DRONES):
    col_names.extend([f'r{i+1}', f'e{i+1}', f'b{i+1}'])
df_raw.columns = col_names # assign proper column names to df_raw

for i in range(1, NUM_DRONES + 1):
    r_col = f'r{i}'
    e_col = f'e{i}'
    b_col = f'b{i}'
    df_drone = df_raw[['time', r_col, e_col, b_col]].copy()
    df_drone.columns = ['time', 'r', 'e', 'b']  # unified column names
    df_drone['drone_id'] = i - 1  # zero-based drone id
    dfs_per_drone.append(df_drone)

df_long_format = pd.concat(dfs_per_drone).sort_values(['drone_id', 'time']).reset_index(drop=True)
print(f"长格式转换完成。形状: {df_long_format.shape}")

# Data preprocessing: spherical to Cartesian coordinates
def spherical_to_cartesian_coords(r, e, b):
    """Convert spherical coordinates (r, e, b) to Cartesian (x, y, z).

    r: radial distance (range)
    e: elevation angle measured from the XY plane, in [-pi/2, pi/2]
    b: bearing/azimuth in the XY plane, counter-clockwise from the +X axis

    Works element-wise on scalars, NumPy arrays, or pandas Series.
    """
    horizontal = r * np.cos(e)  # projection of the range onto the XY plane
    return horizontal * np.cos(b), horizontal * np.sin(b), r * np.sin(e)

print("\n--- 正在将球坐标转换为笛卡尔坐标 ---")
# Vectorized over the whole long-format frame: pandas Series pass straight
# through the NumPy trig calls.
df_long_format['x_abs'], df_long_format['y_abs'], df_long_format['z_abs'] = spherical_to_cartesian_coords(
    df_long_format['r'], df_long_format['e'], df_long_format['b']
)

# Drop the original spherical-coordinate columns.
df_xyz_abs_raw = df_long_format.drop(columns=['r', 'e', 'b'])
print(f"坐标转换完成。形状: {df_xyz_abs_raw.shape}")

# Keep a pristine (pre-noise) copy of the absolute Cartesian data for the
# before/after visualization below.
df_xyz_abs_raw_before_noise = df_xyz_abs_raw.copy()

# --- Gaussian noise injection ---
def add_gaussian_noise(df_input, noise_mean, noise_std_dev, x_col, y_col, z_col):
    """Add i.i.d. Gaussian noise to three Cartesian coordinate columns.

    df_input: DataFrame holding the coordinates (modified in place and returned)
    noise_mean / noise_std_dev: parameters of the normal distribution
    x_col, y_col, z_col: names of the columns to perturb

    A std-dev of exactly 0 is treated as "no noise" and skips drawing
    entirely, leaving the global NumPy RNG stream untouched.
    """
    if noise_std_dev == 0:
        print("\n--- 噪声标准差为0，跳过高斯噪声添加步骤。---")
        return df_input

    print("\n--- 正在对笛卡尔坐标添加高斯噪声 ---")
    n_rows = df_input.shape[0]
    # Draw per column, in x/y/z order, so the seeded RNG stream is identical
    # to the original three-statement implementation.
    for column in (x_col, y_col, z_col):
        df_input[column] = df_input[column] + np.random.normal(noise_mean, noise_std_dev, n_rows)
    print(f"高斯噪声添加完成 (均值={noise_mean}, 标准差={noise_std_dev}米)。")
    return df_input

# Apply Gaussian noise to the absolute Cartesian coordinates (in place).
df_xyz_abs_raw = add_gaussian_noise(df_xyz_abs_raw, NOISE_MEAN, NOISE_STD_DEV, 'x_abs', 'y_abs', 'z_abs')

# --- Visualization: original vs noisy trajectory for a single drone ---
print("\n--- 可视化: 原始轨迹与加噪轨迹对比 (单架无人机) ---")
drone_id_to_plot_noise_compare = 5  # demo drone; the plot is skipped if it does not exist
if NUM_DRONES > drone_id_to_plot_noise_compare:
    df_drone_original_xyz_single = df_xyz_abs_raw_before_noise[df_xyz_abs_raw_before_noise['drone_id'] == drone_id_to_plot_noise_compare].copy()
    df_drone_noisy_xyz_single = df_xyz_abs_raw[df_xyz_abs_raw['drone_id'] == drone_id_to_plot_noise_compare].copy()

    if not df_drone_original_xyz_single.empty and not df_drone_noisy_xyz_single.empty:
        plt.figure(figsize=(10, 8))
        ax = plt.axes(projection='3d')

        # Stack both trajectories so the axis limits cover original and noisy data.
        combined_data_xyz_noise_compare = np.vstack([
            df_drone_original_xyz_single[['x_abs', 'y_abs', 'z_abs']].values,
            df_drone_noisy_xyz_single[['x_abs', 'y_abs', 'z_abs']].values
        ])

        if combined_data_xyz_noise_compare.size > 0:
            # Per-axis min/max for flexible scaling
            min_x, max_x = combined_data_xyz_noise_compare[:, 0].min(), combined_data_xyz_noise_compare[:, 0].max()
            min_y, max_y = combined_data_xyz_noise_compare[:, 1].min(), combined_data_xyz_noise_compare[:, 1].max()
            min_z, max_z = combined_data_xyz_noise_compare[:, 2].min(), combined_data_xyz_noise_compare[:, 2].max()

            # Add a small buffer around the limits
            buffer_x = (max_x - min_x) * 0.1 if (max_x - min_x) != 0 else 1.0 # fall back to a fixed buffer when the range is zero
            buffer_y = (max_y - min_y) * 0.1 if (max_y - min_y) != 0 else 1.0
            buffer_z = (max_z - min_z) * 0.1 if (max_z - min_z) != 0 else 1.0

            ax.set_xlim([min_x - buffer_x, max_x + buffer_x])
            ax.set_ylim([min_y - buffer_y, max_y + buffer_y])
            ax.set_zlim([min_z - buffer_z, max_z + buffer_z])
            # set_box_aspect intentionally omitted: axes scale independently
        else:
            print(f"警告: 无人机 {drone_id_to_plot_noise_compare} 的原始或加噪数据为空，无法设置轴限制。")

        ax.plot(df_drone_original_xyz_single['x_abs'], df_drone_original_xyz_single['y_abs'], df_drone_original_xyz_single['z_abs'],
                label=f'无人机 {drone_id_to_plot_noise_compare} 原始轨迹', color='green', alpha=0.7)
        ax.plot(df_drone_noisy_xyz_single['x_abs'], df_drone_noisy_xyz_single['y_abs'], df_drone_noisy_xyz_single['z_abs'],
                label=f'无人机 {drone_id_to_plot_noise_compare} 加噪后轨迹', color='red', linestyle='--', alpha=0.7)

        ax.set_xlabel('X 坐标 (米)', fontsize=12)
        ax.set_ylabel('Y 坐标 (米)', fontsize=12)
        ax.set_zlabel('Z 坐标 (米)', fontsize=12)
        ax.set_title(f'原始轨迹与加噪轨迹对比 (无人机 {drone_id_to_plot_noise_compare})', fontsize=16)
        ax.legend(fontsize=12)
        ax.grid(True)
        plt.show()
    else:
        print(f"警告: 无人机 {drone_id_to_plot_noise_compare} 的数据缺失，无法绘制原始与加噪轨迹对比图。")
else:
    print(f"警告: 无人机 {drone_id_to_plot_noise_compare} 不存在 (总共只有 {NUM_DRONES} 架)，跳过原始与加噪轨迹对比图。")


# --- Visualization: absolute trajectories of all drones ---
print("\n--- 正在绘制所有无人机的绝对轨迹 ---")
fig_abs = plt.figure(figsize=(14, 10))
ax_abs = fig_abs.add_subplot(111, projection='3d')

# None means "plot all"; otherwise cap at the configured count.
if NUM_DRONES_TO_PLOT_ABSOLUTE is None or NUM_DRONES_TO_PLOT_ABSOLUTE > NUM_DRONES:
    actual_drones_to_plot_abs = NUM_DRONES
else:
    actual_drones_to_plot_abs = NUM_DRONES_TO_PLOT_ABSOLUTE

print(f"计划绘制前 {actual_drones_to_plot_abs} 架无人机的绝对轨迹。")

all_absolute_coords = df_xyz_abs_raw[['x_abs', 'y_abs', 'z_abs']].values
if all_absolute_coords.size > 0:
    # Per-axis min/max for flexible scaling
    min_x_abs, max_x_abs = all_absolute_coords[:, 0].min(), all_absolute_coords[:, 0].max()
    min_y_abs, max_y_abs = all_absolute_coords[:, 1].min(), all_absolute_coords[:, 1].max()
    min_z_abs, max_z_abs = all_absolute_coords[:, 2].min(), all_absolute_coords[:, 2].max()

    # Small buffer around the limits; fixed fallback when the range is zero.
    buffer_x_abs = (max_x_abs - min_x_abs) * 0.1 if (max_x_abs - min_x_abs) != 0 else 1.0
    buffer_y_abs = (max_y_abs - min_y_abs) * 0.1 if (max_y_abs - min_y_abs) != 0 else 1.0
    buffer_z_abs = (max_z_abs - min_z_abs) * 0.1 if (max_z_abs - min_z_abs) != 0 else 1.0

    ax_abs.set_xlim([min_x_abs - buffer_x_abs, max_x_abs + buffer_x_abs])
    ax_abs.set_ylim([min_y_abs - buffer_y_abs, max_y_abs + buffer_y_abs])
    ax_abs.set_zlim([min_z_abs - buffer_z_abs, max_z_abs + buffer_z_abs])
    # set_box_aspect intentionally omitted: axes scale independently
else:
    print("警告: 没有绝对坐标数据，无法设置绝对轨迹图的轴限制。")

for drone_id in range(actual_drones_to_plot_abs):
    df_current_drone_abs = df_xyz_abs_raw[df_xyz_abs_raw['drone_id'] == drone_id]

    if not df_current_drone_abs.empty:
        # Cycle through the tab20 palette when there are more drones than colors.
        color_idx = drone_id % len(ABSOLUTE_COLORS)
        color = ABSOLUTE_COLORS[color_idx]
        ax_abs.plot(df_current_drone_abs['x_abs'], df_current_drone_abs['y_abs'], df_current_drone_abs['z_abs'],
                label=f'无人机 {drone_id}', color=color, linewidth=1.0, alpha=0.8)
    else:
        print(f"警告: 目标 {drone_id} 没有可用的绝对轨迹数据。")

ax_abs.set_title(f'{os.path.basename(TRAIN_DATA_PATH)} - 多目标绝对轨迹图 (共 {NUM_DRONES} 个, 显示 {actual_drones_to_plot_abs} 个)')
ax_abs.set_xlabel('X 坐标 (m)')
ax_abs.set_ylabel('Y 坐标 (m)')
ax_abs.set_zlabel('Z 坐标 (m)')

# Omit the legend when it would be too crowded.
if actual_drones_to_plot_abs <= 15:
    ax_abs.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
else:
    print(f"注意：绝对轨迹图目标数量 ({actual_drones_to_plot_abs}) 较多，图例可能过于拥挤，已省略。")
ax_abs.grid(True)
plt.tight_layout()
plt.show()
print("绝对轨迹图绘制完成。")


# Preprocessing: resample each track (ORIGINAL_DT_SEC -> NEW_DT_SEC), smooth,
# and convert to coordinates relative to the drone's first position.
print(f"\n--- 正在处理数据 (重采样 {ORIGINAL_DT_SEC}s -> {NEW_DT_SEC}s, 平滑, 相对坐标) ---")
df_resampled_list = []
for drone_id in range(NUM_DRONES):
    df_drone_abs = df_xyz_abs_raw[df_xyz_abs_raw['drone_id'] == drone_id].copy()

    if df_drone_abs.empty:
        print(f"警告: 无人机 {drone_id} 没有数据，跳过处理。")
        continue

    # Relative coordinates: subtract the drone's first (already noisy)
    # position so every trajectory starts at the origin.
    initial_pos = df_drone_abs.iloc[0][['x_abs', 'y_abs', 'z_abs']].values
    df_drone_abs['x_rel'] = df_drone_abs['x_abs'] - initial_pos[0]
    df_drone_abs['y_rel'] = df_drone_abs['y_abs'] - initial_pos[1]
    df_drone_abs['z_rel'] = df_drone_abs['z_abs'] - initial_pos[2]

    # Index by time for reindexing; keep the first row of any duplicated timestamp.
    df_drone_indexed = df_drone_abs.set_index('time')[['x_rel', 'y_rel', 'z_rel']]
    num_duplicates = df_drone_indexed.index.duplicated(keep='first').sum()
    if num_duplicates > 0:
        print(f"警告: 无人机 {drone_id} 存在 {num_duplicates} 个重复时间戳，已保留第一个。")
        df_drone_indexed = df_drone_indexed[~df_drone_indexed.index.duplicated(keep='first')]
    df_drone_indexed = df_drone_indexed.sort_index()

    # NOTE(review): a float-step arange can drift or over/undershoot the
    # endpoint; confirm len(new_time_index) matches TIME_STEPS_NEW from config.
    new_time_index = pd.Index(np.arange(0, TOTAL_TIME_SEC + NEW_DT_SEC, NEW_DT_SEC), name='time')

    # NOTE(review): reindex keeps only rows whose timestamp exactly equals a
    # grid value (others become NaN), and interpolate(method='linear')
    # interpolates by position rather than by the time index — confirm the
    # original sampling times land exactly on this grid.
    df_drone_resampled = df_drone_indexed.reindex(new_time_index).interpolate(method='linear')

    if df_drone_resampled.isnull().values.any():
        df_drone_resampled = df_drone_resampled.ffill().bfill() # fill edge NaNs outside the original time span
        if df_drone_resampled.isnull().values.any():
            print(f"错误: 无人机 {drone_id} 在重采样和插值后仍存在 NaN 值。请检查原始数据的时间范围和 TOTAL_TIME_SEC。")
            continue

    # Centered moving-average smoothing; min_periods=1 avoids edge NaNs.
    if SMOOTHING_WINDOW_SIZE > 1:
        df_drone_resampled[['x_rel', 'y_rel', 'z_rel']] = df_drone_resampled[['x_rel', 'y_rel', 'z_rel']].rolling(
            window=SMOOTHING_WINDOW_SIZE, min_periods=1, center=True
        ).mean()
        df_drone_resampled = df_drone_resampled.interpolate(method='linear', limit_direction='both')

    # Back to a plain column layout and re-attach the drone id.
    df_drone_resampled = df_drone_resampled.reset_index(drop=False)
    df_drone_resampled['drone_id'] = drone_id
    df_resampled_list.append(df_drone_resampled)

if not df_resampled_list:
    raise ValueError("没有足够的数据或无人机来完成重采样和序列构建。请检查数据和参数。")

df_resampled = pd.concat(df_resampled_list)
print(f"数据预处理完成，总条数: {df_resampled.shape[0]}. 预期总条数 (NUM_DRONES * TIME_STEPS_NEW): {NUM_DRONES * TIME_STEPS_NEW}")


# --- Visualization: effect of the relative-coordinate conversion (several drones) ---
print("\n--- 可视化: 相对坐标转换效果 (多架无人机) ---")
plt.figure(figsize=(12, 10))
ax_rel = plt.axes(projection='3d')

# None means "plot all"; otherwise cap at the configured count.
if NUM_DRONES_TO_PLOT_RELATIVE is None or NUM_DRONES_TO_PLOT_RELATIVE > NUM_DRONES:
    actual_drones_to_plot_rel = NUM_DRONES
else:
    actual_drones_to_plot_rel = NUM_DRONES_TO_PLOT_RELATIVE

drones_to_show_relative_plot_ids = list(range(actual_drones_to_plot_rel))
print(f"计划绘制前 {actual_drones_to_plot_rel} 架无人机的相对轨迹。")

# One colormap sample per plotted drone.
colors_rel = cmap_instance(np.linspace(0, 1, len(drones_to_show_relative_plot_ids)))

all_relative_coords = []
for df_drone in df_resampled_list:
    if not df_drone.empty:
        all_relative_coords.append(df_drone[['x_rel', 'y_rel', 'z_rel']].values)

if not all_relative_coords:
    print("错误：没有有效的目标相对轨迹数据可供设置轴限制。")
    # Fallback limits when there is no data at all.
    min_x_rel, max_x_rel = -100, 100
    min_y_rel, max_y_rel = -100, 100
    min_z_rel, max_z_rel = -10, 10 # Z range is typically smaller
else:
    all_relative_coords = np.vstack(all_relative_coords)
    # Per-axis min/max for flexible scaling
    min_x_rel, max_x_rel = all_relative_coords[:, 0].min(), all_relative_coords[:, 0].max()
    min_y_rel, max_y_rel = all_relative_coords[:, 1].min(), all_relative_coords[:, 1].max()
    min_z_rel, max_z_rel = all_relative_coords[:, 2].min(), all_relative_coords[:, 2].max()

# Small buffer around the limits; fixed fallback when the range is zero.
buffer_x_rel = (max_x_rel - min_x_rel) * 0.1 if (max_x_rel - min_x_rel) != 0 else 1.0
buffer_y_rel = (max_y_rel - min_y_rel) * 0.1 if (max_y_rel - min_y_rel) != 0 else 1.0
buffer_z_rel = (max_z_rel - min_z_rel) * 0.1 if (max_z_rel - min_z_rel) != 0 else 1.0

ax_rel.set_xlim([min_x_rel - buffer_x_rel, max_x_rel + buffer_x_rel])
ax_rel.set_ylim([min_y_rel - buffer_y_rel, max_y_rel + buffer_y_rel])
ax_rel.set_zlim([min_z_rel - buffer_z_rel, max_z_rel + buffer_z_rel])
# set_box_aspect intentionally omitted: axes scale independently

for i, drone_id in enumerate(drones_to_show_relative_plot_ids):
    # NOTE(review): df_resampled_list is indexed positionally; if any drone
    # was skipped upstream (empty data), positions shift off drone_id —
    # confirm this cannot happen with the current data.
    if drone_id < len(df_resampled_list):
        df_current_drone = df_resampled_list[drone_id]
    else:
        df_current_drone = pd.DataFrame()

    if not df_current_drone.empty:
        ax_rel.plot(df_current_drone['x_rel'], df_current_drone['y_rel'], df_current_drone['z_rel'],
                label=f'无人机 {drone_id} 相对轨迹', color=colors_rel[i], alpha=0.8)

        if i == 0:
            # All relative trajectories start at the origin; mark it once.
            ax_rel.scatter(0, 0, 0, color='black', marker='o', s=100, label='轨迹起始点 (0,0,0)', depthshade=True)
    else:
        print(f"警告: 目标 {drone_id} 没有可用的相对轨迹数据，无法绘制。")

ax_rel.set_xlabel('X 坐标 (相对米)', fontsize=12)
ax_rel.set_ylabel('Y 坐标 (相对米)', fontsize=12)
ax_rel.set_zlabel('Z 坐标 (相对米)', fontsize=12)
ax_rel.set_title(f'{os.path.basename(TRAIN_DATA_PATH)} - 多目标相对轨迹图 (共 {NUM_DRONES} 个, 显示 {actual_drones_to_plot_rel} 个)', fontsize=16)

# Omit the legend when it would be too crowded.
if actual_drones_to_plot_rel <= 15:
    ax_rel.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
else:
    print(f"注意：相对轨迹图显示目标数量 ({actual_drones_to_plot_rel}) 较多，图例可能过于拥挤，已省略。")

ax_rel.grid(True)
plt.tight_layout()
plt.show()

# --- Preprocessing: normalization ---
print("--- 正在归一化数据 (使用 MinMaxScaler) ---")
# A single scaler is fit over all drones' relative coordinates jointly
# (per-column min/max), so one inverse transform works for every drone later.
scaler = MinMaxScaler(feature_range=(0, 1))

coords_to_scale = df_resampled[['x_rel', 'y_rel', 'z_rel']].values
if coords_to_scale.size == 0:
    raise ValueError("用于归一化的坐标数据为空，请检查数据处理步骤。")
scaled_coords = scaler.fit_transform(coords_to_scale)

df_scaled = df_resampled.copy()
df_scaled[['x_rel', 'y_rel', 'z_rel']] = scaled_coords
print("数据归一化完成。")

# --- Visualization: value distributions before vs after normalization (histograms) ---
print("\n--- 可视化: 归一化前后数据分布对比 ---")
fig, axes = plt.subplots(3, 2, figsize=(12, 12))
coords_labels = ['X', 'Y', 'Z']

for i, label in enumerate(coords_labels):
    # Left column: raw values in metres; right column: scaled to [0, 1].
    axes[i, 0].hist(coords_to_scale[:, i], bins=50, alpha=0.7, color='skyblue', edgecolor='black')
    axes[i, 0].set_title(f'{label} 坐标 (归一化前)', fontsize=14)
    axes[i, 0].set_xlabel('值 (米)', fontsize=12)
    axes[i, 0].set_ylabel('频次', fontsize=12)

    axes[i, 1].hist(scaled_coords[:, i], bins=50, alpha=0.7, color='lightcoral', edgecolor='black')
    axes[i, 1].set_title(f'{label} 坐标 (归一化后)', fontsize=14)
    axes[i, 1].set_xlabel('值 (0-1范围)', fontsize=12)
    axes[i, 1].set_ylabel('频次', fontsize=12)
    axes[i, 1].set_xlim([0, 1])

plt.tight_layout()
plt.suptitle('归一化前后坐标分布对比', y=1.02, fontsize=18)
plt.show()

# Build supervised sequences (input windows X, flattened future targets y)
print("--- 正在构建序列数据 ---")
NUM_FEATURES = NUM_COORDINATES + NUM_DRONES  # NOTE: recomputed; identical to the value set right after data loading
X, y = [], []
# Identity matrix: row i is the one-hot id vector of drone i.
drone_ids_one_hot = np.eye(NUM_DRONES)

for drone_id in range(NUM_DRONES):
    drone_data_scaled = df_scaled[df_scaled['drone_id'] == drone_id][['x_rel', 'y_rel', 'z_rel']].values

    if len(drone_data_scaled) < N_STEPS_IN + M_STEPS_OUT:
        print(f"警告: 无人机 {drone_id} 的数据不足以构建完整序列 ({len(drone_data_scaled)}/{N_STEPS_IN + M_STEPS_OUT})，跳过。")
        continue

    current_drone_id_one_hot = drone_ids_one_hot[drone_id]

    # Sliding window: N_STEPS_IN input steps followed by M_STEPS_OUT target steps.
    for i in range(len(drone_data_scaled) - N_STEPS_IN - M_STEPS_OUT + 1):
        input_seq = drone_data_scaled[i: i + N_STEPS_IN]
        # Append the drone's one-hot id to every time step of the window.
        input_with_id = np.concatenate((input_seq, np.tile(current_drone_id_one_hot, (N_STEPS_IN, 1))), axis=1)

        # Targets are flattened to shape (M_STEPS_OUT * NUM_COORDINATES,).
        output_seq = drone_data_scaled[
                     i + N_STEPS_IN: i + N_STEPS_IN + M_STEPS_OUT].flatten()

        X.append(input_with_id)
        y.append(output_seq)

if not X or not y:
    raise ValueError("未能成功构建任何训练序列。请检查数据、N_STEPS_IN 和 M_STEPS_OUT 配置。")

# float32 to match the default dtype of the model's parameters.
X = np.array(X, dtype=np.float32)
y = np.array(y, dtype=np.float32)

print(f"X (输入) 形状: {X.shape}")
print(f"y (输出) 形状: {y.shape}")

# Train/test split and DataLoaders.
# Use the configured SEED (instead of a hard-coded 42) so the split follows
# the same reproducibility setup as the rest of the script.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)

train_dataset = TensorDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))
test_dataset = TensorDataset(torch.from_numpy(X_test), torch.from_numpy(y_test))

# drop_last avoids a tiny final batch that would skew the per-batch loss average.
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

print("\n--- 正在构建 PyTorch LSTM 模型 ---")

class LSTMModel(nn.Module):
    """Single-layer LSTM mapping an input window to a flattened multi-step forecast.

    Input:  (batch, seq_len, input_dim)
    Output: (batch, output_dim), produced from the final hidden state.
    """

    def __init__(self, input_dim, lstm_units, output_dim, dropout_rate):
        super(LSTMModel, self).__init__()
        self.lstm = nn.LSTM(input_dim, lstm_units, batch_first=True)
        # Dropout is applied to the final hidden state; nn.LSTM's internal
        # dropout would be a no-op on a single layer anyway.
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(lstm_units, output_dim)

    def forward(self, x):
        # h_n has shape (num_layers, batch, hidden). Index the last layer
        # explicitly instead of squeeze(0) so the forward pass stays correct
        # if num_layers is ever increased.
        _, (h_n, _) = self.lstm(x)
        last_hidden = h_n[-1]
        return self.fc(self.dropout(last_hidden))

# Instantiate the model on the configured device.
model = LSTMModel(
    input_dim=NUM_FEATURES,  # 3 relative coords + NUM_DRONES one-hot id features
    lstm_units=LSTM_UNITS,
    output_dim=M_STEPS_OUT * NUM_COORDINATES,  # flattened future trajectory
    dropout_rate=DROPOUT_RATE
).to(device)

# Criterion chosen from config (Euclidean / MSE / Huber).
criterion = get_loss_function()
print(f"使用损失函数: {LOSS_FUNCTION}")
# Adam with L2 regularization via weight_decay.
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)

print("模型构建完成。")

print("\n--- 正在训练模型 ---")

# Early-stopping state: best validation loss seen so far and how many epochs
# have elapsed without improvement.
best_val_loss = float('inf')
patience_counter = 0
early_stopping_patience = EARLY_STOPPING_PATIENCE

history_train_loss = []
history_val_loss = []

for epoch in range(EPOCHS):
    # -- Training pass --
    model.train()
    train_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

    avg_train_loss = train_loss / len(train_loader)
    history_train_loss.append(avg_train_loss)

    # -- Validation pass --
    # NOTE(review): the test split doubles as the validation set here, so the
    # final "test" metric below is not a held-out estimate — confirm intended.
    model.eval()
    val_loss = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            val_loss += loss.item()

    avg_val_loss = val_loss / len(test_loader)
    history_val_loss.append(avg_val_loss)

    print(f'轮次 [{epoch + 1}/{EPOCHS}], 训练损失: {avg_train_loss:.6f}, 验证损失: {avg_val_loss:.6f}')

    # Checkpoint on improvement; otherwise count toward early stopping.
    if avg_val_loss < best_val_loss:
        best_val_loss = avg_val_loss
        patience_counter = 0
        torch.save(model.state_dict(), MODEL_STATE_DICT_PATH)
        print(f"  --> 验证损失改善。模型已保存到 {MODEL_STATE_DICT_PATH}")
    else:
        patience_counter += 1
        if patience_counter >= early_stopping_patience:
            print(f"在轮次 {epoch + 1} 处触发早停。")
            break

print("模型训练完成。")

# Reload the best checkpoint (if one was saved) before final evaluation.
if os.path.exists(MODEL_STATE_DICT_PATH):
    model.load_state_dict(torch.load(MODEL_STATE_DICT_PATH))
    model.to(device)
    model.eval()
    print(f"已加载最佳模型状态字典：{MODEL_STATE_DICT_PATH}")
else:
    print(f"未找到最佳模型状态字典：{MODEL_STATE_DICT_PATH}。使用最后一次训练的模型。")

# Persist the fitted scaler so inference can invert the normalization.
print(f"\n--- 正在保存 MinMaxScaler 到: {SCALER_PATH} ---")
joblib.dump(scaler, SCALER_PATH)
print("MinMaxScaler 已保存。")

# Final evaluation on the test loader (same data that served as validation).
print("\n--- 正在评估模型性能 ---")
model.eval()
total_test_loss = 0
with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        total_test_loss += loss.item()

avg_test_loss = total_test_loss / len(test_loader)
# Label the reported value with the configured criterion instead of a
# hard-coded "MSE" — the actual loss may be Euclidean or Huber.
print(f'测试集 {LOSS_FUNCTION} 损失: {avg_test_loss:.4f}')

# Training/validation loss curves, labeled with the configured criterion.
plt.figure(figsize=(10, 6))
plt.plot(history_train_loss, label=f'训练损失 ({LOSS_FUNCTION})')
plt.plot(history_val_loss, label=f'验证损失 ({LOSS_FUNCTION})')
plt.title('模型训练与验证损失历史', fontsize=16)
plt.xlabel('训练轮次 (Epoch)', fontsize=12)
plt.ylabel(f'损失 ({LOSS_FUNCTION})', fontsize=12)
plt.legend(fontsize=12)
plt.grid(True)
plt.show()

print("\n--- 正在生成示例预测可视化 ---")
if len(X_test) == 0:
    print("没有可用的测试样本来生成预测可视化。")
else:
    # Pick one random test window and run a single forward pass.
    sample_idx = np.random.randint(0, len(X_test))
    sample_input = torch.from_numpy(X_test[sample_idx:sample_idx + 1]).to(device)
    true_output_flat = y_test[sample_idx]

    model.eval()
    with torch.no_grad():
        predicted_output_flat_tensor = model(sample_input)
        predicted_output_flat = predicted_output_flat_tensor.cpu().numpy()[0]

    # Un-flatten to (steps, coords).
    true_output = true_output_flat.reshape(M_STEPS_OUT, NUM_COORDINATES)
    predicted_output = predicted_output_flat.reshape(M_STEPS_OUT, NUM_COORDINATES)

    # Strip the one-hot drone-id columns before inverting the scaler — the
    # scaler was fit on the 3 relative-coordinate columns only.
    input_trajectory_scaled = sample_input.cpu().numpy()[0, :, :NUM_COORDINATES]
    input_trajectory_rescaled = scaler.inverse_transform(input_trajectory_scaled)
    true_output_rescaled = scaler.inverse_transform(true_output)
    predicted_output_rescaled = scaler.inverse_transform(predicted_output)

    fig = plt.figure(figsize=(15, 12))
    ax = fig.add_subplot(111, projection='3d')

    # Stack all three segments so the axis limits cover the whole scene.
    combined_data_xyz_example_predict = np.vstack([
        input_trajectory_rescaled,
        true_output_rescaled,
        predicted_output_rescaled
    ])

    if combined_data_xyz_example_predict.size > 0:
        # Per-axis min/max for flexible scaling
        min_x_pred, max_x_pred = combined_data_xyz_example_predict[:, 0].min(), combined_data_xyz_example_predict[:, 0].max()
        min_y_pred, max_y_pred = combined_data_xyz_example_predict[:, 1].min(), combined_data_xyz_example_predict[:, 1].max()
        min_z_pred, max_z_pred = combined_data_xyz_example_predict[:, 2].min(), combined_data_xyz_example_predict[:, 2].max()

        # Small buffer around the limits; fixed fallback when the range is zero.
        buffer_x_pred = (max_x_pred - min_x_pred) * 0.1 if (max_x_pred - min_x_pred) != 0 else 1.0
        buffer_y_pred = (max_y_pred - min_y_pred) * 0.1 if (max_y_pred - min_y_pred) != 0 else 1.0
        buffer_z_pred = (max_z_pred - min_z_pred) * 0.1 if (max_z_pred - min_z_pred) != 0 else 1.0

        ax.set_xlim([min_x_pred - buffer_x_pred, max_x_pred + buffer_x_pred])
        ax.set_ylim([min_y_pred - buffer_y_pred, max_y_pred + buffer_y_pred])
        ax.set_zlim([min_z_pred - buffer_z_pred, max_z_pred + buffer_z_pred])
        # set_box_aspect intentionally omitted: axes scale independently
    else:
        print("警告: 预测可视化数据为空，无法设置轴限制。")

    ax.plot(input_trajectory_rescaled[:, 0], input_trajectory_rescaled[:, 1], input_trajectory_rescaled[:, 2],
            color='blue', linestyle='-', linewidth=2, label=f'输入轨迹 (过去 {N_STEPS_IN} 步)')

    # Mark where the forecast starts: the last point of the input window.
    start_x, start_y, start_z = input_trajectory_rescaled[-1, 0], input_trajectory_rescaled[-1, 1], \
        input_trajectory_rescaled[-1, 2]
    ax.scatter(start_x, start_y, start_z, color='blue', marker='o', s=150,
               label='预测起点')

    ax.plot(true_output_rescaled[:, 0], true_output_rescaled[:, 1], true_output_rescaled[:, 2],
            color='green', linestyle='-', linewidth=2, label=f'真实未来轨迹 (未来 {M_STEPS_OUT} 步)')

    ax.plot(predicted_output_rescaled[:, 0], predicted_output_rescaled[:, 1], predicted_output_rescaled[:, 2],
            color='red', linestyle='--', linewidth=2, label=f'预测未来轨迹 (未来 {M_STEPS_OUT} 步)')

    ax.set_xlabel('X 坐标 (相对米)', fontsize=12)
    ax.set_ylabel('Y 坐标 (相对米)', fontsize=12)
    ax.set_zlabel('Z 坐标 (相对米)', fontsize=12)
    ax.set_title(f'无人机轨迹预测示例 (相对坐标) - 测试样本 {sample_idx}', fontsize=16)
    ax.legend(fontsize=12)
    ax.grid(True)
    plt.tight_layout()
    plt.show()

    # 2-D view: Z coordinate over time, input vs true vs predicted future.
    plt.figure(figsize=(15, 6))
    input_time_steps = np.arange(0, N_STEPS_IN) * NEW_DT_SEC
    future_time_steps = np.arange(N_STEPS_IN, N_STEPS_IN + M_STEPS_OUT) * NEW_DT_SEC

    plt.plot(input_time_steps, input_trajectory_rescaled[:, 2], color='blue', label='输入 Z (相对)')
    plt.plot(future_time_steps, true_output_rescaled[:, 2], color='green', label='真实未来 Z (相对)')
    plt.plot(future_time_steps, predicted_output_rescaled[:, 2], color='red', linestyle='--',
             label='预测 Z (相对)')

    # Vertical marker at the boundary between history and forecast.
    plt.axvline(x=input_time_steps[-1], color='gray', linestyle=':', label='预测开始时间')

    plt.xlabel('时间 (秒)', fontsize=12)
    plt.ylabel('Z 坐标 (相对米)', fontsize=12)
    plt.title('相对 Z 轴预测 vs 真实值', fontsize=16)
    plt.legend(fontsize=12)
    plt.grid(True)
    plt.show()

print("\n--- PyTorch 模型训练和评估完成 ---")

