import os
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from matplotlib.animation import FuncAnimation
import random
import matplotlib.font_manager as fm
import scipy.io

# 导入配置文件
from config import (
    SEED, FONT_PATHS, COLORMAP, DEVICE, PIN_MEMORY,
    TOTAL_TIME_SEC, ORIGINAL_DT_SEC, NEW_DT_SEC,
    TIME_STEPS_ORIGINAL, TIME_STEPS_NEW,
    N_STEPS_IN, M_STEPS_OUT, LSTM_UNITS, DROPOUT_RATE, BATCH_SIZE,
    NUM_COORDINATES, NUM_DRONES, NUM_FEATURES,
    SMOOTHING_WINDOW_SIZE, NOISE_MEAN, NOISE_STD_DEV,
    EARLY_STOPPING_PATIENCE, BUFFER_SIZE,
    NUM_DRONES_TO_PLOT_RELATIVE, NUM_DRONES_TO_PLOT_ABSOLUTE,
    DRONE_TO_VIZ_NOISE_COMPARE, get_colormap_and_colors,
    LSTM_LAYERS, BIDIRECTIONAL, ATTENTION_HEADS,
    USE_ATTENTION, HIDDEN_DIM, LEARNING_RATE, WEIGHT_DECAY,
    TEST_DATA_PATH, MODEL_STATE_DICT_PATH, SCALER_PATH, MODEL_ROOT,
    SLIDING_WINDOW_STEP_SEC
)

# Seed every RNG in play (Python, NumPy, PyTorch CPU and all CUDA devices)
# so repeated runs produce identical predictions.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # Force deterministic cuDNN kernels; disables autotuning (benchmark)
    # which would otherwise pick kernels non-deterministically.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
print(f"随机种子已固定为: {SEED}")

# 配置 matplotlib 字体
# Configure the matplotlib font so CJK labels render correctly.
def setup_matplotlib_font():
    """Pick the first usable font file from FONT_PATHS and activate it.

    Registers the font file with matplotlib's font manager and resolves the
    font's real family name. The previous implementation used the file's
    basename as the family name, which fails whenever the filename differs
    from the font family (e.g. 'msyh.ttf' vs 'Microsoft YaHei') and never
    registered the file with matplotlib at all.
    """
    chosen_font = None
    for font_path in FONT_PATHS:
        if os.path.exists(font_path):
            try:
                # Validate that the file is a parseable font before using it.
                fm.FontProperties(fname=font_path)
                chosen_font = font_path
                break
            except Exception:
                continue

    if chosen_font:
        # Register the file so matplotlib can resolve it by family name,
        # then look up the actual family name from the font metadata.
        fm.fontManager.addfont(chosen_font)
        font_name = fm.FontProperties(fname=chosen_font).get_name()
        plt.rcParams['font.sans-serif'] = [font_name]
        # Keep the minus sign renderable with a CJK font active.
        plt.rcParams['axes.unicode_minus'] = False
        print(f"Matplotlib 已配置使用字体: {font_name} (来源于 {chosen_font})")
    else:
        print("警告: 未找到常见的中文字体文件。图表中的中文可能显示为方块。")
        print("请手动安装中文字体并更新 'font_paths' 列表，或参考 Matplotlib 官方文档配置字体。")

setup_matplotlib_font()

# Directory containing this script (anchor for any relative paths).
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Report which compute device PyTorch will use (DEVICE comes from config).
print("\n--- 检查 GPU 可用性 ---")
device = torch.device(DEVICE)
print(f"PyTorch 配置为使用: {device}")
if device.type == 'cuda':
    print(f"CUDA 版本: {torch.version.cuda}")
    print(f"GPU 名称: {torch.cuda.get_device_name(0)}")
print(f"PyTorch 版本: {torch.__version__}")
print("--- GPU 可用性检查结束 ---\n")


# --- 2. PyTorch model definition ---
# The class must be redefined here: the .pth file stores only the state dict,
# not the model architecture.
class LSTMModel(nn.Module):
    """Single-layer LSTM mapping an input sequence to a flat output vector.

    The final hidden state is passed through dropout and a linear head.
    Submodule names (lstm / dropout / fc) must stay as-is so the saved
    state dict loads cleanly.
    """

    def __init__(self, input_dim, lstm_units, output_dim, dropout_rate):
        super(LSTMModel, self).__init__()
        self.lstm = nn.LSTM(input_dim, lstm_units, batch_first=True)
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(lstm_units, output_dim)

    def forward(self, x):
        # x: (batch, seq_len, input_dim); h_n: (num_layers=1, batch, lstm_units)
        _, (h_n, _) = self.lstm(x)
        last_hidden = h_n[-1]  # drop the layer axis -> (batch, lstm_units)
        return self.fc(self.dropout(last_hidden))


# --- 3. Data loading and initial structuring (same loading scheme as the training script) ---
print(f"\n--- 正在加载数据: {TEST_DATA_PATH} ---")
try:
    # sep=r'\s+' treats one or more whitespace characters as the delimiter;
    # header=None because the file has no column header row.
    df_raw = pd.read_csv(TEST_DATA_PATH, sep=r'\s+', header=None, engine='python')
    print("测试数据读取成功。")
except FileNotFoundError:
    print(f"\n错误：文件未找到。请检查文件路径是否正确：'{os.path.abspath(TEST_DATA_PATH)}'")
    print("确保文件存在于指定的路径，并且文件名与扩展名都正确。")
    exit()  # abort the script
except pd.errors.EmptyDataError:
    print(f"\n错误：文件 '{os.path.abspath(TEST_DATA_PATH)}' 为空或不包含数据。")
    exit()
except Exception as e:
    print(f"\n读取数据文件时发生其他错误: {e}")
    print("请确保数据格式符合预期 (每行是数字，由空格或制表符分隔)。")
    exit()  # abort the script

if df_raw.empty:
    raise ValueError(f"文件 {TEST_DATA_PATH} 为空或无法解析为DataFrame。")

# Infer the drone count from the column count: 1 time column plus
# NUM_COORDINATES columns per drone.
num_cols = df_raw.shape[1]
if (num_cols - 1) % NUM_COORDINATES != 0:
    raise ValueError(
        f"文件 {TEST_DATA_PATH} 的数据格式似乎不正确。总列数 ({num_cols}) "
        f"不符合 (1 + N_DRONES * {NUM_COORDINATES}) 的模式。\n"
        f"请检查文件内容或 NUM_COORDINATES 配置。"
    )

# Rebind the module-level values imported from config with data-derived ones.
# (The original `global NUM_DRONES, NUM_FEATURES` statement was removed:
# `global` is a no-op at module scope; plain assignment already rebinds here.)
NUM_DRONES = (num_cols - 1) // NUM_COORDINATES
NUM_FEATURES = NUM_COORDINATES + NUM_DRONES

print(f"根据文件列数 ({num_cols}) 推断无人机数量为: {NUM_DRONES} 架")
print(f"数据加载完成，总行数: {df_raw.shape[0]}, 总列数: {df_raw.shape[1]}")

# Sliding-window step expressed in resampled time steps.
# NOTE(review): this value appears to be computed but never read below —
# confirm before removing.
SLIDING_WINDOW_STEP_STEPS = int(SLIDING_WINDOW_STEP_SEC / NEW_DT_SEC)

# Convert the raw wide-format data to long format, assigning a drone ID per row.
print("--- 正在将原始宽格式数据转换为长格式并分配无人机ID ---")
dfs_per_drone = []
# Wide columns: time, then one (r, e, b) triplet per drone: r1, e1, b1, r2, ...
col_names = ['time']
for i in range(NUM_DRONES):
    col_names.extend([f'r{i + 1}', f'e{i + 1}', f'b{i + 1}'])
df_raw.columns = col_names

# Slice each drone's triplet into its own frame with a 0-based drone_id.
for i in range(1, NUM_DRONES + 1):
    r_col = f'r{i}'
    e_col = f'e{i}'
    b_col = f'b{i}'
    df_drone = df_raw[['time', r_col, e_col, b_col]].copy()
    df_drone.columns = ['time', 'r', 'e', 'b']
    df_drone['drone_id'] = i - 1
    dfs_per_drone.append(df_drone)

df_long_format = pd.concat(dfs_per_drone).sort_values(['drone_id', 'time']).reset_index(drop=True)
print(f"长格式转换完成。形状: {df_long_format.shape}")

# --- 4. Load the PyTorch model weights and the fitted MinMaxScaler ---
print(f"--- 正在加载模型: {MODEL_STATE_DICT_PATH} ---")
if not os.path.exists(MODEL_STATE_DICT_PATH):
    raise FileNotFoundError(f"模型文件未找到: {MODEL_STATE_DICT_PATH}\n请确保模型已训练并保存到正确的位置。")

model = LSTMModel(
    input_dim=NUM_FEATURES,  # must match the NUM_FEATURES used at training time
    lstm_units=LSTM_UNITS,
    output_dim=M_STEPS_OUT * NUM_COORDINATES,
    dropout_rate=DROPOUT_RATE
).to(device)

# map_location lets a checkpoint trained on one device load on another.
model.load_state_dict(torch.load(MODEL_STATE_DICT_PATH, map_location=device))
model.eval()  # evaluation mode: disables Dropout etc.
print("PyTorch 模型加载完成。")

print(f"--- 正在加载 MinMaxScaler: {SCALER_PATH} ---")
if not os.path.exists(SCALER_PATH):
    raise FileNotFoundError(f"Scaler 文件未找到: {SCALER_PATH}\n请确保Scaler已在训练时保存到正确的位置。")
scaler = joblib.load(SCALER_PATH)
print("MinMaxScaler 加载完成。")
print(f"Scaler 学到的最小值 (x, y, z): {scaler.data_min_}")
print(f"Scaler 学到的最大值 (x, y, z): {scaler.data_max_}")


# --- 5. 数据预处理 ---

# 5.1 坐标系转换 (r, e, b) -> (x, y, z) (与训练脚本保持一致)
def spherical_to_cartesian_coords(r, e, b):
    """Convert spherical coordinates (r, e, b) to Cartesian (x, y, z).

    r: radial distance (range)
    e: elevation angle from the XY plane, in [-pi/2, pi/2]
    b: bearing/azimuth in the XY plane, counter-clockwise from the +X axis
    Works element-wise on scalars, NumPy arrays, and pandas Series.
    Matches the transform used by the training script.
    """
    horizontal = r * np.cos(e)  # projection of r onto the XY plane
    return horizontal * np.cos(b), horizontal * np.sin(b), r * np.sin(e)


print("\n--- 正在进行坐标转换 (r,e,b -> x,y,z) ---")
df_long_format['x_abs'], df_long_format['y_abs'], df_long_format['z_abs'] = spherical_to_cartesian_coords(
    df_long_format['r'], df_long_format['e'], df_long_format['b']
)
df_xyz_original_cartesian = df_long_format.drop(columns=['r', 'e', 'b'])  # keep only the Cartesian columns
print("坐标转换完成。")

# --- Gaussian-noise helper (disabled by default; uncomment both the function
# and the call below to add noise to the model input) ---
# def add_gaussian_noise_for_prediction(df_xyz_raw_input, noise_mean, noise_std_dev, x_col, y_col, z_col):
#     if noise_std_dev == 0:
#         print("\n--- 噪声标准差为0，跳过高斯噪声添加步骤。---")
#         return df_xyz_raw_input
#     print(f"\n--- 正在对测试输入数据添加高斯噪声 (均值={noise_mean}, 标准差={noise_std_dev}米) ---")
#     df_xyz_raw_input[x_col] = df_xyz_raw_input[x_col] + np.random.normal(noise_mean, noise_std_dev, df_xyz_raw_input.shape[0])
#     df_xyz_raw_input[y_col] = df_xyz_raw_input[y_col] + np.random.normal(noise_mean, noise_std_dev, df_xyz_raw_input.shape[0])
#     df_xyz_raw_input[z_col] = df_xyz_raw_input[z_col] + np.random.normal(noise_mean, noise_std_dev, df_xyz_raw_input.shape[0])
#     print("测试数据噪声添加完成。")
#     return df_xyz_raw_input

# df_xyz_original_cartesian = add_gaussian_noise_for_prediction(df_xyz_original_cartesian, NOISE_MEAN, NOISE_STD_DEV, 'x_abs', 'y_abs', 'z_abs')


# 5.2 Resample, smooth, convert to relative coordinates, and keep the raw
# global trajectories (same logic as the training script).
print(f"--- 正在对测试数据进行重采样 ({ORIGINAL_DT_SEC}s -> {NEW_DT_SEC}s) 并平滑，并转换为相对坐标 ---")
df_processed_list = []
all_drones_global_coords_resampled = {}  # full global (absolute) trajectory per drone

for drone_id in range(NUM_DRONES):
    df_drone_orig = df_xyz_original_cartesian[df_xyz_original_cartesian['drone_id'] == drone_id].copy()

    if df_drone_orig.empty:
        print(f"警告: 无人机 {drone_id} 没有数据，跳过处理。")
        continue

    df_drone_orig_indexed = df_drone_orig.set_index('time')[['x_abs', 'y_abs', 'z_abs']]
    # Drop duplicate timestamps (keep the first) so reindex/interpolate behave.
    num_duplicates = df_drone_orig_indexed.index.duplicated(keep='first').sum()
    if num_duplicates > 0:
        print(f"警告: 无人机 {drone_id} 存在 {num_duplicates} 个重复时间戳，已保留第一个。")
        df_drone_orig_indexed = df_drone_orig_indexed[~df_drone_orig_indexed.index.duplicated(keep='first')]
    df_drone_orig_indexed = df_drone_orig_indexed.sort_index()

    # Force the new time index to span 0..TOTAL_TIME_SEC at NEW_DT_SEC steps
    # so every drone shares an identical time base.
    new_time_index_resample = pd.Index(np.arange(0, TOTAL_TIME_SEC + NEW_DT_SEC, NEW_DT_SEC), name='time')

    df_drone_resampled_current = df_drone_orig_indexed.reindex(new_time_index_resample).interpolate(method='linear')

    # Fill any remaining edge NaNs (before the first / after the last sample).
    if df_drone_resampled_current.isnull().values.any():
        df_drone_resampled_current = df_drone_resampled_current.ffill().bfill()
        if df_drone_resampled_current.isnull().values.any():
            print(f"错误: 无人机 {drone_id} 在重采样和插值后仍存在 NaN 值。请检查原始数据的时间范围和 TOTAL_TIME_SEC。")
            continue

    # Keep the raw global trajectory for the animation's ground truth and
    # for aligning predictions back into the global frame.
    all_drones_global_coords_resampled[drone_id] = df_drone_resampled_current.copy()

    # Starting global position of this trajectory; origin of the relative frame.
    initial_world_pos = df_drone_resampled_current.iloc[0][['x_abs', 'y_abs', 'z_abs']].values

    df_drone_relative = df_drone_resampled_current.copy()
    df_drone_relative['x_rel'] = df_drone_relative['x_abs'] - initial_world_pos[0]
    df_drone_relative['y_rel'] = df_drone_relative['y_abs'] - initial_world_pos[1]
    df_drone_relative['z_rel'] = df_drone_relative['z_abs'] - initial_world_pos[2]

    # Optional centered rolling-mean smoothing of the relative coordinates.
    if SMOOTHING_WINDOW_SIZE > 1:
        df_drone_relative[['x_rel', 'y_rel', 'z_rel']] = df_drone_relative[['x_rel', 'y_rel', 'z_rel']].rolling(
            window=SMOOTHING_WINDOW_SIZE, min_periods=1, center=True
        ).mean()
        df_drone_relative = df_drone_relative.interpolate(method='linear', limit_direction='both')

    df_drone_relative = df_drone_relative.reset_index(drop=False)
    df_drone_relative['drone_id'] = drone_id
    df_processed_list.append(df_drone_relative)

if not df_processed_list:
    raise ValueError("没有足够的数据或无人机来完成重采样和序列构建。请检查数据和参数。")

df_processed_test = pd.concat(df_processed_list)
print(f"测试数据预处理完成，总条数: {len(df_processed_test)}")

# 5.3 Normalize with the scaler fitted at training time (do NOT refit here,
# or test data would leak into the normalization statistics).
print("--- 正在对测试数据进行归一化 (使用加载的 MinMaxScaler) ---")
coords_to_scale = df_processed_test[['x_rel', 'y_rel', 'z_rel']].values
scaled_coords = scaler.transform(coords_to_scale)

df_scaled_test = df_processed_test.copy()
df_scaled_test[['x_rel', 'y_rel', 'z_rel']] = scaled_coords
print("测试数据归一化完成。")

# --- 6. Run sliding-window predictions and organize the animation data ---
print("--- 正在执行滑动窗口预测并组织动画数据 ---")

animation_frames_data = {}
all_prediction_errors = []  # per-window Euclidean error sequences

# Identity matrix; row i is drone i's one-hot ID appended to the input features.
drone_ids_one_hot = np.eye(NUM_DRONES)

# Latest window start that still leaves room for a full input+output window.
max_animation_time = TOTAL_TIME_SEC - (N_STEPS_IN + M_STEPS_OUT) * NEW_DT_SEC

for t_sec_start in np.arange(0, max_animation_time + NEW_DT_SEC, SLIDING_WINDOW_STEP_SEC):

    current_frame_data = {'time_sec': t_sec_start, 'drones': {}}

    for drone_id in range(NUM_DRONES):
        drone_data_scaled = df_scaled_test[df_scaled_test['drone_id'] == drone_id][['x_rel', 'y_rel', 'z_rel']].values
        drone_global_full_trajectory = all_drones_global_coords_resampled[drone_id][['x_abs', 'y_abs', 'z_abs']].values

        # Window indices in resampled steps: [start, end_input) feeds the
        # model; [start_output, end_output) is the ground-truth future.
        start_step = int(t_sec_start / NEW_DT_SEC)
        end_input_step = start_step + N_STEPS_IN
        start_output_step = end_input_step
        end_output_step = start_output_step + M_STEPS_OUT

        # Skip windows that would run past the available data.
        if end_output_step > len(drone_data_scaled) or len(drone_data_scaled) < N_STEPS_IN + M_STEPS_OUT:
            # print(f"警告: 无人机 {drone_id} 在时间 {t_sec_start:.2f}s 处数据不足，跳过。")
            continue

        input_seq_scaled = drone_data_scaled[start_step: end_input_step]

        # The prediction's "visual start" is the last input point (global frame).
        prediction_visual_start_point_global = drone_global_full_trajectory[end_input_step - 1, :]

        # Model input: scaled relative coordinates + one-hot drone ID per step.
        input_for_model_np = np.concatenate((input_seq_scaled, np.tile(drone_ids_one_hot[drone_id], (N_STEPS_IN, 1))),
                                            axis=1).astype(np.float32)
        input_for_model_np = np.expand_dims(input_for_model_np, axis=0)  # add batch dimension

        input_for_model_tensor = torch.from_numpy(input_for_model_np).to(device)

        with torch.no_grad():
            predicted_output_scaled_flat_tensor = model(input_for_model_tensor)

        # Flat (M_STEPS_OUT * NUM_COORDINATES,) vector back to (steps, coords).
        predicted_output_scaled_flat = predicted_output_scaled_flat_tensor.cpu().numpy()[0]
        predicted_output_scaled = predicted_output_scaled_flat.reshape(M_STEPS_OUT, NUM_COORDINATES)

        # Inverse-transform to relative coordinates (relative to this drone's
        # time=0 position, the frame the scaler was fitted in).
        predicted_output_relative_to_drone_start = scaler.inverse_transform(predicted_output_scaled)

        # Global position of this drone at time=0.
        drone_initial_global_pos = drone_global_full_trajectory[0, :]

        # Relative prediction -> global coordinates, using the time=0 baseline.
        predicted_global_from_initial_baseline = predicted_output_relative_to_drone_start + drone_initial_global_pos

        # Global position of the last true input point.
        true_input_end_point_global = drone_global_full_trajectory[end_input_step - 1, :]

        # First predicted point (still on the time=0 baseline).
        predicted_first_point_global_from_initial_baseline = predicted_global_from_initial_baseline[0, :]

        # Offset between the true input end and the first predicted point;
        # translating by it makes the whole predicted trajectory connect
        # seamlessly to the true trajectory.
        alignment_offset = true_input_end_point_global - predicted_first_point_global_from_initial_baseline

        # Apply the offset to obtain the final globally-aligned prediction.
        predicted_output_global = predicted_global_from_initial_baseline + alignment_offset

        true_future_global = drone_global_full_trajectory[start_output_step: end_output_step]
        input_trajectory_global = drone_global_full_trajectory[start_step: end_input_step]

        # Per-step Euclidean error of this prediction, stored for the error plot.
        errors_for_this_prediction = np.linalg.norm(true_future_global - predicted_output_global, axis=1)
        all_prediction_errors.append(errors_for_this_prediction)

        current_frame_data['drones'][drone_id] = {
            'input_global': input_trajectory_global,
            'true_future_global': true_future_global,
            'predicted_global': predicted_output_global,
            'prediction_visual_start_point_global': prediction_visual_start_point_global
        }

    if current_frame_data['drones']:
        animation_frames_data[t_sec_start] = current_frame_data

if not animation_frames_data:
    print("没有生成任何动画帧数据。请检查 SLIDING_WINDOW_STEP_SEC 或数据长度是否足够。")
    exit()

print(f"共生成 {len(animation_frames_data)} 个动画帧数据。")

# Frames in playback order (sorted by window start time).
animation_frames_list = sorted(animation_frames_data.values(), key=lambda x: x['time_sec'])



# --- 7. Animated 3D visualization (global coordinates) ---
print("\n--- 正在生成 3D 动态轨迹可视化 (全局坐标) ---")

fig = plt.figure(figsize=(15, 12))
ax = fig.add_subplot(111, projection='3d')

ax.set_xlabel('X 坐标 (米)', fontsize=12)
ax.set_ylabel('Y 坐标 (米)', fontsize=12)
ax.set_zlabel('Z 坐标 (米)', fontsize=12)
ax.set_title('无人机轨迹预测 (全局坐标)', fontsize=16)

# Pre-create one line/scatter artist per drone; update() only mutates their
# data, never adds or removes artists.
lines = {}
scatter_points = {}
for drone_id in range(NUM_DRONES):
    # Label only the first drone's artists so each legend entry appears once.
    input_label = '输入轨迹 (过去真实)' if drone_id == 0 else ''
    true_label = '真实未来轨迹' if drone_id == 0 else ''
    pred_label = '预测未来轨迹' if drone_id == 0 else ''
    start_label = '预测起始点' if drone_id == 0 else ''

    lines[f'input_{drone_id}'] = \
        ax.plot([], [], [], color='blue', linestyle='-', label=input_label, alpha=0.7)[0]
    lines[f'true_{drone_id}'] = \
        ax.plot([], [], [], color='green', linestyle='-', label=true_label, alpha=0.7)[0]
    lines[f'pred_{drone_id}'] = \
        ax.plot([], [], [], color='red', linestyle='--', label=pred_label)[0]
    scatter_points[f'start_{drone_id}'] = \
        ax.scatter([], [], [], color='blue', marker='o', s=50, alpha=0.9, label=start_label)

ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1), fontsize=12)

# Fix the axis limits from all drones' full global trajectories so the view
# does not jump between animation frames.
all_x_global_flat = np.array(
    [df['x_abs'].values for df in all_drones_global_coords_resampled.values() if not df.empty]).flatten()
all_y_global_flat = np.array(
    [df['y_abs'].values for df in all_drones_global_coords_resampled.values() if not df.empty]).flatten()
all_z_global_flat = np.array(
    [df['z_abs'].values for df in all_drones_global_coords_resampled.values() if not df.empty]).flatten()

if all_x_global_flat.size == 0:
    print("警告: 无法从数据中推断出全局坐标范围，将使用默认范围。")
    min_x, max_x = -100, 100
    min_y, max_y = -100, 100
    min_z, max_z = -10, 10
else:
    min_x, max_x = all_x_global_flat.min(), all_x_global_flat.max()
    min_y, max_y = all_y_global_flat.min(), all_y_global_flat.max()
    min_z, max_z = all_z_global_flat.min(), all_z_global_flat.max()

# Add a 10% margin so trajectories don't touch the plot edges (fallback to
# 1.0 when a dimension is degenerate).
buffer_x = (max_x - min_x) * 0.1 if (max_x - min_x) != 0 else 1.0
buffer_y = (max_y - min_y) * 0.1 if (max_y - min_y) != 0 else 1.0
buffer_z = (max_z - min_z) * 0.1 if (max_z - min_z) != 0 else 1.0

# Independent limits per axis, equal box aspect for an undistorted 3D view.
ax.set_xlim([min_x - buffer_x, max_x + buffer_x])
ax.set_ylim([min_y - buffer_y, max_y + buffer_y])
ax.set_zlim([min_z - buffer_z, max_z + buffer_z])
ax.set_box_aspect([1, 1, 1])


def update(frame_idx):
    """FuncAnimation callback: refresh every drone's artists for one frame.

    frame_idx indexes into animation_frames_list; drones absent from the
    frame get their artists cleared. Returns all artists (harmless with
    blit=False).
    """
    all_artists = list(lines.values()) + list(scatter_points.values())

    # Guard: FuncAnimation may request an index past the prepared frames.
    if frame_idx >= len(animation_frames_list):
        return all_artists

    current_frame_data = animation_frames_list[frame_idx]
    current_time_sec = current_frame_data['time_sec']

    for drone_id in range(NUM_DRONES):
        if drone_id in current_frame_data['drones']:
            drone_data = current_frame_data['drones'][drone_id]

            # 3D lines take x/y via set_data and z via set_3d_properties.
            input_traj = drone_data['input_global']
            lines[f'input_{drone_id}'].set_data(input_traj[:, 0], input_traj[:, 1])
            lines[f'input_{drone_id}'].set_3d_properties(input_traj[:, 2])

            true_future_traj = drone_data['true_future_global']
            lines[f'true_{drone_id}'].set_data(true_future_traj[:, 0], true_future_traj[:, 1])
            lines[f'true_{drone_id}'].set_3d_properties(true_future_traj[:, 2])

            predicted_future_traj = drone_data['predicted_global']
            lines[f'pred_{drone_id}'].set_data(predicted_future_traj[:, 0], predicted_future_traj[:, 1])
            lines[f'pred_{drone_id}'].set_3d_properties(predicted_future_traj[:, 2])

            # Marker at the prediction's visual start (last input point).
            start_pos = drone_data['prediction_visual_start_point_global']
            scatter_points[f'start_{drone_id}'].set_offsets(np.array([[start_pos[0], start_pos[1]]]))
            scatter_points[f'start_{drone_id}'].set_3d_properties([start_pos[2]], 'z')

        else:  # no data for this drone in the current frame: clear its artists
            lines[f'input_{drone_id}'].set_data([], [])
            lines[f'input_{drone_id}'].set_3d_properties([])
            lines[f'true_{drone_id}'].set_data([], [])
            lines[f'true_{drone_id}'].set_3d_properties([])
            lines[f'pred_{drone_id}'].set_data([], [])
            lines[f'pred_{drone_id}'].set_3d_properties([])
            scatter_points[f'start_{drone_id}'].set_offsets(np.array([[], []]).T)
            scatter_points[f'start_{drone_id}'].set_3d_properties([], 'z')

    ax.set_title(f'无人机轨迹预测 (全局坐标) - 时间: {current_time_sec:.2f}s', fontsize=16)

    return all_artists


# Keep a reference to the animation so it isn't garbage-collected while shown.
ani = FuncAnimation(fig, update, frames=len(animation_frames_list), interval=800, blit=False,
                    repeat=False)  # 3D animations generally don't support blit=True

plt.tight_layout(rect=[0, 0, 0.85, 1])  # reserve the right margin for the legend
plt.show()

# --- Visualization: mean Euclidean error of the sliding-window predictions (global frame) ---
print("\n--- 可视化: 滑动窗口预测的平均欧氏距离误差 ---")

if all_prediction_errors:
    # Stack all per-prediction error sequences into a rectangular array,
    # truncating each to the shortest common length.
    min_len = min(len(err) for err in all_prediction_errors)
    all_errors_stacked = np.array([err[:min_len] for err in all_prediction_errors])

    if all_errors_stacked.size > 0:
        # Mean error at each future step, averaged over all windows/drones.
        mean_errors_at_each_step = np.mean(all_errors_stacked, axis=0)
        # Standard deviation at each future step (for the shaded band).
        std_errors_at_each_step = np.std(all_errors_stacked, axis=0)

        # Future time axis relative to the prediction start (starts at 0).
        future_time_steps_relative = np.arange(0, min_len) * NEW_DT_SEC

        plt.figure(figsize=(12, 6))
        plt.plot(future_time_steps_relative, mean_errors_at_each_step, color='blue', label='平均欧氏距离误差')
        plt.fill_between(future_time_steps_relative,
                         mean_errors_at_each_step - std_errors_at_each_step,
                         mean_errors_at_each_step + std_errors_at_each_step,
                         color='blue', alpha=0.2, label='标准差范围')

        plt.xlabel('预测未来时间 (秒)', fontsize=12)
        plt.ylabel('欧氏距离误差 (米)', fontsize=12)
        plt.title('所有滑动窗口预测的平均欧氏距离误差', fontsize=16)
        plt.legend(fontsize=12)
        plt.grid(True)
        plt.show()
    else:
        print("没有足够的有效预测数据来绘制平均误差图 (所有预测序列都为空)。")
else:
    print("没有足够的预测数据来绘制平均误差图。请检查 SLIDING_WINDOW_STEP_SEC 或数据长度。")

print("\n--- 预测与可视化完成 ---")
