import os
import random

import joblib
import numpy as np
import pandas as pd
import scipy.io
import torch
import torch.nn as nn

# Import project configuration (seed, device, model hyperparameters, file paths)
from config import (
    SEED, DEVICE, TOTAL_TIME_SEC, ORIGINAL_DT_SEC, NEW_DT_SEC,
    N_STEPS_IN, M_STEPS_OUT, LSTM_UNITS, DROPOUT_RATE, NUM_COORDINATES, NUM_DRONES, NUM_FEATURES,
    SMOOTHING_WINDOW_SIZE, TEST_DATA_PATH, MODEL_STATE_DICT_PATH, SCALER_PATH
)

# Reproducibility: pin every RNG the pipeline touches to the configured seed.
for _seed_fn in (random.seed, np.random.seed, torch.manual_seed):
    _seed_fn(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # cuDNN must also be forced onto deterministic kernels, and autotuning
    # (benchmark mode) disabled, or results can vary run-to-run.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
print(f"随机种子已固定为: {SEED}")

# Predictions (.mat files) are written to a directory next to this script.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
OUTPUT_DIR = os.path.join(SCRIPT_DIR, 'LSTM_predictions')
os.makedirs(OUTPUT_DIR, exist_ok=True)

# GPU availability check: report the device configured in config.DEVICE.
print("\n--- 检查 GPU 可用性 ---")
device = torch.device(DEVICE)
print(f"PyTorch 配置为使用: {device}")
if device.type == 'cuda':
    print(f"CUDA 版本: {torch.version.cuda}")
    print(f"GPU 名称: {torch.cuda.get_device_name(0)}")
print(f"PyTorch 版本: {torch.__version__}")
print("--- GPU 可用性检查结束 ---\n")


# --- 2. PyTorch model definition ---
class LSTMModel(nn.Module):
    """Single-layer LSTM that maps an input sequence to a flat prediction vector.

    Input shape: (batch, seq_len, input_dim). The last hidden state is passed
    through dropout and a linear head producing `output_dim` values per sample.
    """

    def __init__(self, input_dim, lstm_units, output_dim, dropout_rate):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, lstm_units, batch_first=True)
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(lstm_units, output_dim)

    def forward(self, x):
        # Only the final hidden state is used; the per-step outputs are discarded.
        _, (hidden, _) = self.lstm(x)
        last_state = hidden[-1]  # (batch, lstm_units) — equivalent to squeeze(0) for 1 layer
        return self.fc(self.dropout(last_state))


# --- 3. Data loading and initial structuring ---
print(f"\n--- 正在加载数据: {TEST_DATA_PATH} ---")
try:
    # Whitespace-separated file with no header: 1 time column followed by
    # (r, e, b) columns for each drone.
    df_raw = pd.read_csv(TEST_DATA_PATH, sep=r'\s+', header=None, engine='python')
    print("测试数据读取成功。")
except FileNotFoundError:
    print(f"\n错误：文件未找到。请检查文件路径是否正确：'{os.path.abspath(TEST_DATA_PATH)}'")
    # `exit()` comes from the site module and is not guaranteed to exist in all
    # run modes, and it would exit with status 0; SystemExit(1) signals failure.
    raise SystemExit(1)
except pd.errors.EmptyDataError:
    print(f"\n错误：文件 '{os.path.abspath(TEST_DATA_PATH)}' 为空或不包含数据。")
    raise SystemExit(1)
except Exception as e:
    print(f"\n读取数据文件时发生其他错误: {e}")
    raise SystemExit(1)

if df_raw.empty:
    raise ValueError(f"文件 {TEST_DATA_PATH} 为空或无法解析为DataFrame。")

# Infer the drone count from the column count: total = 1 + N_DRONES * NUM_COORDINATES.
num_cols = df_raw.shape[1]
if (num_cols - 1) % NUM_COORDINATES != 0:
    raise ValueError(
        f"文件 {TEST_DATA_PATH} 的数据格式似乎不正确。总列数 ({num_cols}) "
        f"不符合 (1 + N_DRONES * {NUM_COORDINATES}) 的模式。"
    )
# NOTE: the original `global NUM_DRONES, NUM_FEATURES` statement was removed —
# `global` has no effect at module scope; these assignments already rebind the
# module-level names imported from config.
NUM_DRONES = (num_cols - 1) // NUM_COORDINATES
NUM_FEATURES = NUM_COORDINATES + NUM_DRONES

print(f"根据文件列数 ({num_cols}) 推断无人机数量为: {NUM_DRONES} 架")
print(f"数据加载完成，总行数: {df_raw.shape[0]}, 总列数: {df_raw.shape[1]}")

# Convert raw wide-format data (one row per timestamp, one column group per
# drone) into long format with an explicit 0-based drone_id column.
print("--- 正在将原始宽格式数据转换为长格式并分配无人机ID ---")
headers = ['time']
for idx in range(1, NUM_DRONES + 1):
    headers += [f'r{idx}', f'e{idx}', f'b{idx}']
df_raw.columns = headers

frames = []
for idx in range(1, NUM_DRONES + 1):
    subset = df_raw[['time', f'r{idx}', f'e{idx}', f'b{idx}']].copy()
    subset.columns = ['time', 'r', 'e', 'b']
    subset['drone_id'] = idx - 1
    frames.append(subset)

df_long_format = (
    pd.concat(frames)
    .sort_values(['drone_id', 'time'])
    .reset_index(drop=True)
)
print(f"长格式转换完成。形状: {df_long_format.shape}")

# --- 4. Load the trained PyTorch model and the fitted MinMaxScaler ---
print(f"--- 正在加载模型: {MODEL_STATE_DICT_PATH} ---")
if not os.path.exists(MODEL_STATE_DICT_PATH):
    raise FileNotFoundError(f"模型文件未找到: {MODEL_STATE_DICT_PATH}")

# Rebuild the architecture, then restore the trained weights onto the device.
model = LSTMModel(
    input_dim=NUM_FEATURES,
    lstm_units=LSTM_UNITS,
    output_dim=M_STEPS_OUT * NUM_COORDINATES,
    dropout_rate=DROPOUT_RATE,
).to(device)
state_dict = torch.load(MODEL_STATE_DICT_PATH, map_location=device)
model.load_state_dict(state_dict)
model.eval()  # inference mode: disables dropout
print("PyTorch 模型加载完成。")

print(f"--- 正在加载 MinMaxScaler: {SCALER_PATH} ---")
if not os.path.exists(SCALER_PATH):
    raise FileNotFoundError(f"Scaler 文件未找到: {SCALER_PATH}")
scaler = joblib.load(SCALER_PATH)
print("MinMaxScaler 加载完成。")


# --- 5. Data preprocessing ---
def spherical_to_cartesian_coords(r, e, b):
    """Convert spherical coordinates to Cartesian (x, y, z).

    r is the range, e the elevation angle and b the bearing/azimuth, both in
    radians. Works element-wise on scalars, numpy arrays or pandas Series.
    """
    cos_elev = np.cos(e)
    return r * cos_elev * np.cos(b), r * cos_elev * np.sin(b), r * np.sin(e)


# Convert every (r, e, b) sample to absolute Cartesian coordinates, then drop
# the spherical columns.
print("\n--- 正在进行坐标转换 (r,e,b -> x,y,z) ---")
cart_x, cart_y, cart_z = spherical_to_cartesian_coords(
    df_long_format['r'], df_long_format['e'], df_long_format['b']
)
df_long_format['x_abs'] = cart_x
df_long_format['y_abs'] = cart_y
df_long_format['z_abs'] = cart_z
df_xyz_original_cartesian = df_long_format.drop(columns=['r', 'e', 'b'])
print("坐标转换完成。")

print(f"--- 正在对测试数据进行重采样 ({ORIGINAL_DT_SEC}s -> {NEW_DT_SEC}s) 并平滑 ---")
# Per-drone processed frames (relative, smoothed coordinates), concatenated below.
df_processed_list = []
# drone_id -> absolute (x_abs, y_abs, z_abs) trajectory on the new time grid;
# consumed later when mapping predictions back to world coordinates.
all_drones_global_coords_resampled = {}

for drone_id in range(NUM_DRONES):
    df_drone_orig = df_xyz_original_cartesian[df_xyz_original_cartesian['drone_id'] == drone_id].copy()
    if df_drone_orig.empty:
        continue

    # Index by time so the reindex/interpolate chain operates on the time axis.
    df_drone_orig_indexed = df_drone_orig.set_index('time')[['x_abs', 'y_abs', 'z_abs']]
    if df_drone_orig_indexed.index.has_duplicates:
        # Keep the first sample for any duplicated timestamp.
        df_drone_orig_indexed = df_drone_orig_indexed[~df_drone_orig_indexed.index.duplicated(keep='first')]

    # Uniform time grid [0, TOTAL_TIME_SEC] at NEW_DT_SEC spacing (endpoint included).
    new_time_index_resample = pd.Index(np.arange(0, TOTAL_TIME_SEC + NEW_DT_SEC, NEW_DT_SEC), name='time')
    # Linear interpolation onto the grid; ffill/bfill cover grid points that fall
    # outside the original time span at either end.
    df_drone_resampled_current = df_drone_orig_indexed.reindex(new_time_index_resample).interpolate(
        method='linear').ffill().bfill()
    if df_drone_resampled_current.isnull().values.any():
        print(f"错误: 无人机 {drone_id} 在重采样后仍存在 NaN。")
        continue

    # Keep the absolute trajectory; prediction post-processing needs it.
    all_drones_global_coords_resampled[drone_id] = df_drone_resampled_current.copy()
    # Express the trajectory relative to the drone's first resampled position —
    # presumably matching how the model was trained; TODO confirm against training code.
    initial_world_pos = df_drone_resampled_current.iloc[0].values
    df_drone_relative = df_drone_resampled_current.copy()
    df_drone_relative['x_rel'] = df_drone_relative['x_abs'] - initial_world_pos[0]
    df_drone_relative['y_rel'] = df_drone_relative['y_abs'] - initial_world_pos[1]
    df_drone_relative['z_rel'] = df_drone_relative['z_abs'] - initial_world_pos[2]

    if SMOOTHING_WINDOW_SIZE > 1:
        # Centered moving average on the relative coordinates; min_periods=1 keeps
        # the series full-length, and the trailing interpolate is a safety net.
        df_drone_relative[['x_rel', 'y_rel', 'z_rel']] = df_drone_relative[['x_rel', 'y_rel', 'z_rel']].rolling(
            window=SMOOTHING_WINDOW_SIZE, min_periods=1, center=True
        ).mean().interpolate(method='linear', limit_direction='both')

    # Restore 'time' as a column and tag all rows with this drone's id.
    df_drone_relative = df_drone_relative.reset_index(drop=False)
    df_drone_relative['drone_id'] = drone_id
    df_processed_list.append(df_drone_relative)

if not df_processed_list:
    raise ValueError("没有足够的数据来完成预处理。")
df_processed_test = pd.concat(df_processed_list)
print(f"测试数据预处理完成，总条数: {len(df_processed_test)}")

# Scale the relative coordinates with the MinMaxScaler fitted at training time.
print("--- 正在对测试数据进行归一化 ---")
rel_cols = ['x_rel', 'y_rel', 'z_rel']
df_scaled_test = df_processed_test.copy()
df_scaled_test[rel_cols] = scaler.transform(df_processed_test[rel_cols].values)
print("测试数据归一化完成。")

# --- 6. Run predictions and collect results for the .mat file ---
print("\n--- 正在生成预测结果以保存到 .mat 文件 ---")

all_predictions_for_saving = []
prediction_step_sec = NEW_DT_SEC  # slide the window one resampled step (NEW_DT_SEC s) at a time
# One-hot drone identity appended to each timestep's coordinate features, so the
# model input width is NUM_COORDINATES + NUM_DRONES (= NUM_FEATURES).
drone_ids_one_hot = np.eye(NUM_DRONES)
# Latest window start that still leaves room for N_STEPS_IN inputs + M_STEPS_OUT outputs.
max_prediction_time = TOTAL_TIME_SEC - (N_STEPS_IN + M_STEPS_OUT) * NEW_DT_SEC

for t_sec_start in np.arange(0, max_prediction_time + NEW_DT_SEC, prediction_step_sec):
    for drone_id in range(NUM_DRONES):
        if drone_id not in all_drones_global_coords_resampled:
            continue

        # Scaled relative trajectory (model input space) and absolute trajectory
        # (world space) for this drone, both on the resampled grid.
        drone_data_scaled = df_scaled_test[df_scaled_test['drone_id'] == drone_id][['x_rel', 'y_rel', 'z_rel']].values
        drone_global_full_trajectory = all_drones_global_coords_resampled[drone_id][['x_abs', 'y_abs', 'z_abs']].values

        # Window bounds in resampled-step units: [start, end_input) is the model
        # input, [end_input, end_output) is the prediction horizon.
        start_step = int(round(t_sec_start / NEW_DT_SEC))
        end_input_step = start_step + N_STEPS_IN
        end_output_step = end_input_step + M_STEPS_OUT

        if end_output_step > len(drone_data_scaled):
            continue

        # Build the (1, N_STEPS_IN, NUM_FEATURES) input: scaled coords + one-hot id.
        input_seq_scaled = drone_data_scaled[start_step:end_input_step]
        input_for_model_np = np.concatenate((input_seq_scaled, np.tile(drone_ids_one_hot[drone_id], (N_STEPS_IN, 1))),
                                            axis=1).astype(np.float32)
        input_for_model_np = np.expand_dims(input_for_model_np, axis=0)
        input_for_model_tensor = torch.from_numpy(input_for_model_np).to(device)

        with torch.no_grad():
            predicted_output_scaled_flat_tensor = model(input_for_model_tensor)

        # Un-flatten to (M_STEPS_OUT, NUM_COORDINATES) and undo the MinMax scaling,
        # giving coordinates relative to the drone's initial resampled position.
        predicted_output_scaled_flat = predicted_output_scaled_flat_tensor.cpu().numpy()[0]
        predicted_output_scaled = predicted_output_scaled_flat.reshape(M_STEPS_OUT, NUM_COORDINATES)
        predicted_output_relative_to_drone_start = scaler.inverse_transform(predicted_output_scaled)

        # Shift back to world coordinates using the drone's initial position...
        drone_initial_global_pos = drone_global_full_trajectory[0, :]
        predicted_global_from_initial_baseline = predicted_output_relative_to_drone_start + drone_initial_global_pos

        # ...then translate the whole predicted segment so its first point
        # coincides with the last observed input point (drift correction).
        # NOTE(review): this forces the first predicted point onto the last true
        # point, discarding the model's own estimate for that step — confirm this
        # alignment scheme is intended.
        true_input_end_point_global = drone_global_full_trajectory[end_input_step - 1, :]
        predicted_first_point_global_from_initial_baseline = predicted_global_from_initial_baseline[0, :]
        alignment_offset = true_input_end_point_global - predicted_first_point_global_from_initial_baseline
        predicted_output_global = predicted_global_from_initial_baseline + alignment_offset

        # Timestamps for the predicted steps. NOTE(review): this indexes the
        # concatenated multi-drone frame positionally, so it reads rows from the
        # first drone's slice — correct only because every drone shares the same
        # resampled time grid; confirm if the grids can ever differ.
        future_time_indices = df_processed_test['time'].iloc[end_input_step:end_output_step]
        for i in range(min(M_STEPS_OUT, len(future_time_indices))):
            pred_point = predicted_output_global[i]
            timestamp = future_time_indices.iloc[i]
            all_predictions_for_saving.append({
                'prediction_base_time': t_sec_start,
                'drone_id': drone_id,
                'predicted_time': timestamp,
                'pred_x': pred_point[0],
                'pred_y': pred_point[1],
                'pred_z': pred_point[2]
            })

# Convert the collected predictions to a DataFrame and save as a .mat file
# (one MATLAB variable per column), named after the model checkpoint.
if all_predictions_for_saving:
    df_predictions = pd.DataFrame(all_predictions_for_saving)
    matlab_data = {col: df_predictions[col].values for col in df_predictions.columns}

    checkpoint_name = os.path.basename(MODEL_STATE_DICT_PATH)
    stem, _ext = os.path.splitext(checkpoint_name)
    output_filename = os.path.join(OUTPUT_DIR, f"{stem}.mat")

    scipy.io.savemat(output_filename, matlab_data)
    print(f"预测结果已成功保存到: {output_filename}")
else:
    print("没有生成任何预测数据用于保存。")

print("\n--- 预测与保存完成 ---")
