"""
数据加载和处理
作者: magical857
日期: 2025-11-01
"""

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from scipy.io import loadmat
import pandas as pd
from pathlib import Path
from typing import Tuple, Optional


class TrajectoryDataset(Dataset):
    """Sliding-window dataset over a single trajectory.

    Each sample is a (history, history controls, future states) triple cut
    from the trajectory at consecutive start indices.
    """

    def __init__(self, states, controls, sequence_length, prediction_horizon):
        """
        Args:
            states: (N, state_dim) numpy array of state vectors.
            controls: (N, control_dim) numpy array of control inputs,
                aligned index-for-index with ``states``.
            sequence_length: Length of the input (history) window.
            prediction_horizon: Number of future steps used as the target.

        Raises:
            ValueError: If states/controls lengths differ, or the
                trajectory is too short to cut even one sample.
        """
        # Mismatched lengths would otherwise only surface as an
        # out-of-range slice deep inside __getitem__.
        if len(states) != len(controls):
            raise ValueError(
                f"states和controls长度不匹配: {len(states)} vs {len(controls)}"
            )

        # torch.tensor always copies, so later mutation of the numpy
        # inputs cannot silently corrupt the dataset.
        self.states = torch.tensor(states, dtype=torch.float32)
        self.controls = torch.tensor(controls, dtype=torch.float32)
        self.sequence_length = sequence_length
        self.prediction_horizon = prediction_horizon

        # Number of valid window start positions.
        self.num_samples = len(states) - sequence_length - prediction_horizon + 1

        if self.num_samples <= 0:
            raise ValueError(f"数据长度不足，需要至少 {sequence_length + prediction_horizon} 个时间步")

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        """
        Returns:
            states_seq: (sequence_length, state_dim) input history.
            controls_seq: (sequence_length, control_dim) input controls.
            states_target: (prediction_horizon, state_dim) future states.
        """
        # Input window starting at idx.
        states_seq = self.states[idx:idx+self.sequence_length]
        controls_seq = self.controls[idx:idx+self.sequence_length]

        # Target window immediately following the input window.
        states_target = self.states[idx+self.sequence_length:idx+self.sequence_length+self.prediction_horizon]

        return states_seq, controls_seq, states_target


def load_matlab_data(filepath: str) -> Tuple[np.ndarray, np.ndarray, dict]:
    """
    Load a trajectory data file exported from MATLAB.

    Args:
        filepath: Path to a .mat or .csv file.

    Returns:
        states: (N, 4) array - [x, y, theta, v]
        controls: (N, 2) array - [a, delta]
        metadata: dict with 'dt' (time step, seconds) and 'time'
            (time vector, or None when unavailable)

    Raises:
        ValueError: If the MAT file cannot be parsed or the suffix
            is unsupported.
    """
    filepath = Path(filepath)

    if filepath.suffix == '.mat':
        # Keep the try-body minimal: only the operations that can fail on a
        # malformed/incomplete file. The original bare `except:` swallowed
        # everything (including SystemExit) and discarded the cause.
        try:
            data = loadmat(str(filepath))
            states = data['states'].T  # stored as (4, N) -> (N, 4)
            controls = data['controls'].T  # stored as (2, N-1) -> (N-1, 2)
        except (OSError, KeyError, ValueError, NotImplementedError) as e:
            # NotImplementedError: scipy raises it for MATLAB v7.3 files.
            raise ValueError(f"无法加载MAT文件: {filepath}") from e

        # Repeat the last control so controls aligns 1:1 with states.
        controls = np.vstack([controls, controls[-1:]])

        # loadmat returns scalars as 2-D arrays; squeeze before converting
        # to avoid NumPy's ndim>0 -> float deprecation.
        metadata = {
            'dt': float(np.asarray(data.get('dt', 0.1)).squeeze()),
            'time': data.get('time', None)
        }

    elif filepath.suffix == '.csv':
        # CSV export already has one control row per state row.
        df = pd.read_csv(filepath)

        states = df[['x', 'y', 'theta', 'v']].values
        controls = df[['a', 'delta']].values

        # `'time' in df` tests column membership.
        metadata = {
            'dt': df['time'].diff().mean() if 'time' in df else 0.1,
            'time': df['time'].values if 'time' in df else None
        }

    else:
        raise ValueError(f"不支持的文件格式: {filepath.suffix}")

    print(f"✓ 数据加载成功: {filepath.name}")
    print(f"  - 数据点数: {len(states)}")
    print(f"  - 时间步长: {metadata['dt']:.3f}s")
    print(f"  - 状态维度: {states.shape}")
    print(f"  - 控制维度: {controls.shape}")

    return states, controls, metadata


def normalize_data(states: np.ndarray, controls: np.ndarray) -> Tuple:
    """
    Z-score normalize states and controls per dimension.

    A small epsilon (1e-8) is added to each std so constant columns do
    not divide by zero.

    Returns:
        states_norm, controls_norm, state_mean, state_std, control_mean, control_std
    """
    def _column_stats(arr):
        # Per-column mean and (epsilon-padded) standard deviation.
        return arr.mean(axis=0), arr.std(axis=0) + 1e-8

    state_mean, state_std = _column_stats(states)
    control_mean, control_std = _column_stats(controls)

    return (
        (states - state_mean) / state_std,
        (controls - control_mean) / control_std,
        state_mean,
        state_std,
        control_mean,
        control_std,
    )


def create_dataloaders(data_path: str, config, normalize: bool = True):
    """
    Create train/validation/test DataLoaders from a trajectory file.

    Args:
        data_path: Path to the data file (.mat or .csv).
        config: Configuration object providing TRAIN_RATIO, VAL_RATIO,
            SEQUENCE_LENGTH, PREDICTION_HORIZON, BATCH_SIZE, NUM_WORKERS
            and DEVICE.
        normalize: Whether to z-score normalize states and controls.

    Returns:
        (train_loader, val_loader, test_loader, normalization_params)
        where normalization_params is a dict of mean/std arrays, or None
        when ``normalize`` is False.
    """
    # Load raw trajectory data (metadata is not needed here).
    states, controls, _ = load_matlab_data(data_path)

    if normalize:
        (states, controls,
         state_mean, state_std,
         control_mean, control_std) = normalize_data(states, controls)

        normalization_params = {
            'state_mean': state_mean,
            'state_std': state_std,
            'control_mean': control_mean,
            'control_std': control_std
        }
    else:
        normalization_params = None

    # Chronological split train | val | test; sequences never cross a
    # split boundary, so no leakage between the sets.
    n_total = len(states)
    n_train = int(n_total * config.TRAIN_RATIO)
    n_val = int(n_total * config.VAL_RATIO)

    split_bounds = (
        (0, n_train),                 # train
        (n_train, n_train + n_val),   # val
        (n_train + n_val, n_total),   # test
    )
    train_dataset, val_dataset, test_dataset = (
        TrajectoryDataset(
            states[lo:hi],
            controls[lo:hi],
            config.SEQUENCE_LENGTH,
            config.PREDICTION_HORIZON
        )
        for lo, hi in split_bounds
    )

    pin_memory = config.DEVICE == 'cuda'

    def _make_loader(dataset, shuffle):
        # Single place for the DataLoader settings shared by all three
        # splits (previously duplicated at three call sites).
        return DataLoader(
            dataset,
            batch_size=config.BATCH_SIZE,
            shuffle=shuffle,
            num_workers=config.NUM_WORKERS,
            pin_memory=pin_memory
        )

    # Only the training loader shuffles samples.
    train_loader = _make_loader(train_dataset, True)
    val_loader = _make_loader(val_dataset, False)
    test_loader = _make_loader(test_dataset, False)

    print(f"\n数据集划分:")
    print(f"  训练集: {len(train_dataset)} 样本")
    print(f"  验证集: {len(val_dataset)} 样本")
    print(f"  测试集: {len(test_dataset)} 样本")

    return train_loader, val_loader, test_loader, normalization_params


if __name__ == "__main__":
    # 测试
    import sys
    sys.path.append('..')
    from config import Config
    
    Config.create_dirs()
    
    # 测试数据加载
    data_path = "../data/raw/trajectory_data.csv"
    
    if Path(data_path).exists():
        train_loader, val_loader, test_loader, norm_params = \
            create_dataloaders(data_path, Config)
        
        # 测试一个batch
        states_seq, controls_seq, states_target = next(iter(train_loader))
        print(f"\nBatch形状:")
        print(f"  states_seq: {states_seq.shape}")
        print(f"  controls_seq: {controls_seq.shape}")
        print(f"  states_target: {states_target.shape}")
    else:
        print(f"数据文件不存在: {data_path}")
        print("请先运行MATLAB脚本生成数据")