#!/usr/bin/env python3
"""
数据处理工具模块

提供数据加载、预处理、特征提取等功能的工具函数
"""

import os
import json
import numpy as np
import pandas as pd
import h5py
import pickle
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Union
from loguru import logger


class DataLoader:
    """Loads datasets from the project's on-disk data layout.

    File names are resolved against ``<data_root>/<subdir>/`` and read in
    the format implied by their extension (CSV, ``.npy``, ``.h5``, JSON,
    ``.pkl``).
    """

    # Sub-directories expected to exist under the data root.
    _REQUIRED_DIRS = ('raw', 'processed', 'features', 'training', 'testing', 'validation')

    def __init__(self, data_root: str = "data"):
        """Initialize the loader.

        Args:
            data_root: root directory of the data tree.
        """
        self.data_root = Path(data_root)
        self._validate_data_structure()

    def _validate_data_structure(self):
        """Warn about and create any missing sub-directories of the layout."""
        for name in self._REQUIRED_DIRS:
            subdir = self.data_root / name
            if subdir.exists():
                continue
            logger.warning(f"目录不存在: {subdir}")
            subdir.mkdir(parents=True, exist_ok=True)

    def _resolve(self, subdir: str, name: str) -> Path:
        """Return ``data_root/subdir/name``, raising if the file is absent."""
        path = self.data_root / subdir / name
        if not path.exists():
            raise FileNotFoundError(f"文件不存在: {path}")
        return path

    def load_raw_data(self, file_path: str) -> pd.DataFrame:
        """Load a raw sensor CSV from the ``raw`` directory.

        Args:
            file_path: file name relative to ``<data_root>/raw``.

        Returns:
            DataFrame with the raw sensor samples.

        Raises:
            FileNotFoundError: if the file does not exist.
        """
        path = self._resolve("raw", file_path)
        try:
            frame = pd.read_csv(path)
            logger.info(f"成功加载原始数据: {path}")
            return frame
        except Exception as exc:
            logger.error(f"加载原始数据失败: {exc}")
            raise

    def load_processed_data(self, file_path: str) -> np.ndarray:
        """Load a preprocessed array from the ``processed`` directory.

        Args:
            file_path: file name relative to ``<data_root>/processed``;
                must end in ``.npy`` or ``.h5``.

        Returns:
            The stored data array.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: for an unsupported file extension.
        """
        path = self._resolve("processed", file_path)
        try:
            if path.suffix == '.npy':
                array = np.load(path)
            elif path.suffix == '.h5':
                with h5py.File(path, 'r') as handle:
                    array = handle['data'][:]
            else:
                raise ValueError(f"不支持的文件格式: {path.suffix}")
            logger.info(f"成功加载预处理数据: {path}")
            return array
        except Exception as exc:
            logger.error(f"加载预处理数据失败: {exc}")
            raise

    def load_features(self, file_path: str) -> Dict:
        """Load a feature dictionary (JSON) from the ``features`` directory.

        Args:
            file_path: file name relative to ``<data_root>/features``.

        Returns:
            Parsed feature dictionary.

        Raises:
            FileNotFoundError: if the file does not exist.
        """
        path = self._resolve("features", file_path)
        try:
            with open(path, 'r', encoding='utf-8') as handle:
                feature_dict = json.load(handle)
            logger.info(f"成功加载特征数据: {path}")
            return feature_dict
        except Exception as exc:
            logger.error(f"加载特征数据失败: {exc}")
            raise

    def load_training_data(self, file_path: str) -> Tuple[np.ndarray, np.ndarray]:
        """Load a (features, labels) pair from the ``training`` directory.

        Args:
            file_path: file name relative to ``<data_root>/training``;
                must end in ``.h5`` or ``.pkl``.

        Returns:
            Tuple of (feature matrix, label array).

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: for an unsupported file extension.
        """
        path = self._resolve("training", file_path)
        try:
            if path.suffix == '.h5':
                with h5py.File(path, 'r') as handle:
                    feature_matrix = handle['training_data/features'][:]
                    label_array = handle['training_data/labels/key_labels'][:]
            elif path.suffix == '.pkl':
                # NOTE(review): pickle.load executes arbitrary code on
                # untrusted input — only load .pkl files this project wrote.
                with open(path, 'rb') as handle:
                    payload = pickle.load(handle)
                    feature_matrix = payload['features']
                    label_array = payload['labels']
            else:
                raise ValueError(f"不支持的文件格式: {path.suffix}")
            logger.info(f"成功加载训练数据: {path}")
            return feature_matrix, label_array
        except Exception as exc:
            logger.error(f"加载训练数据失败: {exc}")
            raise

class DataProcessor:
    """Preprocesses raw sensor data (denoise + normalize) and persists it."""

    # Sensor channels extracted from the raw DataFrame, in output order.
    _SENSOR_COLS = ['accel_x', 'accel_y', 'accel_z', 'gyro_x', 'gyro_y', 'gyro_z']

    def __init__(self, data_root: str = "data"):
        """Initialize the processor.

        Args:
            data_root: root directory of the data tree.
        """
        self.data_root = Path(data_root)

    def preprocess_raw_data(self, raw_data: pd.DataFrame,
                            normalize: bool = True,
                            remove_noise: bool = True) -> np.ndarray:
        """Preprocess raw sensor data.

        Args:
            raw_data: raw sensor DataFrame; must contain the six
                accel/gyro columns.
            normalize: apply per-channel z-score normalization.
            remove_noise: apply per-channel median filtering.

        Returns:
            (n_samples, 6) array of preprocessed sensor data.
        """
        try:
            sensor_data = raw_data[self._SENSOR_COLS].values

            # Denoise before normalizing so outliers don't skew the stats.
            if remove_noise:
                sensor_data = self._remove_noise(sensor_data)
            if normalize:
                sensor_data = self._normalize_data(sensor_data)

            logger.info("数据预处理完成")
            return sensor_data

        except Exception as e:
            logger.error(f"数据预处理失败: {e}")
            raise

    def _remove_noise(self, data: np.ndarray, window_size: int = 5) -> np.ndarray:
        """Median-filter each channel to suppress impulse noise.

        Args:
            data: (n_samples, n_channels) array.
            window_size: odd median-filter kernel length.

        Returns:
            Filtered array of the same shape.
        """
        from scipy.signal import medfilt

        # Filter per channel; medfilt zero-pads at the boundaries.
        filtered = np.empty_like(data)
        for channel in range(data.shape[1]):
            filtered[:, channel] = medfilt(data[:, channel], window_size)
        return filtered

    def _normalize_data(self, data: np.ndarray) -> np.ndarray:
        """Z-score-normalize each channel.

        Args:
            data: (n_samples, n_channels) array.

        Returns:
            Array with per-channel zero mean and unit variance.
        """
        mean = np.mean(data, axis=0)
        std = np.std(data, axis=0)
        # Epsilon guards against division by zero on constant channels.
        return (data - mean) / (std + 1e-8)

    def save_processed_data(self, data: np.ndarray, file_path: str,
                            format: str = 'npy') -> None:
        """Save preprocessed data under ``<data_root>/processed``.

        Args:
            data: preprocessed data array.
            file_path: file name relative to the ``processed`` directory.
            format: output format, 'npy' or 'h5'.

        Raises:
            ValueError: for an unsupported *format*.
        """
        file_path = self.data_root / "processed" / file_path

        try:
            # Bug fix: DataProcessor (unlike DataLoader) never pre-creates
            # the layout, so saving into a fresh tree used to fail with
            # FileNotFoundError. Create the destination directory on demand.
            file_path.parent.mkdir(parents=True, exist_ok=True)

            if format == 'npy':
                np.save(file_path, data)
            elif format == 'h5':
                with h5py.File(file_path, 'w') as f:
                    f.create_dataset('data', data=data)
            else:
                raise ValueError(f"不支持的保存格式: {format}")

            logger.info(f"数据保存成功: {file_path}")

        except Exception as e:
            logger.error(f"数据保存失败: {e}")
            raise


class DataValidator:
    """Checks the quality of raw sensor data and extracted feature dicts."""

    # Columns a valid raw-data frame must contain.
    _EXPECTED_COLS = ['timestamp', 'accel_x', 'accel_y', 'accel_z',
                      'gyro_x', 'gyro_y', 'gyro_z', 'key_label']
    _ACCEL_COLS = ['accel_x', 'accel_y', 'accel_z']
    _GYRO_COLS = ['gyro_x', 'gyro_y', 'gyro_z']

    def __init__(self):
        """Initialize the validator (stateless)."""
        pass

    def validate_raw_data(self, data: pd.DataFrame) -> Dict[str, bool]:
        """Validate raw sensor data quality.

        Args:
            data: raw sensor DataFrame.

        Returns:
            Mapping of check name to pass/fail. The ``sample_rate`` key is
            only present when a ``timestamp`` column with >= 2 rows exists.
        """
        results: Dict[str, bool] = {}

        # Completeness: no missing values anywhere.
        results['complete'] = not bool(data.isnull().any().any())

        # Schema: all expected columns present.
        results['columns'] = all(col in data.columns for col in self._EXPECTED_COLS)

        # Physical ranges. Bug fix: these lookups previously raised KeyError
        # when sensor columns were missing — a validator should report the
        # failure, not crash.
        if all(col in data.columns for col in self._ACCEL_COLS):
            results['accel_range'] = bool(
                data[self._ACCEL_COLS].abs().max().max() <= 20)  # m/s²
        else:
            results['accel_range'] = False
        if all(col in data.columns for col in self._GYRO_COLS):
            results['gyro_range'] = bool(
                data[self._GYRO_COLS].abs().max().max() <= 2000)  # deg/s
        else:
            results['gyro_range'] = False

        # Sampling rate: mean interval should be ~0.01 s (100 Hz).
        if 'timestamp' in data.columns:
            timestamps = data['timestamp'].values
            if len(timestamps) > 1:
                mean_interval = np.mean(np.diff(timestamps))
                results['sample_rate'] = bool(abs(mean_interval - 0.01) < 0.001)

        return results

    def validate_features(self, features: Dict) -> Dict[str, bool]:
        """Validate feature-dictionary structure and contents.

        Args:
            features: feature dictionary; expected to map 'time_domain' /
                'frequency_domain' to dicts of feature vectors.

        Returns:
            Mapping of check name to pass/fail.
        """
        results: Dict[str, bool] = {}

        required_keys = ['time_domain', 'frequency_domain']
        results['structure'] = all(key in features for key in required_keys)

        # Each declared feature group must contain only non-empty vectors.
        for key in required_keys:
            if key in features:
                results[key] = all(len(v) > 0 for v in features[key].values())

        return results


def create_sample_data(output_path: str = 'data/raw/sample_data.csv',
                       n_samples: int = 1000,
                       seed: Optional[int] = None) -> pd.DataFrame:
    """Create a synthetic 100 Hz IMU recording and write it to CSV.

    Args:
        output_path: destination CSV file. Parent directories are created
            on demand (bug fix: previously crashed with FileNotFoundError
            when ``data/raw`` did not exist).
        n_samples: number of samples to generate.
        seed: optional RNG seed for reproducible data; ``None`` keeps the
            data random, as before.

    Returns:
        The generated DataFrame (also written to *output_path*).
    """
    rng = np.random.default_rng(seed)
    sample_data = {
        # 100 Hz timestamps: 0.00, 0.01, 0.02, ...
        'timestamp': np.arange(n_samples) * 0.01,
        'accel_x': rng.normal(0, 0.1, n_samples),
        'accel_y': rng.normal(0, 0.1, n_samples),
        'accel_z': rng.normal(9.8, 0.1, n_samples),  # gravity on z
        'gyro_x': rng.normal(0, 0.01, n_samples),
        'gyro_y': rng.normal(0, 0.01, n_samples),
        'gyro_z': rng.normal(0, 0.01, n_samples),
        'key_label': ['a'] * n_samples,
    }

    df = pd.DataFrame(sample_data)
    target = Path(output_path)
    target.parent.mkdir(parents=True, exist_ok=True)
    df.to_csv(target, index=False)
    logger.info("示例数据创建完成")
    return df


if __name__ == "__main__":
    # 创建示例数据
    create_sample_data() 