#!/usr/bin/env python3
"""
示例数据生成脚本

生成用于测试和演示的各种类型数据
"""

import os
import json
import numpy as np
import pandas as pd
import h5py
import pickle
from pathlib import Path
from typing import Dict, List, Tuple
import random
from loguru import logger


class SampleDataGenerator:
    """Generate synthetic sensor datasets for testing and demos.

    Simulates keystroke vibration signals (accelerometer + gyroscope),
    extracts simple time-domain features, and writes training / test /
    validation artifacts under ``data_root``.
    """

    def __init__(self, data_root: str = "data"):
        """
        Initialize the data generator.

        Args:
            data_root: Root directory for all generated data files.
        """
        self.data_root = Path(data_root)
        self.sample_rate = 100  # Hz
        self.duration = 10  # seconds (default overall capture window)

        # Key labels grouped by category; used to enumerate all classes.
        self.key_mapping = {
            'alphabet': list('abcdefghijklmnopqrstuvwxyz'),
            'numbers': list('0123456789'),
            'special': ['space', 'enter', 'backspace', 'shift', 'comma', 'period']
        }

    def generate_raw_sensor_data(self, key_label: str,
                               duration: float = 1.0,
                               noise_level: float = 0.1) -> pd.DataFrame:
        """
        Generate simulated raw sensor data for a single keystroke.

        Args:
            key_label: Key label associated with every row.
            duration: Capture duration in seconds.
            noise_level: Standard deviation of the additive Gaussian noise.

        Returns:
            DataFrame with columns ``timestamp``, 3 accelerometer axes,
            3 gyroscope axes, and ``key_label``; exactly
            ``int(duration * sample_rate)`` rows.
        """
        n_samples = int(duration * self.sample_rate)
        # BUGFIX: the previous np.arange(0, duration, 1/rate) could produce
        # one element more or fewer than n_samples because of float-step
        # rounding (e.g. duration=0.937), which made the DataFrame
        # constructor raise on mismatched column lengths. Derive the time
        # axis from the integer sample count so every column matches.
        timestamps = np.arange(n_samples) / self.sample_rate
        t = timestamps

        # Simulated keystroke impact: Gaussian pulse centered mid-window.
        impact_time = duration / 2
        impact_signal = np.exp(-((t - impact_time) ** 2) / 0.01)

        # Exponential decay envelope applied to the impact pulse.
        decay = np.exp(-t * 2)
        vibration_signal = impact_signal * decay

        # Accelerometer channels: scaled vibration plus Gaussian noise;
        # z-axis carries gravity (~9.8 m/s^2) as a constant offset.
        accel_x = vibration_signal * random.uniform(-1, 1) + np.random.normal(0, noise_level, n_samples)
        accel_y = vibration_signal * random.uniform(-1, 1) + np.random.normal(0, noise_level, n_samples)
        accel_z = 9.8 + vibration_signal * random.uniform(-0.5, 0.5) + np.random.normal(0, noise_level, n_samples)

        # Gyroscope channels: derivative of the vibration (angular response),
        # with an order of magnitude less noise than the accelerometer.
        gyro_x = np.gradient(vibration_signal) * random.uniform(-0.1, 0.1) + np.random.normal(0, noise_level/10, n_samples)
        gyro_y = np.gradient(vibration_signal) * random.uniform(-0.1, 0.1) + np.random.normal(0, noise_level/10, n_samples)
        gyro_z = np.gradient(vibration_signal) * random.uniform(-0.1, 0.1) + np.random.normal(0, noise_level/10, n_samples)

        data = pd.DataFrame({
            'timestamp': timestamps,
            'accel_x': accel_x,
            'accel_y': accel_y,
            'accel_z': accel_z,
            'gyro_x': gyro_x,
            'gyro_y': gyro_y,
            'gyro_z': gyro_z,
            'key_label': [key_label] * n_samples
        })

        return data

    def generate_multiple_samples(self, key_label: str, n_samples: int = 10) -> List[pd.DataFrame]:
        """
        Generate several randomized samples for a single key.

        Args:
            key_label: Key label to generate samples for.
            n_samples: Number of samples to generate.

        Returns:
            List of per-sample DataFrames.
        """
        samples = []
        for _ in range(n_samples):
            # Randomize duration and noise so samples are not identical.
            duration = random.uniform(0.8, 1.2)
            noise_level = random.uniform(0.05, 0.15)

            sample = self.generate_raw_sensor_data(key_label, duration, noise_level)
            samples.append(sample)

        return samples

    def generate_training_dataset(self, samples_per_key: int = 200) -> Tuple[np.ndarray, np.ndarray]:
        """
        Generate a full training dataset over every known key.

        Args:
            samples_per_key: Number of samples generated per key.

        Returns:
            Tuple of (feature matrix, label array).
        """
        all_features = []
        all_labels = []

        # Iterate every key in every category.
        for category, keys in self.key_mapping.items():
            for key in keys:
                logger.info(f"生成键位 '{key}' 的训练数据...")

                samples = self.generate_multiple_samples(key, samples_per_key)

                for sample in samples:
                    # Raw 6-channel sensor matrix (rows = time steps).
                    sensor_data = sample[['accel_x', 'accel_y', 'accel_z', 'gyro_x', 'gyro_y', 'gyro_z']].values

                    # Simplified feature extraction (time-domain statistics).
                    features = self._extract_basic_features(sensor_data)
                    all_features.append(features)
                    all_labels.append(key)

        features_array = np.array(all_features)
        labels_array = np.array(all_labels)

        logger.info(f"训练数据集生成完成: {features_array.shape[0]} 样本, {features_array.shape[1]} 特征")

        return features_array, labels_array

    def _extract_basic_features(self, sensor_data: np.ndarray) -> np.ndarray:
        """
        Extract basic time-domain features from each sensor channel.

        Args:
            sensor_data: 2-D array, one column per sensor channel.

        Returns:
            1-D feature vector of length ``7 * n_channels``
            (mean, std, max, min, peak-to-peak, energy, abs-sum per channel).
        """
        features = []

        for i in range(sensor_data.shape[1]):
            signal = sensor_data[:, i]

            features.extend([
                np.mean(signal),           # mean
                np.std(signal),            # standard deviation
                np.max(signal),            # maximum
                np.min(signal),            # minimum
                np.ptp(signal),            # peak-to-peak
                np.sum(signal**2),         # energy
                np.sum(np.abs(signal)),    # sum of absolute values
            ])

        return np.array(features)

    def save_training_dataset(self, features: np.ndarray, labels: np.ndarray,
                            file_path: str = "training_dataset.h5") -> None:
        """
        Save the training dataset to an HDF5 file.

        Args:
            features: Feature matrix.
            labels: Label array.
            file_path: File name, written under ``<data_root>/training/``.

        Raises:
            Exception: Re-raised after logging if the write fails.
        """
        file_path = self.data_root / "training" / file_path
        # BUGFIX: the target directory was never created, so a fresh run
        # failed with FileNotFoundError before writing anything.
        file_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            with h5py.File(file_path, 'w') as f:
                # Feature group (intermediate groups created implicitly).
                features_group = f.create_group('training_data/features')
                features_group.create_dataset('time_domain', data=features)

                # Label group; fixed-width bytes for HDF5 compatibility.
                labels_group = f.create_group('training_data/labels')
                labels_group.create_dataset('key_labels', data=labels.astype('S10'))

                # Metadata attributes describing the dataset.
                metadata_group = f.create_group('training_data/metadata')
                metadata_group.attrs['version'] = '1.0.0'
                metadata_group.attrs['created_at'] = pd.Timestamp.now().isoformat()
                metadata_group.attrs['n_samples'] = len(features)
                metadata_group.attrs['n_features'] = features.shape[1]
                metadata_group.attrs['n_classes'] = len(np.unique(labels))

            logger.info(f"训练数据集保存成功: {file_path}")

        except Exception as e:
            logger.error(f"保存训练数据集失败: {e}")
            raise

    def generate_test_dataset(self, samples_per_key: int = 50) -> Dict:
        """
        Generate a test dataset with per-sample test cases.

        Args:
            samples_per_key: Number of samples generated per key.

        Returns:
            Dict with ``features``, ``labels``, ``metadata`` and
            ``test_cases`` entries.
        """
        features, labels = self.generate_training_dataset(samples_per_key)

        # One test case per sample; expected result equals the true label.
        test_cases = []
        for i in range(len(features)):
            test_case = {
                'id': i,
                'key_label': labels[i],
                'features': features[i],
                'expected_result': labels[i]
            }
            test_cases.append(test_case)

        test_data = {
            'features': features,
            'labels': labels,
            'metadata': {
                'version': '1.0.0',
                'created_at': pd.Timestamp.now().isoformat(),
                'n_samples': len(features),
                'n_features': features.shape[1],
                'n_classes': len(np.unique(labels))
            },
            'test_cases': test_cases
        }

        logger.info(f"测试数据集生成完成: {len(features)} 样本")

        return test_data

    def save_test_dataset(self, test_data: Dict, file_path: str = "test_dataset.pkl") -> None:
        """
        Save the test dataset as a pickle file.

        Args:
            test_data: Test data dict (see :meth:`generate_test_dataset`).
            file_path: File name, written under ``<data_root>/testing/``.

        Raises:
            Exception: Re-raised after logging if the write fails.
        """
        file_path = self.data_root / "testing" / file_path
        # BUGFIX: ensure the output directory exists before opening the file.
        file_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            with open(file_path, 'wb') as f:
                pickle.dump(test_data, f)

            logger.info(f"测试数据集保存成功: {file_path}")

        except Exception as e:
            logger.error(f"保存测试数据集失败: {e}")
            raise

    def generate_validation_split(self) -> Dict:
        """
        Build the validation split configuration (70/15/15).

        Returns:
            Dict describing dataset info, split sizes/paths, feature
            configuration and per-class sample counts.
        """
        # Dataset size derived from the number of keys per category.
        total_keys = sum(len(keys) for keys in self.key_mapping.values())
        samples_per_key = 200
        total_samples = total_keys * samples_per_key

        # 70% train, 15% validation; test absorbs the rounding remainder
        # so the three sizes always sum to total_samples.
        train_size = int(total_samples * 0.7)
        val_size = int(total_samples * 0.15)
        test_size = total_samples - train_size - val_size

        validation_config = {
            "dataset_info": {
                "name": "Vibration Keyboard Validation Dataset",
                "version": "1.0.0",
                "created_at": pd.Timestamp.now().isoformat(),
                "description": "验证数据集，用于模型验证和超参数调优"
            },
            "data_splits": {
                "train": {
                    "size": train_size,
                    "percentage": 0.7,
                    "file_path": "data/training/training_dataset.h5"
                },
                "validation": {
                    "size": val_size,
                    "percentage": 0.15,
                    "file_path": "data/validation/validation_dataset.h5"
                },
                "test": {
                    "size": test_size,
                    "percentage": 0.15,
                    "file_path": "data/testing/test_dataset.pkl"
                }
            },
            "feature_config": {
                "time_domain_features": [
                    "mean", "std", "max", "min", "peak_to_peak", "energy", "abs_sum"
                ],
                "feature_dimensions": 42  # 6 sensor channels * 7 features
            },
            "class_distribution": {
                "alphabet": {key: samples_per_key for key in self.key_mapping['alphabet']},
                "numbers": {key: samples_per_key for key in self.key_mapping['numbers']},
                "special_chars": {key: samples_per_key for key in self.key_mapping['special']}
            }
        }

        return validation_config

    def save_validation_split(self, validation_config: Dict,
                            file_path: str = "validation_split.json") -> None:
        """
        Save the validation split configuration as JSON.

        Args:
            validation_config: Config dict (see :meth:`generate_validation_split`).
            file_path: File name, written under ``<data_root>/validation/``.

        Raises:
            Exception: Re-raised after logging if the write fails.
        """
        file_path = self.data_root / "validation" / file_path
        # BUGFIX: ensure the output directory exists before opening the file.
        file_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(validation_config, f, indent=2, ensure_ascii=False)

            logger.info(f"验证分割配置保存成功: {file_path}")

        except Exception as e:
            logger.error(f"保存验证分割配置失败: {e}")
            raise

def main():
    """Entry point: generate all sample datasets and write them to disk."""
    logger.info("开始生成示例数据...")

    generator = SampleDataGenerator()

    # BUGFIX: create every output directory up front; on a fresh checkout
    # none of them exist and each write below would otherwise fail with
    # FileNotFoundError.
    for subdir in ("raw", "training", "testing", "validation"):
        (generator.data_root / subdir).mkdir(parents=True, exist_ok=True)

    # Training dataset (features + labels) saved as HDF5.
    logger.info("生成训练数据集...")
    features, labels = generator.generate_training_dataset(samples_per_key=50)
    generator.save_training_dataset(features, labels)

    # Test dataset saved as pickle.
    logger.info("生成测试数据集...")
    test_data = generator.generate_test_dataset(samples_per_key=20)
    generator.save_test_dataset(test_data)

    # Validation split configuration saved as JSON.
    logger.info("生成验证分割配置...")
    validation_config = generator.generate_validation_split()
    generator.save_validation_split(validation_config)

    # A few raw per-key samples saved as CSV for manual inspection.
    logger.info("生成原始数据样本...")
    for key in ['a', 'b', 'c', 'd', 'e']:
        sample_data = generator.generate_raw_sensor_data(key, duration=1.0)
        file_path = f"sample_{key}_data.csv"
        sample_data.to_csv(generator.data_root / "raw" / file_path, index=False)

    logger.info("示例数据生成完成！")

if __name__ == "__main__":
    main() 