import os
import pandas as pd
import numpy as np
from math import log2
from collections import defaultdict
from typing import Dict, List, Tuple
from sklearn.feature_selection import mutual_info_classif

from data_loader import prepare_dataset  # 数据准备
from feature_extraction import extract_features_from_segments, normalize_features  # 特征工程

# 定义姿势类别
CLASSES = ['empty', 'laydown', 'leftback', 'leftfront', 'rightback', 'rightfront', 'sitting']

def load_sample_data(data_dir: str) -> Tuple[pd.DataFrame, List[str]]:
    """Load one sample CSV per posture class and extract sliding-window features.

    Args:
        data_dir: Root directory searched recursively for sample CSV files.

    Returns:
        Tuple of (features DataFrame, labels list with one label per window row).

    Raises:
        ValueError: If no matching sample file was found for any class.
    """
    features_list = []
    labels_list = []

    for class_name in CLASSES:
        # Find the first CSV file for this class anywhere under data_dir.
        found = False
        for root, _, files in os.walk(data_dir):
            for file in files:
                if file.startswith(f'angle-scl3300d01-{class_name}') and file.endswith('.csv'):
                    file_path = os.path.join(root, file)
                    df = pd.read_csv(file_path)
                    # Sliding-window extraction: one feature row per window.
                    features = extract_features_from_raw(df)
                    features_list.append(features)
                    # One label per extracted window row.
                    labels_list.extend([class_name] * len(features))
                    found = True
                    break
            if found:
                # BUG FIX: the original tested `len(labels_list) > 0`, which is
                # true for every class after the first match, so the walk was
                # aborted after the first directory even when nothing was found.
                break

    if not features_list:
        raise ValueError(f"No sample CSV files found under {data_dir}")

    # Stack the per-class window features into a single DataFrame.
    features_df = pd.concat(features_list, ignore_index=True)
    return features_df, labels_list

def calculate_entropy(feature: pd.Series, bins=10) -> float:
    """计算单个特征的熵"""
    hist = np.histogram(feature, bins=bins)[0]
    prob = hist / hist.sum()
    prob = prob[prob > 0]  # 移除0概率
    return -np.sum(prob * np.log2(prob))

def calculate_information_gain(features: pd.DataFrame, labels: List[str],
                               top_n: int = 20, random_state=None) -> pd.DataFrame:
    """Rank features by estimated information gain with respect to the labels.

    Args:
        features: Feature matrix, one row per sample.
        labels: Class label for each sample (same length as features).
        top_n: Number of top-ranked features to return.
        random_state: Passed to ``mutual_info_classif`` for reproducible
            estimates; default ``None`` preserves the original (stochastic)
            behavior.

    Returns:
        DataFrame with columns ``feature``, ``information_gain`` and
        ``entropy``, sorted by information gain descending, truncated to
        ``top_n`` rows.
    """
    # Mutual information is used as a continuous-feature approximation of
    # information gain; its kNN estimator adds random noise, hence random_state.
    mi = mutual_info_classif(features, labels, discrete_features=False,
                             random_state=random_state)

    importance_df = pd.DataFrame({
        'feature': features.columns,
        'information_gain': mi,
    })

    # Highest information gain first.
    importance_df = importance_df.sort_values('information_gain', ascending=False)

    # Per-feature entropy gives extra context alongside the MI ranking.
    importance_df['entropy'] = importance_df['feature'].apply(
        lambda f: calculate_entropy(features[f]))

    return importance_df.head(top_n)

def extract_features_from_raw(data: pd.DataFrame, window_size: int = 100, step: int = 50) -> pd.DataFrame:
    """Extract per-window statistical features from raw sensor data.

    Args:
        data: DataFrame containing the accelerometer and angle columns.
        window_size: Number of samples per sliding window (default 100).
        step: Stride between consecutive window starts (default 50).

    Returns:
        DataFrame with one row of features per window.

    Raises:
        ValueError: If any required sensor column is missing from ``data``.
    """
    # Validate that all expected sensor channels are present.
    required_cols = ['acc_x', 'acc_y', 'acc_z', 'angle_x', 'angle_y', 'angle_z']
    missing_cols = [col for col in required_cols if col not in data.columns]
    if missing_cols:
        raise ValueError(f"Missing required columns: {missing_cols}")

    def window_features(window: pd.DataFrame) -> dict:
        """Compute the feature dictionary for a single window."""
        feats = {}

        # Descriptive statistics per sensor channel.
        for col in required_cols:
            values = window[col].dropna()
            if values.empty:
                continue
            col_max = values.max()
            col_min = values.min()
            feats.update({
                f'{col}_mean': values.mean(),
                f'{col}_std': values.std(),
                f'{col}_max': col_max,
                f'{col}_min': col_min,
                f'{col}_range': col_max - col_min,
                f'{col}_median': values.median(),
                f'{col}_skew': values.skew(),
                f'{col}_kurtosis': values.kurtosis(),
            })

        # Euclidean-magnitude statistics for each 3-axis sensor group.
        for prefix, cols in (('acc', ['acc_x', 'acc_y', 'acc_z']),
                             ('angle', ['angle_x', 'angle_y', 'angle_z'])):
            group = window[cols].dropna()
            if len(group) > 0:
                mags = np.sqrt(group.pow(2).sum(axis=1))
                feats[f'{prefix}_magnitude_mean'] = mags.mean()
                feats[f'{prefix}_magnitude_std'] = mags.std()
                feats[f'{prefix}_magnitude_max'] = mags.max()

        return feats

    # Slide the window across the data and collect one feature row per window.
    rows = [window_features(data.iloc[start:start + window_size])
            for start in range(0, len(data) - window_size + 1, step)]

    features_df = pd.DataFrame(rows)
    print(f"Extracted features shape: {features_df.shape}")  # debug output
    return features_df

if __name__ == "__main__":
    try:
        data_dir = "../data"

        # Load raw windowed segments and their labels for the full dataset.
        print("Loading and preparing data...")
        segments, labels, classes = prepare_dataset(data_dir, 100, 50)
        print(f"Loaded {len(segments)} segments with {len(classes)} classes: {classes}")

        # Turn each segment into a feature vector.
        print("Extracting features...")
        features = extract_features_from_segments(segments)
        print(f"Extracted {features.shape[1]} features for {features.shape[0]} segments")

        # Rank features by information gain and report the top ones.
        print("Calculating feature importance...")
        importance_df = calculate_information_gain(features, labels)

        print("\nTop features by information gain:")
        print(importance_df)
    except Exception as e:
        # Top-level boundary: report the error, then re-raise so the process
        # exits with a non-zero status and a full traceback.
        print(f"Error: {str(e)}")
        raise