import argparse  # command-line argument parsing
import os  # operating-system interfaces
from collections import Counter  # per-class vote counting
from typing import Dict, List, Optional, Tuple  # type hints

import joblib  # model serialization and loading
import numpy as np  # numerical computing
import pandas as pd  # data processing and analysis

from data_loader import segment_time_series  # time-series segmentation
from feature_extraction import extract_statistical_features  # feature extraction
from model import SensorClassifier  # classifier model


def predict_single_file(file_path: str, model_path: str, scaler_path: str,
                      window_size: int = 100, step_size: int = 50) -> Dict[str, object]:
    """
    Make predictions on a single CSV file of sensor readings.

    Args:
        file_path: Path to the CSV file to predict
        model_path: Path to the trained model
        scaler_path: Path to the fitted feature scaler
        window_size: Size of the sliding window in number of samples
        step_size: Step size for the sliding window

    Returns:
        Dictionary with keys:
            'class_counts': per-class vote counts over all segments
            'class_probabilities': per-class fraction of segment votes
            'average_probabilities': per-class mean predicted probability
            'predicted_class': class with the highest vote fraction

    Raises:
        ValueError: If the file yields no complete segments (fewer than
            window_size rows), which would otherwise crash downstream.
    """
    # Load the trained classifier and the scaler fitted at training time.
    classifier = SensorClassifier.load_model(model_path)
    scaler = joblib.load(scaler_path)

    # Read the raw file and keep only the sensor channels the model uses.
    df = pd.read_csv(file_path)
    sensor_cols = ['acc_x', 'acc_y', 'acc_z', 'angle_x', 'angle_y', 'angle_z']
    df_sensors = df[sensor_cols]

    # Split the time series into (possibly overlapping) windows.
    segments = segment_time_series(df_sensors, window_size, step_size)
    if len(segments) == 0:
        # Guard: without segments we'd divide by zero and feed an empty
        # feature matrix to the scaler below.
        raise ValueError(
            f"No segments produced from {file_path}; "
            f"need at least {window_size} rows"
        )

    # One statistical feature vector per segment.
    feature_dicts = [extract_statistical_features(segment) for segment in segments]
    features = pd.DataFrame(feature_dicts)

    # Apply the training-time scaling, preserving column names.
    normalized_features = pd.DataFrame(
        scaler.transform(features),  # same transform as during training
        columns=features.columns
    )

    # Per-segment class labels and probability distributions.
    predictions = classifier.predict(normalized_features)
    probabilities = classifier.predict_proba(normalized_features)

    # Vote counts per class (Counter replaces the manual counting loop).
    class_counts = dict(Counter(predictions))

    # Fraction of segment votes per class.
    total_segments = len(predictions)
    class_probs = {cls: count / total_segments for cls, count in class_counts.items()}

    # Mean predicted probability per class across all segments.
    avg_probs = {
        cls: float(np.mean(probabilities[:, i]))
        for i, cls in enumerate(classifier.classes)
    }

    return {
        'class_counts': class_counts,
        'class_probabilities': class_probs,
        'average_probabilities': avg_probs,
        'predicted_class': max(class_probs, key=class_probs.get)  # majority vote
    }


def predict_directory(data_dir: str, model_path: str, scaler_path: str,
                     window_size: int = 100, step_size: int = 50, output_file: str = None) -> pd.DataFrame:
    """
    Make predictions on all CSV files in a directory.

    Args:
        data_dir: Path to directory containing CSV files to predict
        model_path: Path to the trained model
        scaler_path: Path to the fitted feature scaler
        window_size: Size of the sliding window in number of samples
        step_size: Step size for the sliding window
        output_file: Optional path to save the predictions CSV to

    Returns:
        DataFrame with one row per file: 'filename', 'predicted_class',
        and one 'prob_<class>' column per class (mean segment probability).
    """
    results = []

    # Sorted for deterministic processing order (os.listdir order is
    # arbitrary); skip non-CSV and hidden files such as .DS_Store.
    for filename in sorted(os.listdir(data_dir)):
        if not filename.endswith('.csv') or filename.startswith('.'):
            continue

        file_path = os.path.join(data_dir, filename)

        # BUG FIX: the progress message previously printed the literal
        # text "(unknown)" instead of the file being processed.
        print(f"Predicting {filename}...")
        prediction = predict_single_file(
            file_path, model_path, scaler_path, window_size, step_size
        )

        result = {
            'filename': filename,
            'predicted_class': prediction['predicted_class'],
        }
        # One probability column per class (average across segments).
        for cls, prob in prediction['average_probabilities'].items():
            result[f'prob_{cls}'] = prob

        results.append(result)

    results_df = pd.DataFrame(results)

    # Optionally persist the summary table.
    if output_file:
        results_df.to_csv(output_file, index=False)
        print(f"Predictions saved to {output_file}")

    return results_df


def predict_live_data(sensor_data: pd.DataFrame, model_path: str, scaler_path: str,
                    window_size: int = 100) -> Dict[str, float]:
    """
    Classify the most recent window of live sensor data.

    Args:
        sensor_data: DataFrame of streaming sensor readings
        model_path: Path to the trained model
        scaler_path: Path to the fitted feature scaler
        window_size: Number of most-recent samples to classify

    Returns:
        Dictionary with the predicted class and per-class probabilities

    Raises:
        ValueError: If fewer than window_size samples are available.
    """
    classifier = SensorClassifier.load_model(model_path)
    scaler = joblib.load(scaler_path)

    # Refuse to classify a partial window.
    n_samples = len(sensor_data)
    if n_samples < window_size:
        raise ValueError(f"Not enough data points. Need at least {window_size}, got {n_samples}")

    # Work on a copy of just the trailing window.
    recent_data = sensor_data.iloc[-window_size:].copy()

    # Build a single-row feature matrix and apply the training-time scaling.
    features = pd.DataFrame([extract_statistical_features(recent_data)])
    normalized_features = pd.DataFrame(
        scaler.transform(features),
        columns=features.columns,
    )

    # Exactly one window, so take element 0 of each result.
    predicted_class = classifier.predict(normalized_features)[0]
    probabilities = classifier.predict_proba(normalized_features)[0]
    class_probs = dict(zip(classifier.classes, probabilities))

    return {
        'predicted_class': predicted_class,
        'probabilities': class_probs,
    }


if __name__ == "__main__":
    # CLI entry point: predict a single file or every CSV in a directory.
    arg_parser = argparse.ArgumentParser(description='Make predictions with a trained model')
    arg_parser.add_argument('--data', type=str, required=True,
                            help='Path to file or directory to predict')
    arg_parser.add_argument('--model', type=str, default='../output/model.joblib',
                            help='Path to the trained model')
    arg_parser.add_argument('--scaler', type=str, default='../output/scaler.joblib',
                            help='Path to the feature scaler')
    arg_parser.add_argument('--window_size', type=int, default=100,
                            help='Size of the window in number of samples')
    arg_parser.add_argument('--step_size', type=int, default=50,
                            help='Step size for sliding window')
    arg_parser.add_argument('--output', type=str, default=None,
                            help='Path to save the predictions to')
    opts = arg_parser.parse_args()

    if os.path.isfile(opts.data):
        # Single-file mode: report the winning class and its probabilities.
        outcome = predict_single_file(
            opts.data, opts.model, opts.scaler, opts.window_size, opts.step_size
        )
        print(f"Predicted class: {outcome['predicted_class']}")
        print("Class probabilities:")
        for label, prob in outcome['average_probabilities'].items():
            print(f"  {label}: {prob:.4f}")
    elif os.path.isdir(opts.data):
        # Directory mode: batch-predict, then show a per-file summary.
        summary = predict_directory(
            opts.data, opts.model, opts.scaler, opts.window_size, opts.step_size, opts.output
        )
        print("\nPrediction summary:")
        print(summary[['filename', 'predicted_class']].to_string(index=False))
    else:
        print(f"Error: {opts.data} is not a valid file or directory")