import os  # 操作系统接口
import pandas as pd  # 数据处理和分析
import numpy as np  # 数值计算
import joblib  # 模型序列化和加载
from typing import Dict, List, Any, Optional  # 类型提示
import matplotlib.pyplot as plt  # 数据可视化
import seaborn as sns  # 统计可视化
import argparse  # 命令行参数解析
from sklearn.model_selection import train_test_split  # 数据分割

from data_loader import prepare_dataset  # 数据准备
from feature_extraction import extract_features_from_segments, normalize_features  # 特征工程
from model import SensorClassifier  # 分类器模型


def train_model(data_dir: str, model_type: str = 'rf', window_size: int = 100,
               step_size: int = 50, output_dir: str = '../output') -> Dict[str, Any]:
    """
    Train a sensor-classification model on sliding-window segments of the data.

    Pipeline: load and segment the raw data, extract features from each
    segment, normalize them, train the classifier, then persist the model,
    the fitted scaler, a confusion-matrix plot and a per-class classification
    report under ``output_dir``.

    Args:
        data_dir: Path to directory containing data files
        model_type: Type of model to use (rf or svm)
        window_size: Size of the window in number of samples
        step_size: Step size for sliding window
        output_dir: Directory to save outputs (created if missing)

    Returns:
        Dictionary of training results as produced by
        ``SensorClassifier.train`` (accuracy, confusion matrix,
        classification report, class labels)
    """
    print("Loading and preparing data...")
    segments, labels, classes = prepare_dataset(data_dir, window_size, step_size)
    print(f"Loaded {len(segments)} segments with {len(classes)} classes: {classes}")

    print("Extracting features...")
    features = extract_features_from_segments(segments)
    print(f"Extracted {features.shape[1]} features for {features.shape[0]} segments")

    print("Normalizing features...")
    # normalize_features returns the transformed matrix and the fitted scaler;
    # the scaler is persisted below so inference can apply the same transform.
    normalized_features, scaler = normalize_features(features)

    print(f"Training {model_type} model...")
    classifier = SensorClassifier(model_type=model_type)
    results = classifier.train(normalized_features, labels)

    # Ensure the output directory exists before writing any artifacts
    os.makedirs(output_dir, exist_ok=True)

    # Save the trained model together with its training results
    model_path = os.path.join(output_dir, 'model.joblib')
    classifier.save_model(model_path, results)
    print(f"Model saved to {model_path}")

    # Save the feature scaler (fix: confirmation print added for consistency
    # with every other saved artifact)
    scaler_path = os.path.join(output_dir, 'scaler.joblib')
    joblib.dump(scaler, scaler_path)
    print(f"Scaler saved to {scaler_path}")

    # Plot and save the confusion matrix
    conf_matrix_path = os.path.join(output_dir, 'confusion_matrix.png')
    classifier.plot_confusion_matrix(
        results['confusion_matrix'],
        results['classes'],
        conf_matrix_path
    )
    print(f"Confusion matrix saved to {conf_matrix_path}")

    # Save the classification report as a CSV
    report_df = pd.DataFrame(results['classification_report']).transpose()
    report_path = os.path.join(output_dir, 'classification_report.csv')
    report_df.to_csv(report_path)
    print(f"Classification report saved to {report_path}")

    # Print the accuracy
    print(f"Model trained with accuracy: {results['accuracy']}")

    return results


if __name__ == "__main__":
    # Command-line entry point: define the CLI options and run training.
    cli = argparse.ArgumentParser(description='Train a model for IoT sensor classification')
    cli.add_argument('--data_dir', type=str, default='../data',
                     help='Path to directory containing data files')
    cli.add_argument('--model_type', type=str, default='rf', choices=['rf', 'svm'],
                     help='Type of model to use (rf or svm)')
    cli.add_argument('--window_size', type=int, default=100,
                     help='Size of the window in number of samples')
    cli.add_argument('--step_size', type=int, default=50,
                     help='Step size for sliding window')
    cli.add_argument('--output_dir', type=str, default='../output',
                     help='Directory to save outputs')

    options = cli.parse_args()

    # The CLI flag names match train_model's parameters one-to-one,
    # so the parsed namespace can be forwarded directly as keywords.
    train_model(**vars(options))
