# train_model.py
import os
import sys
import numpy as np
from sklearn.utils import class_weight

sys.path.append('utils')

from data_processor import CSVDataProcessor
from data_augmenter import DataAugmenter
from model_builder import ModelBuilder
from evaluator import ModelEvaluator
from config import Config


def main():
    """Train the fall-detection model end to end.

    Pipeline: load raw CSV sequences, optionally augment small datasets,
    split into train/val/test sets, compute balanced class weights, build
    and train a model sized to the available data, then evaluate and
    visualize the results. Progress is printed to stdout; any exception is
    caught at the top level and reported with a full traceback.
    """
    print("=== 摔倒检测模型训练 ===")

    # Initialize pipeline components
    processor = CSVDataProcessor()
    augmenter = DataAugmenter()
    model_builder = ModelBuilder()
    evaluator = ModelEvaluator()

    try:
        # 1. Load and process raw data into (sequences, labels)
        print("\n1. 加载和处理数据...")
        sequences, labels = processor.process_all_data(Config.RAW_DATA_DIR)

        if sequences is None:
            print("没有找到训练数据")
            return

        # Augment when the dataset is small to reduce overfitting
        if len(sequences) < 50:
            print(f"\n数据量较少 ({len(sequences)} 个序列)，进行数据增强...")
            sequences, labels = augmenter.augment_sequences(sequences, labels, augmentation_factor=3)
            print(f"增强后序列数: {len(sequences)}")

        # Split into train / validation / test sets
        training_data = processor.prepare_training_data(sequences, labels)

        if training_data is None:
            print("训练数据准备失败")
            return

        X_train, X_val, X_test, y_train, y_val, y_test = training_data

        # 2. Compute balanced class weights to counter class imbalance
        print("\n2. 计算类别权重...")
        class_weight_dict = _compute_class_weights(y_train)
        print("类别权重:", class_weight_dict)

        # 3. Build a model sized to the amount of training data
        print("\n3. 构建模型...")
        input_shape = (X_train.shape[1], X_train.shape[2])

        if len(X_train) < 100:
            print("数据量较少，使用简化模型")
            model = _build_compact_model(input_shape)
        else:
            model = model_builder.build_lstm_tc_model(input_shape)

        print("模型架构:")
        model.summary()

        # 4. Train the model
        print("\n4. 训练模型...")
        history = model_builder.train_model(
            model, X_train, y_train, X_val, y_val, class_weight_dict
        )

        # 5. Evaluate on the held-out test set. The return value was
        # previously captured but never used, so the binding is dropped.
        print("\n5. 评估模型...")
        model_builder.evaluate_model(model, X_test, y_test)

        # 6. Visualize training curves and test-set predictions
        print("\n6. 可视化训练结果...")
        evaluator.plot_training_history(history)

        # Predict class probabilities on the test set, then reduce
        # both predictions and one-hot labels to class indices.
        y_pred_proba = model.predict(X_test, verbose=0)
        y_pred = np.argmax(y_pred_proba, axis=1)
        y_true = np.argmax(y_test, axis=1)

        evaluator.plot_confusion_matrix(y_true, y_pred)
        evaluator.analyze_predictions(y_true, y_pred, y_pred_proba)

        # Persist the fitted preprocessor for use at inference time
        processor.save_preprocessor()

        print("\n=== 训练完成! ===")
        print(f"模型已保存到: {Config.get_model_path()}")
        print(f"预处理器已保存到: {Config.get_preprocessor_path()}")

        # Report final accuracy, preferring validation accuracy when present
        final_accuracy = history.history.get(
            'val_accuracy', history.history['accuracy']
        )[-1]
        print(f"最终验证准确率: {final_accuracy:.4f}")

    except Exception as e:
        # Top-level boundary for a script: report and dump the traceback
        print(f"训练过程中出错: {e}")
        import traceback
        traceback.print_exc()


def _compute_class_weights(y_train):
    """Return balanced {class_index: weight} computed from one-hot labels.

    Args:
        y_train: one-hot encoded label array of shape (samples, classes).

    Returns:
        dict mapping class index to its sklearn 'balanced' weight.
    """
    y_train_labels = np.argmax(y_train, axis=1)
    weights = class_weight.compute_class_weight(
        'balanced',
        classes=np.unique(y_train_labels),
        y=y_train_labels
    )
    return dict(enumerate(weights))


def _build_compact_model(input_shape):
    """Build and compile a small stacked-LSTM classifier for low-data regimes.

    Args:
        input_shape: (timesteps, features) tuple for the input sequences.

    Returns:
        A compiled Keras Sequential model with a softmax output over
        Config.NUM_CLASSES classes.
    """
    # Imported lazily so TensorFlow is only loaded when this path is taken
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import LSTM, Dense, Dropout
    from tensorflow.keras.optimizers import Adam

    model = Sequential([
        LSTM(32, return_sequences=True, input_shape=input_shape),
        Dropout(0.3),
        LSTM(16),
        Dropout(0.3),
        Dense(32, activation='relu'),
        Dropout(0.2),
        Dense(Config.NUM_CLASSES, activation='softmax')
    ])

    model.compile(
        optimizer=Adam(learning_rate=Config.LEARNING_RATE),
        loss='categorical_crossentropy',
        metrics=['accuracy']  # accuracy only; dataset is too small for more
    )
    return model


# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()