# utils/model_builder.py
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, BatchNormalization, Conv1D, MaxPooling1D, \
    GlobalAveragePooling1D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.regularizers import l2
from config import Config


class ModelBuilder:
    """Builder/trainer for an LSTM + Temporal Convolution (TC) classifier.

    Architecture sizes are fixed here; training hyper-parameters
    (NUM_CLASSES, LEARNING_RATE, EPOCHS, BATCH_SIZE) and the checkpoint
    path come from the project-level ``Config`` object.
    """

    def __init__(self):
        # Central project configuration — see config.py for the fields
        # used below (NUM_CLASSES, LEARNING_RATE, EPOCHS, BATCH_SIZE,
        # get_model_path()).
        self.config = Config()

    def build_lstm_tc_model(self, input_shape):
        """Build and compile the LSTM + Temporal Convolution model.

        Args:
            input_shape: per-sample shape excluding the batch axis,
                presumably ``(timesteps, features)`` — TODO confirm with
                the caller. The two MaxPooling1D layers halve the time
                axis twice, so ``timesteps`` should be at least 4.

        Returns:
            A compiled ``tf.keras.Sequential`` model with a softmax
            output of ``Config.NUM_CLASSES`` units, categorical
            cross-entropy loss and accuracy as the sole metric.
        """
        print("构建LSTM+TC模型...")

        model = Sequential([
            # Explicit input layer so the model is fully built up front.
            tf.keras.layers.Input(shape=input_shape),

            # --- Temporal Convolution front-end: local pattern extraction,
            # each stage halves the sequence length via pooling.
            Conv1D(filters=64, kernel_size=3, activation='relu',
                   padding='same', kernel_regularizer=l2(0.001)),
            BatchNormalization(),
            MaxPooling1D(pool_size=2),
            Dropout(0.2),

            Conv1D(filters=128, kernel_size=3, activation='relu',
                   padding='same', kernel_regularizer=l2(0.001)),
            BatchNormalization(),
            MaxPooling1D(pool_size=2),
            Dropout(0.3),

            # --- Stacked LSTM layers; the last one collapses the time axis
            # (return_sequences=False by default).
            # NOTE(review): recurrent_dropout > 0 disables the cuDNN fast
            # path for LSTM on GPU — intentional trade-off? confirm.
            LSTM(128, return_sequences=True,
                 dropout=0.2, recurrent_dropout=0.2,
                 kernel_regularizer=l2(0.001)),
            BatchNormalization(),

            LSTM(64, return_sequences=True,
                 dropout=0.2, recurrent_dropout=0.2,
                 kernel_regularizer=l2(0.001)),
            BatchNormalization(),

            LSTM(32, dropout=0.2, recurrent_dropout=0.2,
                 kernel_regularizer=l2(0.001)),
            BatchNormalization(),

            # --- Fully-connected classifier head with tapering widths.
            Dense(128, activation='relu', kernel_regularizer=l2(0.001)),
            BatchNormalization(),
            Dropout(0.4),

            Dense(64, activation='relu', kernel_regularizer=l2(0.001)),
            BatchNormalization(),
            Dropout(0.3),

            Dense(32, activation='relu', kernel_regularizer=l2(0.001)),
            Dropout(0.2),

            # Output layer: one softmax probability per class.
            Dense(self.config.NUM_CLASSES, activation='softmax')
        ])

        # Compile with accuracy only; extra metrics are computed
        # post-hoc in evaluate_model to keep fit()/evaluate() simple.
        model.compile(
            optimizer=Adam(learning_rate=self.config.LEARNING_RATE),
            loss='categorical_crossentropy',
            metrics=['accuracy']
        )

        return model

    def get_callbacks(self):
        """Return the training callback list.

        EarlyStopping and ReduceLROnPlateau both watch ``val_loss``
        (patience values tuned down for small datasets), while
        ModelCheckpoint saves the best model by ``val_accuracy`` to
        ``Config.get_model_path()``.
        """
        return [
            EarlyStopping(
                monitor='val_loss',
                patience=15,  # reduced patience to suit small datasets
                restore_best_weights=True,
                verbose=1
            ),
            ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.5,
                patience=8,  # reduced patience
                min_lr=1e-7,
                verbose=1
            ),
            ModelCheckpoint(
                self.config.get_model_path(),
                monitor='val_accuracy',
                save_best_only=True,
                verbose=1
            )
        ]

    def train_model(self, model, X_train, y_train, X_val, y_val, class_weights=None):
        """Train ``model`` and return the Keras ``History`` object.

        Args:
            model: compiled Keras model (from ``build_lstm_tc_model``).
            X_train, y_train: training features and one-hot labels.
            X_val, y_val: validation split used for the callbacks.
            class_weights: optional dict mapping class index -> weight,
                passed straight to ``model.fit`` (None = unweighted).
        """
        print("开始训练模型...")

        history = model.fit(
            X_train, y_train,
            epochs=self.config.EPOCHS,
            batch_size=self.config.BATCH_SIZE,
            validation_data=(X_val, y_val),
            class_weight=class_weights,
            callbacks=self.get_callbacks(),
            verbose=1,
            shuffle=True
        )

        return history

    def evaluate_model(self, model, X_test, y_test):
        """Evaluate ``model`` on the test set.

        Loss and accuracy come from ``model.evaluate``; weighted
        precision/recall/F1 are computed from predictions via sklearn
        as a best-effort extra (0.0 placeholders on failure).

        Args:
            model: trained Keras model.
            X_test: test features.
            y_test: one-hot encoded test labels.

        Returns:
            Tuple ``(loss, accuracy, precision, recall, f1)`` — all floats.
        """
        print("评估模型...")

        # Run the baseline Keras evaluation exactly once, outside any
        # try/except: the original code retried this same call in its
        # except branch, which could only fail again identically. If this
        # raises there is no meaningful fallback, so let it propagate.
        test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)

        print(f"测试集损失: {test_loss:.4f}")
        print(f"测试集准确率: {test_accuracy:.4f}")

        try:
            # Extra metrics are informational only — guard just this part.
            from sklearn.metrics import precision_score, recall_score, f1_score
            y_pred = model.predict(X_test, verbose=0)
            y_pred_classes = np.argmax(y_pred, axis=1)
            y_true_classes = np.argmax(y_test, axis=1)

            precision = precision_score(y_true_classes, y_pred_classes, average='weighted', zero_division=0)
            recall = recall_score(y_true_classes, y_pred_classes, average='weighted', zero_division=0)
            f1 = f1_score(y_true_classes, y_pred_classes, average='weighted', zero_division=0)

            print(f"测试集精确率: {precision:.4f}")
            print(f"测试集召回率: {recall:.4f}")
            print(f"测试集F1分数: {f1:.4f}")

            return test_loss, test_accuracy, precision, recall, f1

        except Exception as e:
            # Best-effort fallback: report the failure and return float
            # placeholders so the tuple shape/types stay consistent.
            print(f"评估过程中出错: {e}")
            return test_loss, test_accuracy, 0.0, 0.0, 0.0