#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
模型训练模块
使用TensorFlow构建和训练深度神经网络模型
"""

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import os

def load_features(feature_file="features.npz"):
    """Load the feature matrix and label vector from an .npz archive.

    Args:
        feature_file (str): Path to the feature archive; must contain
            arrays under the keys 'X' and 'y'.

    Returns:
        tuple: (X, y) on success, (None, None) if the file is missing
        or cannot be read.
    """
    try:
        archive = np.load(feature_file)
        features, labels = archive['X'], archive['y']
        print(f"成功加载特征数据，特征矩阵形状: {features.shape}，标签向量形状: {labels.shape}")
    except FileNotFoundError:
        print(f"错误: 特征文件 {feature_file} 不存在")
        return None, None
    except Exception as e:
        print(f"加载特征数据时出错: {e}")
        return None, None
    return features, labels

def preprocess_data(X, y):
    """Standardize features and split into stratified train/test sets.

    Args:
        X (np.array): Feature matrix.
        y (np.array): Label vector.

    Returns:
        tuple: (X_train, X_test, y_train, y_test, scaler) where scaler is
        the fitted StandardScaler. 20% of samples are held out, with the
        split stratified on y and seeded for reproducibility.
    """
    # Fit the scaler on the full dataset, then split the scaled matrix.
    scaler = StandardScaler()
    standardized = scaler.fit_transform(X)

    split = train_test_split(
        standardized, y, test_size=0.2, random_state=42, stratify=y
    )
    X_train, X_test, y_train, y_test = split

    print(f"训练集大小: {X_train.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")

    return X_train, X_test, y_train, y_test, scaler

def build_model(input_dim):
    """Build and compile a binary-classification MLP.

    Architecture: 128-unit and 32-unit ReLU hidden layers, each with L2
    weight regularization and dropout, followed by a single sigmoid
    output unit.

    Args:
        input_dim (int): Number of input features.

    Returns:
        keras.Model: Compiled model (Adam optimizer, binary cross-entropy
        loss, accuracy/precision/recall metrics).
    """
    # l2 regularizers are stateless, so one instance can be shared.
    l2 = keras.regularizers.l2(0.001)
    model = keras.Sequential([
        # Explicit Input layer replaces the deprecated `input_shape` kwarg
        # on the first Dense layer.
        keras.Input(shape=(input_dim,)),
        layers.Dense(128, activation='relu', kernel_regularizer=l2),
        layers.Dropout(0.3),
        layers.Dense(32, activation='relu', kernel_regularizer=l2),
        layers.Dropout(0.1),
        layers.Dense(1, activation='sigmoid'),
    ])

    # Explicit metric objects are stable across Keras versions (the bare
    # string aliases 'precision'/'recall' are rejected by older releases).
    # Metric order matters: evaluate_model() unpacks results positionally
    # as (loss, accuracy, precision, recall).
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss='binary_crossentropy',
        metrics=[
            'accuracy',
            keras.metrics.Precision(name='precision'),
            keras.metrics.Recall(name='recall'),
        ],
    )

    return model

def train_model(model, X_train, y_train, X_test, y_test):
    """Fit the model, validating against the held-out test set.

    Training stops early (restoring the best weights) when validation
    loss stalls, and the learning rate is reduced on plateaus.

    Args:
        model (keras.Model): Compiled model instance.
        X_train (np.array): Training features.
        y_train (np.array): Training labels.
        X_test (np.array): Validation features.
        y_test (np.array): Validation labels.

    Returns:
        History: Keras training history.
    """
    callbacks = [
        keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=10,
            restore_best_weights=True,
        ),
        keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.2,
            patience=5,
            min_lr=0.0001,
        ),
    ]

    return model.fit(
        X_train, y_train,
        epochs=10,
        batch_size=8,
        validation_data=(X_test, y_test),
        callbacks=callbacks,
        verbose=1,
    )

def evaluate_model(model, X_test, y_test):
    """Evaluate the model on the test set and print/return the metrics.

    Args:
        model (keras.Model): Trained model whose `evaluate` yields
            (loss, accuracy, precision, recall) in that order — must
            match the metric order used in build_model().
        X_test (np.array): Test features.
        y_test (np.array): Test labels.

    Returns:
        dict: Keys 'loss', 'accuracy', 'precision', 'recall', 'f1_score'.
        (New, backward-compatible return value; previously returned None.)
    """
    loss, accuracy, precision, recall = model.evaluate(X_test, y_test, verbose=0)

    # Guard against 0/0 when the model predicts only one class.
    denom = precision + recall
    f1_score = 2 * (precision * recall) / denom if denom > 0 else 0

    print("\n模型评估结果:")
    print(f"  损失: {loss:.4f}")
    print(f"  准确率: {accuracy:.4f}")
    print(f"  精确率: {precision:.4f}")
    print(f"  召回率: {recall:.4f}")
    print(f"  F1分数: {f1_score:.4f}")

    return {
        'loss': loss,
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1_score': f1_score,
    }

def save_model_and_scaler(model, scaler, model_file="tqsecLogAI.h5", scaler_file="scaler.npy"):
    """Persist the trained model and the scaler's fitted parameters.

    Args:
        model (keras.Model): Trained model.
        scaler (StandardScaler): Fitted scaler.
        model_file (str): Destination path for the model.
        scaler_file (str): Destination path for the scaler parameters
            (a pickled dict with 'mean' and 'scale'; loading it later
            requires np.load(..., allow_pickle=True)).
    """
    try:
        model.save(model_file)
        print(f"模型已保存到 {model_file}")

        # Only the parameters needed to reproduce the transform are kept.
        params = {'mean': scaler.mean_, 'scale': scaler.scale_}
        np.save(scaler_file, params)
        print(f"标准化器参数已保存到 {scaler_file}")
    except Exception as e:
        print(f"保存模型或标准化器时出错: {e}")

def main():
    """Run the full training pipeline: load, preprocess, train, evaluate, save."""
    print("开始加载特征数据...")
    X, y = load_features()
    if X is None or y is None:
        # load_features has already reported the problem; nothing to do.
        return

    print("开始数据预处理...")
    X_train, X_test, y_train, y_test, scaler = preprocess_data(X, y)

    print("构建模型...")
    model = build_model(X_train.shape[1])
    print(model.summary())

    print("开始训练模型...")
    train_model(model, X_train, y_train, X_test, y_test)

    print("评估模型...")
    evaluate_model(model, X_test, y_test)

    print("保存模型和标准化器...")
    save_model_and_scaler(model, scaler)

    print("训练完成!")

if __name__ == "__main__":
    # Seed NumPy and TensorFlow for reproducible runs.
    np.random.seed(42)
    tf.random.set_seed(42)

    main()