#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Model fine-tuning module.

Loads a previously trained model for incremental training, adjusting the
learning rate and applying early stopping.
"""

import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import json

# Module-level state shared by the functions below:
# `model` holds the loaded Keras model, `scaler` the restored StandardScaler.
# Both are populated by load_model_and_scaler() and remain None on failure.
model = None
scaler = None

def load_model_and_scaler(model_file="tqsecLogAI.h5", scaler_file="scaler.npy"):
    """
    Load the Keras model and restore the StandardScaler into module globals.

    Failures are reported on stdout and leave the corresponding global(s)
    in their previous state (typically None).

    Args:
        model_file (str): Path to the saved Keras model file.
        scaler_file (str): Path to the .npy file holding the scaler's
            'mean' and 'scale' arrays (saved as a pickled dict).
    """
    global model, scaler

    try:
        model = keras.models.load_model(model_file)
        print(f"成功加载模型 {model_file}")

        # Rebuild the scaler from its persisted parameters instead of refitting.
        saved = np.load(scaler_file, allow_pickle=True).item()
        scaler = StandardScaler()
        scaler.mean_ = saved['mean']
        scaler.scale_ = saved['scale']
        scaler.n_features_in_ = len(scaler.mean_)
        print(f"成功加载标准化器参数 {scaler_file}")

    except FileNotFoundError as e:
        print(f"文件未找到: {e}")
    except Exception as e:
        print(f"加载模型或标准化器时出错: {e}")

def load_features(feature_file="features.npz"):
    """
    Load the feature matrix and label vector from an .npz archive.

    Args:
        feature_file (str): Path to the archive; must contain arrays stored
            under the keys 'X' and 'y'.

    Returns:
        tuple: (X, y) on success, (None, None) on any failure.
    """
    try:
        archive = np.load(feature_file)
        X, y = archive['X'], archive['y']
        print(f"成功加载特征数据，特征矩阵形状: {X.shape}，标签向量形状: {y.shape}")
        return X, y
    except FileNotFoundError:
        print(f"错误: 特征文件 {feature_file} 不存在")
        return None, None
    except Exception as e:
        print(f"加载特征数据时出错: {e}")
        return None, None

def preprocess_data(X, y):
    """
    Standardize the features and split them into train/test sets.

    Uses the module-level ``scaler`` when one was loaded; otherwise fits a
    new StandardScaler. Bug fix: the new scaler is now fitted on the
    training split only — previously it was fitted on the full dataset
    before splitting, leaking test-set statistics into the normalization.

    Args:
        X (np.array): Feature matrix.
        y (np.array): Label vector.

    Returns:
        tuple: (X_train, X_test, y_train, y_test)
    """
    global scaler

    # Split first so a newly fitted scaler never sees the test data.
    # Same seed/ratio as before, so the partition itself is unchanged.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )

    if scaler is None:
        print("警告: 标准化器未加载，创建新的标准化器")
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
    else:
        # Reuse the parameters of the already-loaded scaler.
        X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)

    print(f"训练集大小: {X_train.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")

    return X_train, X_test, y_train, y_test

def evaluate_model_performance(model, X_test, y_test):
    """
    Evaluate a compiled model on the held-out set and derive F1.

    Assumes ``model.evaluate`` returns exactly (loss, accuracy, precision,
    recall) — i.e. the model was compiled with those three metrics; TODO
    confirm against the original training script's compile() call.

    Args:
        model (keras.Model): Model instance to evaluate.
        X_test (np.array): Test features.
        y_test (np.array): Test labels.

    Returns:
        dict: Keys 'loss', 'accuracy', 'precision', 'recall', 'f1_score'.
    """
    loss, accuracy, precision, recall = model.evaluate(X_test, y_test, verbose=0)

    # F1 is the harmonic mean of precision and recall; guard division by zero.
    denom = precision + recall
    f1 = 2 * precision * recall / denom if denom > 0 else 0

    return {
        "loss": loss,
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1_score": f1,
    }

def print_performance_comparison(before_perf, after_perf):
    """
    Print a tab-separated before/after table of fine-tuning metrics.

    Missing metrics default to 0 in either dict.

    Args:
        before_perf (dict): Metrics measured before fine-tuning.
        after_perf (dict): Metrics measured after fine-tuning.
    """
    print("\n模型性能对比:")
    print("指标\t\t微调前\t\t微调后\t\t变化")
    print("-" * 50)
    for name in ("accuracy", "precision", "recall", "f1_score"):
        old = before_perf.get(name, 0)
        new = after_perf.get(name, 0)
        print(f"{name}\t\t{old:.4f}\t\t{new:.4f}\t\t{new - old:+.4f}")

def fine_tune_model(epochs=10, learning_rate=0.0001):
    """
    Fine-tune the globally loaded model on the features from features.npz.

    Measures performance before and after training, lowers the learning
    rate (by re-compiling the model so the optimizer matches the model's
    variables), and trains with early-stopping and LR-reduction callbacks.

    Args:
        epochs (int): Maximum number of training epochs.
        learning_rate (float): Learning rate to use for fine-tuning.

    Returns:
        keras.callbacks.History or None: Training history, or None when the
        model is not loaded or the feature data cannot be loaded.
    """
    global model
    
    if model is None:
        print("错误: 模型未加载")
        return None
    
    print("开始加载特征数据...")
    X, y = load_features()
    if X is None or y is None:
        return None
    
    print("开始数据预处理...")
    X_train, X_test, y_train, y_test = preprocess_data(X, y)
    
    # Baseline metrics before fine-tuning, used for the comparison table below.
    print("评估微调前模型性能...")
    before_performance = evaluate_model_performance(model, X_test, y_test)
    print(f"微调前准确率: {before_performance['accuracy']:.4f}")
    
    # Adjust the learning rate.
    # Re-compile the model so the optimizer state matches the model variables.
    try:
        # Clone the original optimizer's config, overriding only the LR.
        original_optimizer = model.optimizer
        optimizer_config = original_optimizer.get_config()
        optimizer_config['learning_rate'] = learning_rate
        
        # Create a fresh optimizer instance of the same class.
        new_optimizer = original_optimizer.__class__.from_config(optimizer_config)
        
        # Re-compile the model, keeping its loss and metrics.
        # NOTE(review): `compiled_metrics._metrics` is a private Keras
        # attribute and may break across TF versions — verify on upgrade.
        model.compile(
            optimizer=new_optimizer,
            loss=model.loss,
            metrics=model.compiled_metrics._metrics if hasattr(model.compiled_metrics, '_metrics') else None
        )
        print(f"通过重新编译模型设置学习率为: {learning_rate}")
    except Exception as e:
        print(f"重新编译模型设置学习率失败: {e}")
        # Fallback: assign the learning-rate variable in place.
        if hasattr(model.optimizer, 'learning_rate'):
            try:
                model.optimizer.learning_rate.assign(learning_rate)
                print(f"设置学习率为: {learning_rate}")
            except Exception as e2:
                print(f"设置学习率失败: {e2}")
        else:
            print("优化器没有learning_rate属性")
    
    # Stop early and restore the best weights once val_loss stops improving.
    early_stopping = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        patience=5,
        restore_best_weights=True
    )
    
    # Halve the LR when val_loss plateaus, down to a floor of 1e-5.
    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=3,
        min_lr=0.00001
    )
    
    print(f"开始微调模型，训练轮数: {epochs}...")
    # NOTE(review): the test split doubles as the validation set here, so the
    # "after" metrics below are measured on data used for early stopping.
    history = model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=8,
        validation_data=(X_test, y_test),
        callbacks=[early_stopping, reduce_lr],
        verbose=1
    )
    
    # Evaluate and report post-fine-tuning performance.
    print("评估微调后模型性能...")
    after_performance = evaluate_model_performance(model, X_test, y_test)
    print(f"微调后准确率: {after_performance['accuracy']:.4f}")
    
    # Print the before/after comparison table.
    print_performance_comparison(before_performance, after_performance)
    
    return history

def save_fine_tuned_model(model_file="tqsecLogAI_finetuned.h5", scaler_file="scaler_finetuned.npy"):
    """
    Persist the fine-tuned model and the scaler parameters to disk.

    Refuses to save (with a message) if either global is still None.

    Args:
        model_file (str): Destination path for the Keras model.
        scaler_file (str): Destination path for the scaler's mean/scale dict.
    """
    global model, scaler

    if model is None or scaler is None:
        print("错误: 模型或标准化器未加载")
        return

    try:
        model.save(model_file)
        print(f"微调后的模型已保存到 {model_file}")

        # Persist only the fitted parameters; the scaler is rebuilt on load.
        params = {'mean': scaler.mean_, 'scale': scaler.scale_}
        np.save(scaler_file, params)
        print(f"标准化器参数已保存到 {scaler_file}")

    except Exception as e:
        print(f"保存微调后的模型或标准化器时出错: {e}")

def main():
    """
    Orchestrate the full fine-tuning run: load artifacts, fine-tune, save.
    """
    print("开始加载模型和预处理器...")
    load_model_and_scaler()

    # Guard clause: nothing to do if loading failed.
    if model is None:
        print("模型加载失败，无法进行微调")
        return

    print("开始微调模型...")
    history = fine_tune_model(epochs=20, learning_rate=0.0001)
    if history is None:
        return

    print("保存微调后的模型...")
    save_fine_tuned_model()
    print("微调完成!")

if __name__ == "__main__":
    # Enable eager execution to avoid numpy()-related errors in tf functions.
    tf.config.run_functions_eagerly(True)
    
    # Fix the random seeds so results are reproducible.
    tf.random.set_seed(42)
    np.random.seed(42)
    
    main()