import tensorflow as tf
import numpy as np
from datetime import datetime
import os
import signal
import sys

class ModelTrainer:
    """Wrap a Keras model with GPU configuration, default compilation,
    checkpointed training, evaluation, and graceful Ctrl-C interruption.

    On SIGINT the handler sets ``stop_training``; the ``_InterruptCallback``
    passed to ``model.fit`` polls that flag at each epoch end, stops
    training, and saves the partially trained model.
    """

    def __init__(self, model):
        """Store the model, configure hardware, and ensure it is compiled.

        Args:
            model: a ``tf.keras.Model``, compiled or not. If uncompiled it
                is compiled with ``self.compile_params``.
        """
        self.model = model
        # Detect and configure GPU (memory growth + mixed precision).
        self._configure_gpu()
        # Defaults used only when the model arrives uncompiled.
        self.compile_params = {
            'optimizer': tf.keras.optimizers.Adam(learning_rate=1e-4),
            'loss': 'binary_crossentropy',
            'metrics': [
                'accuracy',
                tf.keras.metrics.Precision(name='precision', thresholds=0.5),
                tf.keras.metrics.Recall(name='recall', thresholds=0.5)
            ]
        }
        # Make sure the model is compiled before any fit/evaluate call.
        self._ensure_model_compiled()

        self.best_val_accuracy = 0
        self.best_model = None
        # training_interrupted: set by the SIGINT handler for callers to inspect.
        self.training_interrupted = False
        # stop_training: polled by _InterruptCallback at each epoch end.
        self.stop_training = False

        # Register the SIGINT handler so Ctrl-C requests a graceful stop
        # instead of raising KeyboardInterrupt mid-batch.
        signal.signal(signal.SIGINT, self._signal_handler)

    def _signal_handler(self, signum, frame):
        """Handle SIGINT: flag the trainer to stop after the current epoch."""
        print("\n\n⚠️ 检测到训练中断信号！")
        print("正在完成当前轮次并保存模型...")
        self.training_interrupted = True
        self.stop_training = True

    def _configure_gpu(self):
        """Enable memory growth and mixed precision on any detected GPUs.

        Best-effort: any failure is reported and training falls back to the
        default device configuration.
        """
        try:
            # List all visible GPUs.
            gpus = tf.config.list_physical_devices('GPU')
            if gpus:
                print("\nGPU 信息:")
                for gpu in gpus:
                    print(f"- 找到GPU: {gpu.name}")

                # Allocate GPU memory on demand instead of grabbing it all.
                for gpu in gpus:
                    tf.config.experimental.set_memory_growth(gpu, True)
                print("- GPU内存动态增长已启用")

                # Mixed precision: float16 compute, float32 variables.
                tf.keras.mixed_precision.set_global_policy('mixed_float16')
                print("- 混合精度训练已启用")
            else:
                print("\n警告: 未检测到GPU，将使用CPU进行训练（速度较慢）")

        except Exception as e:
            # Deliberate best-effort: never let GPU setup abort construction.
            print(f"\nGPU配置出错: {str(e)}")
            print("将使用默认配置继续")

    def _ensure_model_compiled(self):
        """Compile the model with the default parameters if needed.

        An uncompiled Keras model has no optimizer set; checking the
        attribute explicitly avoids the bare ``except`` that previously
        swallowed KeyboardInterrupt/SystemExit as well.
        """
        if getattr(self.model, 'optimizer', None) is None:
            print("模型未编译，正在使用默认参数进行编译...")
            self.model.compile(**self.compile_params)

    def train(self, X, y, validation_split=0.2, batch_size=16, epochs=20, initial_epoch=0):
        """Train the model with early stopping, LR decay, and checkpoints.

        Args:
            X: training inputs (array-like accepted by ``model.fit``).
            y: training targets.
            validation_split: fraction of data held out for validation.
            batch_size: samples per gradient step.
            epochs: final epoch number (exclusive upper bound for fit).
            initial_epoch: epoch to resume from.

        Returns:
            The Keras ``History`` object, or ``None`` if training failed.
        """
        print("\n开始训练模型...")

        # Report which device will be used.
        device = "GPU" if len(tf.config.list_physical_devices('GPU')) > 0 else "CPU"
        print(f"使用设备: {device}")

        if device == "GPU":
            # GPUs tolerate (and benefit from) larger batches.
            suggested_batch_size = min(128, len(X))
            if batch_size < suggested_batch_size:
                print(f"提示: GPU训练可以使用更大的batch_size (建议: {suggested_batch_size})")

        print(f"- 训练数据大小: {X.shape}")
        print(f"- 验证集比例: {validation_split}")
        print(f"- 批次大小: {batch_size}")
        print(f"- 训练轮数: {epochs}")
        print(f"- 起始轮数: {initial_epoch}")

        try:
            # Make sure the model is compiled.
            self._ensure_model_compiled()

            # Create the checkpoint directory up front.
            checkpoint_dir = 'saved_models/checkpoints'
            os.makedirs(checkpoint_dir, exist_ok=True)

            # Per-epoch checkpoint with metric in the filename.
            # FIX: this callback was previously created but never passed to
            # fit(), so the promised checkpoints were never written.
            checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=os.path.join(checkpoint_dir, 'model_epoch_{epoch:02d}_val_acc_{val_accuracy:.2f}.h5'),
                monitor='val_accuracy',
                save_best_only=True,
                mode='max'
            )

            callbacks = [
                # Aggressive early stopping on validation accuracy.
                tf.keras.callbacks.EarlyStopping(
                    monitor='val_accuracy',
                    patience=2,
                    restore_best_weights=True,
                    mode='max'
                ),
                # Aggressive learning-rate decay on plateau.
                tf.keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy',
                    factor=0.1,
                    patience=1,
                    mode='max',
                    min_lr=1e-6
                ),
                # Always keep the single best model.
                tf.keras.callbacks.ModelCheckpoint(
                    filepath='saved_models/best_model.h5',
                    monitor='val_accuracy',
                    save_best_only=True,
                    mode='max'
                ),
                checkpoint_callback,
                # FIX: wire the interrupt callback so the SIGINT handler's
                # stop_training flag actually halts fit() and saves the model;
                # previously the flag was set but nothing ever read it.
                self._InterruptCallback(self),
            ]

            # Run training.
            history = self.model.fit(
                X, y,
                validation_split=validation_split,
                batch_size=batch_size,
                epochs=epochs,
                initial_epoch=initial_epoch,
                callbacks=callbacks,
                verbose=1
            )

            # Save the final model with a timestamped filename.
            final_model_path = os.path.join(
                'saved_models',
                f'final_model_{datetime.now().strftime("%Y%m%d_%H%M%S")}.h5'
            )
            os.makedirs('saved_models', exist_ok=True)

            # HDF5 format keeps architecture + weights + optimizer state.
            self.model.save(final_model_path)
            print(f"\n最终模型已保存: {final_model_path}")

            return history

        except Exception as e:
            # Boundary handler: report the failure, signal it with None.
            print(f"\n训练过程出错: {str(e)}")
            return None

    def evaluate(self, X, y):
        """Evaluate the model on (X, y) and print each metric.

        Returns:
            The list of metric values from ``model.evaluate``, or ``None``
            on failure.
        """
        print("\n评估模型性能...")

        try:
            # Make sure the model is compiled.
            self._ensure_model_compiled()

            # Run evaluation.
            results = self.model.evaluate(X, y, verbose=1)

            # Print one line per metric.
            metrics = self.model.metrics_names
            for metric, value in zip(metrics, results):
                print(f"{metric}: {value:.4f}")

            return results
        except Exception as e:
            print(f"评估过程出错: {str(e)}")
            return None

    class _InterruptCallback(tf.keras.callbacks.Callback):
        """Keras callback that stops fit() when the trainer was interrupted."""

        def __init__(self, trainer):
            super().__init__()
            self.trainer = trainer

        def on_epoch_end(self, epoch, logs=None):
            # Poll the flag set by the trainer's SIGINT handler.
            if self.trainer.stop_training:
                print('\n训练已中断，正在保存模型...')
                self.model.stop_training = True
                # Ensure the target directory exists even if train() did not
                # create it in this process.
                os.makedirs('saved_models', exist_ok=True)
                # Save the partially trained model for later resumption.
                self.model.save(f'saved_models/interrupted_model_epoch_{epoch+1}.h5')