"""
模型训练脚本

这个脚本用于训练U-Net模型进行黑色素瘤分割任务。
"""

import os
import numpy as np
import tensorflow as tf
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import matplotlib.pyplot as plt
from unet_model import build_unet_model

def train(
    train_images, 
    train_masks, 
    test_images=None, 
    test_masks=None,
    input_shape=(128, 128, 3),
    batch_size=8,
    epochs=50,
    learning_rate=1e-4,
    model_save_path="models/melanoma_unet.h5"
):
    """
    Train a U-Net model on in-memory image/mask arrays.

    Parameters:
        train_images: training image array
        train_masks: training mask array
        test_images: validation image array (optional)
        test_masks: validation mask array (optional)
        input_shape: model input shape, (H, W, C)
        batch_size: mini-batch size
        epochs: maximum number of training epochs
        learning_rate: Adam learning rate
        model_save_path: where the best checkpoint is written

    Returns:
        model: the trained model (best weights restored by EarlyStopping)
        history: the Keras History object from model.fit
    """
    # Ensure the checkpoint directory exists. Guard against a bare filename:
    # os.path.dirname("model.h5") == "" and os.makedirs("") raises.
    save_dir = os.path.dirname(model_save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    
    # Build and compile the model
    model = build_unet_model(input_shape)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate),
        loss="binary_crossentropy",
        # Pin metric names so the history keys are always 'recall' and
        # 'precision' (auto-generated names become 'recall_1', etc. when
        # metric objects are constructed more than once per session), which
        # plot_training_history relies on.
        metrics=[
            'accuracy',
            tf.keras.metrics.Recall(name='recall'),
            tf.keras.metrics.Precision(name='precision'),
        ]
    )
    
    # Callbacks: checkpoint best model, decay LR on plateau, stop early.
    callbacks = [
        ModelCheckpoint(model_save_path, verbose=1, save_best_only=True),
        ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.1, verbose=1, min_lr=1e-7),
        EarlyStopping(monitor='val_loss', patience=10, verbose=1, restore_best_weights=True)
    ]
    
    # Validation data is optional; without it the val_* callbacks monitor
    # nothing useful but fit() still runs.
    if test_images is not None and test_masks is not None:
        validation_data = (test_images, test_masks)
    else:
        validation_data = None
    
    # NOTE: steps_per_epoch / validation_steps are intentionally NOT passed.
    # With in-memory NumPy arrays Keras derives them from batch_size; the old
    # np.ceil(...) values were floats, which fit() rejects, and combining
    # explicit steps with array input + batch_size is an error in several
    # Keras versions.
    history = model.fit(
        train_images,
        train_masks,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=validation_data,
        shuffle=True,
        callbacks=callbacks
    )
    
    return model, history

def plot_training_history(history, save_path=None):
    """
    Plot training curves (loss, accuracy, recall, precision) in a 2x2 grid.

    Parameters:
        history: Keras History object (uses its ``history`` dict)
        save_path: optional path to save the figure as an image

    Metrics missing from ``history.history`` are simply skipped instead of
    raising KeyError (e.g. when the model was compiled without recall or
    precision, or with differently named metrics).
    """
    # One entry per panel: (history key, title, y-label, train label, val label).
    # The label strings must match the ones the rest of the project expects,
    # so they are kept verbatim.
    panels = [
        ('loss', '损失', '损失值', '训练损失', '验证损失'),
        ('accuracy', '准确率', '准确率', '训练准确率', '验证准确率'),
        ('recall', '召回率', '召回率', '训练召回率', '验证召回率'),
        ('precision', '精确率', '精确率', '训练精确率', '验证精确率'),
    ]
    
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    for ax, (key, title, ylabel, train_label, val_label) in zip(axes.flat, panels):
        # Guard both train and validation keys: unconditional indexing would
        # raise KeyError for any metric the model was not compiled with.
        if key in history.history:
            ax.plot(history.history[key], label=train_label)
        val_key = 'val_' + key
        if val_key in history.history:
            ax.plot(history.history[val_key], label=val_label)
        ax.set_title(title)
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.legend()
    
    plt.tight_layout()
    
    if save_path:
        # Create the output directory if needed; savefig raises
        # FileNotFoundError on a missing directory.
        out_dir = os.path.dirname(save_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
        plt.savefig(save_path)
        print(f"训练历史图表已保存至 {save_path}")
    
    plt.show()

if __name__ == "__main__":
    # 示例: 如何使用
    try:
        # 加载数据
        data_dir = "data/processed"
        print(f"正在从 {os.path.abspath(data_dir)} 加载数据...")
        
        # 尝试加载train/val拆分后的数据
        try:
            train_images = np.load(os.path.join(data_dir, "train_images.npy"))
            train_masks = np.load(os.path.join(data_dir, "train_masks.npy"))
            test_images = np.load(os.path.join(data_dir, "val_images.npy"))
            test_masks = np.load(os.path.join(data_dir, "val_masks.npy"))
            print(f"已加载拆分后的训练数据: {train_images.shape} 和验证数据: {test_images.shape}")
        except FileNotFoundError:
            print("未找到拆分后的训练/验证数据，尝试加载完整数据集...")
            # 如果没有拆分好的数据，加载完整数据集并手动拆分
            all_images = np.load(os.path.join(data_dir, "melanoma_images.npy"))
            all_masks = np.load(os.path.join(data_dir, "melanoma_masks.npy"))
            
            # 计算拆分点
            split = int(len(all_images) * 0.8)
            train_images = all_images[:split]
            train_masks = all_masks[:split]
            test_images = all_images[split:]
            test_masks = all_masks[split:]
            print(f"已加载并拆分完整数据集: 训练数据 {train_images.shape}, 验证数据 {test_images.shape}")
        
        # 训练模型
        model, history = train(
            train_images=train_images,
            train_masks=train_masks,
            test_images=test_images,
            test_masks=test_masks,
            model_save_path="models/melanoma_unet.h5"
        )
        
        # 绘制训练历史
        plot_training_history(history, save_path="results/training_history.png")
        
    except FileNotFoundError:
        print("未找到数据文件，请先运行数据预处理脚本。")
