# -*- coding: utf-8 -*-
"""
训练脚本

训练神经网络模型，并保存最佳模型。
"""

import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import mean_squared_error, r2_score
from model import create_model, save_model
import pickle
import json

# Configure matplotlib so Chinese text renders correctly in figures
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can display Chinese labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font

# Ensure the output directories exist.
# exist_ok=True avoids the check-then-create race of `if not exists: makedirs`.
for _dir in ('output', 'models', 'figures'):
    os.makedirs(_dir, exist_ok=True)


class EarlyStopping:
    """
    Early-stopping helper.

    Stops training when the validation loss has not improved by more than
    ``min_delta`` for ``patience`` consecutive calls.

    Parameters:
        patience (int): number of non-improving epochs to tolerate
        min_delta (float): minimum loss decrease that counts as improvement
        verbose (bool): if True, print counter/trigger messages
    """
    def __init__(self, patience=80, min_delta=0, verbose=False):
        self.patience = patience
        self.min_delta = min_delta
        self.verbose = verbose
        self.counter = 0
        self.best_loss = float('inf')
        self.early_stop = False

    def __call__(self, val_loss):
        """Record one epoch's validation loss; return True when training should stop."""
        if val_loss < self.best_loss - self.min_delta:
            # Improvement: remember the new best and reset the counter.
            self.best_loss = val_loss
            self.counter = 0
        else:
            self.counter += 1
            # FIX: these messages previously printed unconditionally, making
            # the documented `verbose` flag dead; gate them on `verbose`.
            if self.verbose:
                print(f"早停计数器: {self.counter}/{self.patience}")
            if self.counter >= self.patience:
                self.early_stop = True
                if self.verbose:
                    print("早停触发，停止训练")
        return self.early_stop


class CustomLoss(nn.Module):
    """
    Combined MSE + correlation loss.

    The total loss is ``mse_weight * MSE(pred, target)`` plus
    ``corr_weight`` times the mean, over output columns, of
    ``1 - Pearson(pred[:, i], target[:, i])`` — so the model is rewarded
    for predictions that correlate with the targets column-by-column.

    Parameters:
        mse_weight (float): weight of the MSE term
        corr_weight (float): weight of the correlation term
    """
    def __init__(self, mse_weight=1.0, corr_weight=0.01):
        super(CustomLoss, self).__init__()
        self.mse_weight = mse_weight
        self.corr_weight = corr_weight
        self.mse_loss = nn.MSELoss()

    def forward(self, pred, target):
        mse_term = self.mse_loss(pred, target)

        # Correlation penalty, accumulated one output column at a time.
        corr_term = 0
        if self.corr_weight > 0:
            num_outputs = pred.shape[1]
            for col in range(num_outputs):
                # Center both series before computing the Pearson coefficient.
                p = pred[:, col] - pred[:, col].mean()
                t = target[:, col] - target[:, col].mean()

                numerator = torch.sum(p * t)
                # 1e-8 inside the sqrt guards against division by zero.
                denominator = torch.sqrt(torch.sum(p ** 2) * torch.sum(t ** 2) + 1e-8)

                # Perfect correlation contributes 0; anti-correlation contributes 2.
                corr_term += 1 - numerator / denominator

            corr_term = corr_term / num_outputs

        return self.mse_weight * mse_term + self.corr_weight * corr_term


def load_data():
    """
    Load the preprocessed train/test arrays and feature names from output/.

    Returns:
        tuple: (X_train, X_test, y_train, y_test, feature_names)
    """
    print("加载处理后的数据...")

    # Each split was saved by the preprocessing step as output/<name>.npy.
    X_train, X_test, y_train, y_test = (
        np.load(f"output/{split}.npy")
        for split in ("X_train", "X_test", "y_train", "y_test")
    )

    # Feature names were pickled alongside the arrays.
    with open('output/feature_names.pkl', 'rb') as f:
        feature_names = pickle.load(f)

    print(f"训练集形状: X_train {X_train.shape}, y_train {y_train.shape}")
    print(f"测试集形状: X_test {X_test.shape}, y_test {y_test.shape}")
    print(f"特征数量: {len(feature_names)}")

    return X_train, X_test, y_train, y_test, feature_names


def train_model(model, X_train, y_train, X_test, y_test, 
               batch_size=8, num_epochs=200, learning_rate=0.001, 
               weight_decay=1e-5, patience=80, model_name="model"):
    """
    Train a model and keep the checkpoint with the lowest test loss.

    Uses AdamW with the combined MSE+correlation CustomLoss, halves the
    learning rate when the test loss plateaus (ReduceLROnPlateau), and stops
    early after `patience` non-improving epochs. The best weights are
    restored at the end, saved to models/<model_name>.pth, and the per-epoch
    history is written to output/<model_name>_history.json.

    Parameters:
        model (nn.Module): model to train
        X_train (np.ndarray): training features
        y_train (np.ndarray): training targets
        X_test (np.ndarray): test features
        y_test (np.ndarray): test targets
        batch_size (int): mini-batch size
        num_epochs (int): maximum number of epochs
        learning_rate (float): initial learning rate
        weight_decay (float): AdamW weight decay
        patience (int): early-stopping patience in epochs
        model_name (str): name used for saved model/history/figure files

    Returns:
        tuple: (trained model with best weights loaded, history dict)
    """
    print(f"开始训练模型: {model_name}...")
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")
    
    # Move all data to the training device once up front; the dataset is
    # small enough to live in device memory.
    X_train_tensor = torch.FloatTensor(X_train).to(device)
    y_train_tensor = torch.FloatTensor(y_train).to(device)
    X_test_tensor = torch.FloatTensor(X_test).to(device)
    y_test_tensor = torch.FloatTensor(y_test).to(device)
    
    model = model.to(device)
    
    train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    
    optimizer = optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    criterion = CustomLoss(mse_weight=1.0, corr_weight=0.01)
    
    # Halve the LR after 10 epochs without test-loss improvement.
    # (The deprecated `verbose` kwarg is omitted; it was False, so output
    # is unchanged.)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=10
    )
    
    early_stopping = EarlyStopping(patience=patience, verbose=False)
    
    # Per-epoch training history, serialized to JSON at the end.
    history = {
        'train_loss': [],
        'test_loss': [],
        'test_mse': [],
        'test_r2': [],
        'lr': []
    }
    
    best_test_loss = float('inf')
    best_model_state = None
    
    start_time = time.time()
    for epoch in range(num_epochs):
        model.train()
        train_loss = 0.0
        
        for batch_X, batch_y in train_loader:
            outputs = model(batch_X)
            loss = criterion(outputs, batch_y)
            
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            # Weight by batch size so the epoch average is per-sample
            # (the last batch may be smaller).
            train_loss += loss.item() * batch_X.size(0)
        
        train_loss = train_loss / len(train_loader.dataset)
        
        # Evaluate on the full test set each epoch.
        model.eval()
        with torch.no_grad():
            test_outputs = model(X_test_tensor)
            test_loss = criterion(test_outputs, y_test_tensor).item()
            
            test_pred = test_outputs.cpu().numpy()
            test_mse = mean_squared_error(y_test, test_pred)
            test_r2 = r2_score(y_test, test_pred)
        
        scheduler.step(test_loss)
        current_lr = optimizer.param_groups[0]['lr']
        
        history['train_loss'].append(train_loss)
        history['test_loss'].append(test_loss)
        history['test_mse'].append(test_mse)
        history['test_r2'].append(test_r2)
        history['lr'].append(current_lr)
        
        # Progress log on the first epoch and every 10th epoch.
        if (epoch + 1) % 10 == 0 or epoch == 0:
            print(f"Epoch {epoch+1}/{num_epochs}, "
                  f"Train Loss: {train_loss:.6f}, "
                  f"Test Loss: {test_loss:.6f}, "
                  f"Test MSE: {test_mse:.6f}, "
                  f"Test R²: {test_r2:.6f}, "
                  f"LR: {current_lr:.6f}")
        
        if test_loss < best_test_loss:
            best_test_loss = test_loss
            # BUG FIX: `state_dict().copy()` is a *shallow* copy — the dict's
            # tensors are the live parameters, so the "best" snapshot kept
            # mutating as training continued. Clone each tensor instead.
            best_model_state = {k: v.detach().clone()
                                for k, v in model.state_dict().items()}
            print(f"Epoch {epoch+1}: 发现更好的模型，测试损失: {test_loss:.6f}")
        
        if early_stopping(test_loss):
            print(f"早停在 epoch {epoch+1}")
            break
    
    train_time = time.time() - start_time
    print(f"训练完成，耗时: {train_time:.2f} 秒")
    
    # Restore the best checkpoint before saving/evaluating.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print("已加载最佳模型")
    
    model_path = f"models/{model_name}.pth"
    save_model(model, model_path)
    
    history_path = f"output/{model_name}_history.json"
    with open(history_path, 'w') as f:
        # Convert numpy scalars to plain floats for JSON serialization.
        history_serializable = {key: [float(v) for v in value]
                                for key, value in history.items()}
        json.dump(history_serializable, f, indent=4)
    
    visualize_training(history, model_name)
    
    evaluate_model(model, X_test, y_test, model_name)
    
    return model, history


def visualize_training(history, model_name):
    """
    Plot the training curves and save them to figures/<model_name>_training.png.

    Draws four panels: train/test loss, test MSE, test R², and the learning
    rate, each over epochs.

    Parameters:
        history (dict): per-epoch lists under keys 'train_loss', 'test_loss',
            'test_mse', 'test_r2', 'lr'
        model_name (str): model name used in the output filename
    """
    print("可视化训练过程...")
    
    plt.figure(figsize=(15, 10))
    
    # 1. Loss curves
    plt.subplot(2, 2, 1)
    plt.plot(history['train_loss'], label='训练损失')
    plt.plot(history['test_loss'], label='测试损失')
    plt.xlabel('Epoch')
    plt.ylabel('损失')
    plt.title('训练和测试损失')
    plt.legend()
    
    # 2. MSE curve
    plt.subplot(2, 2, 2)
    plt.plot(history['test_mse'], label='测试MSE')
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.title('测试集MSE')
    plt.legend()
    
    # 3. R² curve
    plt.subplot(2, 2, 3)
    plt.plot(history['test_r2'], label='测试R²')
    plt.xlabel('Epoch')
    plt.ylabel('R²')
    plt.title('测试集R²')
    plt.legend()
    
    # 4. Learning-rate curve
    plt.subplot(2, 2, 4)
    plt.plot(history['lr'], label='学习率')
    plt.xlabel('Epoch')
    plt.ylabel('学习率')
    plt.title('学习率变化')
    plt.legend()
    
    plt.tight_layout()
    plt.savefig(f"figures/{model_name}_training.png", dpi=300, bbox_inches='tight')
    # FIX: close the figure after saving; this function is called once per
    # trained model, and unclosed figures accumulate and leak memory.
    plt.close()
    print(f"训练过程可视化已保存到 figures/{model_name}_training.png")


def evaluate_model(model, X_test, y_test, model_name):
    """
    Evaluate a trained model on the test set; save metrics and a scatter plot.

    Computes overall and per-output MSE/R² (column 0: solar reflectance,
    column 1: infrared emissivity — per the plot titles below), writes them
    to output/<model_name>_performance.json, and saves predicted-vs-true
    scatter plots to figures/<model_name>_predictions.png.

    Parameters:
        model (nn.Module): trained model
        X_test (np.ndarray): test features
        y_test (np.ndarray): test targets; assumed shape (n_samples, 2)
        model_name (str): name used in the output filenames
    """
    print("评估模型性能...")
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"评估使用设备: {device}")
    
    X_test_tensor = torch.FloatTensor(X_test).to(device)
    model = model.to(device)
    model.eval()
    
    # Predict without tracking gradients.
    with torch.no_grad():
        y_pred = model(X_test_tensor).cpu().numpy()
    
    # Overall metrics across both outputs.
    mse = mean_squared_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    
    # Per-output metrics.
    mse_solar = mean_squared_error(y_test[:, 0], y_pred[:, 0])
    r2_solar = r2_score(y_test[:, 0], y_pred[:, 0])
    
    mse_ir = mean_squared_error(y_test[:, 1], y_pred[:, 1])
    r2_ir = r2_score(y_test[:, 1], y_pred[:, 1])
    
    print(f"模型性能评估 - {model_name}:")
    print(f"总体 MSE: {mse:.6f}")
    print(f"总体 R²: {r2:.6f}")
    print(f"太阳反射率 MSE: {mse_solar:.6f}")
    print(f"太阳反射率 R²: {r2_solar:.6f}")
    print(f"红外发射率 MSE: {mse_ir:.6f}")
    print(f"红外发射率 R²: {r2_ir:.6f}")
    
    # Persist metrics as plain floats for JSON.
    performance = {
        'mse': float(mse),
        'r2': float(r2),
        'mse_solar': float(mse_solar),
        'r2_solar': float(r2_solar),
        'mse_ir': float(mse_ir),
        'r2_ir': float(r2_ir)
    }
    
    with open(f"output/{model_name}_performance.json", 'w') as f:
        json.dump(performance, f, indent=4)
    
    # Predicted-vs-true scatter plots; the red dashed diagonal is the
    # perfect-prediction line.
    plt.figure(figsize=(12, 5))
    
    # 1. Solar reflectance
    plt.subplot(1, 2, 1)
    plt.scatter(y_test[:, 0], y_pred[:, 0])
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlabel('真实值')
    plt.ylabel('预测值')
    plt.title('太阳反射率: 预测 vs 真实')
    plt.axis('equal')
    plt.axis([0, 1, 0, 1])
    
    # 2. Infrared emissivity
    plt.subplot(1, 2, 2)
    plt.scatter(y_test[:, 1], y_pred[:, 1])
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlabel('真实值')
    plt.ylabel('预测值')
    plt.title('红外发射率: 预测 vs 真实')
    plt.axis('equal')
    plt.axis([0, 1, 0, 1])
    
    plt.tight_layout()
    plt.savefig(f"figures/{model_name}_predictions.png", dpi=300, bbox_inches='tight')
    # FIX: close the figure after saving so repeated evaluations don't
    # accumulate open figures.
    plt.close()
    print(f"预测结果可视化已保存到 figures/{model_name}_predictions.png")


def train_multiple_models(X_train, X_test, y_train, y_test, feature_names):
    """
    Train several architectures on the same split and compare performance.

    Parameters:
        X_train (np.ndarray): training features
        X_test (np.ndarray): test features
        y_train (np.ndarray): training targets
        y_test (np.ndarray): test targets
        feature_names (list): feature names (not referenced in this body;
            kept for interface symmetry with load_data)

    Returns:
        tuple: (models dict by name, performance dict by name,
                name of the best model by overall R²)
    """
    print("训练多个模型并比较性能...")
    
    # Every model takes the full feature vector as input.
    input_dim = X_train.shape[1]
    
    # One config per architecture; 'type' and 'params' are forwarded to
    # create_model (defined in model.py).
    model_configs = [
        {
            'type': 'mlp',
            'name': 'mlp_model',
            'params': {
                'hidden_dims': [64, 32],
                'dropout_rate': 0.2,
                'use_batch_norm': True,
                'activation': 'leaky_relu'
            }
        },
        {
            'type': 'residual',
            'name': 'residual_model',
            'params': {
                'hidden_dims': [64, 128, 64],
                'num_res_blocks': 2,
                'dropout_rate': 0.2,
                'use_batch_norm': True
            }
        },
        {
            'type': 'dual_branch',
            'name': 'dual_branch_model',
            'params': {
                'hidden_dims': [64, 128],
                'branch_dims': [64, 32],
                'dropout_rate': 0.2,
                'use_batch_norm': True,
                'num_heads': 4
            }
        },
        {
            'type': 'ensemble',
            'name': 'ensemble_model',
            'params': {
                'num_models': 3,
                'base_model_type': 'dual_branch',  # use the dual-branch model as the ensemble's base learner
                'hidden_dims': [64, 128],
                'dropout_rate': 0.2,
                'use_batch_norm': True
            }
        }
    ]
    
    # Training hyperparameters shared by all models.
    train_params = {
        'batch_size': 4,
        'num_epochs': 200,
        'learning_rate': 0.0001,
        'weight_decay': 1e-5,
        'patience': 80
    }
    
    # Trained models and their metrics, keyed by config name.
    models = {}
    performances = {}
    
    for config in model_configs:
        print(f"\n{'='*50}")
        print(f"训练模型: {config['name']}")
        print(f"{'='*50}")
        
        # Build the model from the project factory.
        model = create_model(config['type'], input_dim, **config['params'])

        # Print the architecture for the log.
        print(model)
        
        # Move the model to GPU if available. (train_model repeats this
        # internally; doing it here too is redundant but harmless.)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {device}")
        model = model.to(device)
        
        # Train; train_model also saves the checkpoint, history, figures and
        # the <name>_performance.json read back below.
        model, history = train_model(
            model, X_train, y_train, X_test, y_test,
            model_name=config['name'],
            **train_params
        )
        
        models[config['name']] = model
        
        # Re-read the metrics that evaluate_model (called inside train_model)
        # wrote to disk.
        with open(f"output/{config['name']}_performance.json", 'r') as f:
            performance = json.load(f)
        
        performances[config['name']] = performance
    
    # Print a comparison table of all models.
    print("\n模型性能比较:")
    print("-" * 80)
    print(f"{'模型名称':<15} {'总体MSE':<10} {'总体R²':<10} {'太阳反射率MSE':<15} {'太阳反射率R²':<15} {'红外发射率MSE':<15} {'红外发射率R²':<15}")
    print("-" * 80)
    
    for name, perf in performances.items():
        print(f"{name:<15} {perf['mse']:<10.6f} {perf['r2']:<10.6f} {perf['mse_solar']:<15.6f} {perf['r2_solar']:<15.6f} {perf['mse_ir']:<15.6f} {perf['r2_ir']:<15.6f}")
    
    # Pick the winner by highest overall R².
    best_model_name = max(performances.items(), key=lambda x: x[1]['r2'])[0]
    best_performance = performances[best_model_name]
    
    print("\n最佳模型:")
    print(f"模型名称: {best_model_name}")
    print(f"总体 R²: {best_performance['r2']:.6f}")
    print(f"太阳反射率 R²: {best_performance['r2_solar']:.6f}")
    print(f"红外发射率 R²: {best_performance['r2_ir']:.6f}")
    
    # Persist the winner's name and metrics for downstream scripts.
    best_model_info = {
        'name': best_model_name,
        'performance': best_performance
    }
    
    with open("output/best_model_info.json", 'w') as f:
        json.dump(best_model_info, f, indent=4)
    
    print(f"最佳模型信息已保存到 output/best_model_info.json")
    
    return models, performances, best_model_name


def main():
    """Entry point: load the data, train all candidate models, and report."""
    # load_data returns (X_train, X_test, y_train, y_test, feature_names),
    # which is exactly the argument order train_multiple_models expects.
    trained_models, all_performances, best_model_name = train_multiple_models(*load_data())

    print("\n训练完成！")
    print(f"最佳模型: {best_model_name}")
    print(f"模型已保存到 models/ 目录")
    print(f"训练历史和性能指标已保存到 output/ 目录")
    print(f"可视化结果已保存到 figures/ 目录")


if __name__ == "__main__":
    main()