"""
增强版空调负荷预测模型训练管道
包含完整的训练、验证和部署功能
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from typing import Dict, Tuple, List, Optional
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import joblib
import json
import os
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')

# 尝试导入tqdm进度条
try:
    from tqdm import tqdm
    TQDM_AVAILABLE = True
except ImportError:
    TQDM_AVAILABLE = False
    print("💡 安装tqdm可获得更好的进度显示: pip install tqdm")

# 导入自定义模块
from enhanced_hvac_transformer import EnhancedHVACTransformer, EnhancedHVACConfig, RealTimeEnhancedPredictor
from enhanced_hvac_preprocessing import EnhancedHVACDataProcessor


class EnhancedHVACDataset(Dataset):
    """Enhanced HVAC load dataset: wraps a feature dict and targets as tensors."""

    # Feature keys that are categorical and therefore stored as integer tensors;
    # every other key is treated as a continuous float feature.
    _CATEGORICAL_KEYS = frozenset(
        ('hour', 'dayofweek', 'month', 'season', 'is_holiday', 'is_weekend', 'work_period')
    )

    def __init__(self, X: Dict[str, np.ndarray], y: np.ndarray):
        """
        Build the dataset.

        Args:
            X: dict of input feature arrays, each indexed by sample along axis 0
            y: target array of shape [n_samples, pred_len, 1]
        """
        self.y = torch.FloatTensor(y)
        self.X = {
            name: (torch.LongTensor(arr) if name in self._CATEGORICAL_KEYS
                   else torch.FloatTensor(arr))
            for name, arr in X.items()
        }

    def __len__(self) -> int:
        return self.y.shape[0]

    def __getitem__(self, idx: int) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        """Return (feature dict, target tensor) for a single sample."""
        features = {name: tensor[idx] for name, tensor in self.X.items()}
        return features, self.y[idx]


class EarlyStopping:
    """Stop training once validation loss plateaus.

    Tracks the best validation loss observed; after `patience` consecutive
    calls that fail to beat it by more than `min_delta`, the stop flag latches
    to True and stays True.
    """

    def __init__(self, patience: int = 15, min_delta: float = 0.001):
        self.patience = patience
        self.min_delta = min_delta
        self.best_loss = float('inf')
        self.counter = 0
        self.early_stop = False

    def __call__(self, val_loss: float) -> bool:
        """Record one epoch's validation loss; return True when training should stop."""
        improvement_bar = self.best_loss - self.min_delta
        if val_loss < improvement_bar:
            # Genuine improvement: remember it and reset the stall counter.
            self.best_loss, self.counter = val_loss, 0
        else:
            self.counter += 1

        # Latch: once triggered, the flag never clears.
        self.early_stop = self.early_stop or self.counter >= self.patience
        return self.early_stop


class EnhancedHVACTrainer:
    """Trainer for the enhanced HVAC load-forecasting model.

    Wraps model construction, the train/validation loops, live curve
    plotting, evaluation with de-normalization, checkpointing, and
    deployment packaging.
    """
    
    def __init__(self, config: EnhancedHVACConfig, save_dir: str = "enhanced_models"):
        """
        Initialize the trainer.
        
        Args:
            config: model configuration (also supplies the device string)
            save_dir: directory where checkpoints and plots are saved
        """
        self.config = config
        self.save_dir = save_dir
        self.device = torch.device(config.device)
        
        # Create the save directory (no-op if it already exists)
        os.makedirs(save_dir, exist_ok=True)
        
        # Build the model and move it to the target device
        self.model = EnhancedHVACTransformer(config).to(self.device)
        
        # Per-epoch training history, appended to by train()
        self.train_history = {
            'train_loss': [],
            'val_loss': [],
            'train_mae': [],
            'val_mae': [],
            'learning_rate': []
        }
        
        # Live-plotting configuration
        self.enable_live_plot = True
        self.plot_interval = 5  # redraw the curves every 5 epochs
        self.fig = None
        self.axes = None
    
    def set_live_plot(self, enable: bool = True, interval: int = 5) -> None:
        """
        Configure live plotting of the training curves.
        
        Args:
            enable: whether live plotting is enabled
            interval: redraw interval, in epochs
        """
        self.enable_live_plot = enable
        self.plot_interval = interval
        if enable:
            print(f"📈 实时绘图已启用，每 {interval} 个epoch更新一次")
        else:
            print("📈 实时绘图已禁用")
        
    def _create_data_loaders(self, X: Dict[str, np.ndarray], y: np.ndarray, 
                           train_ratio: float = 0.8, batch_size: int = 32) -> Tuple[DataLoader, DataLoader]:
        """
        Create the training and validation data loaders.

        The split is chronological (first `train_ratio` of samples train,
        the remainder validate), which avoids leaking future data.
        """
        n_samples = len(y)
        train_size = int(n_samples * train_ratio)
        
        # Chronological split: earliest samples train, latest validate
        X_train = {key: value[:train_size] for key, value in X.items()}
        X_val = {key: value[train_size:] for key, value in X.items()}
        y_train = y[:train_size]
        y_val = y[train_size:]
        
        train_dataset = EnhancedHVACDataset(X_train, y_train)
        val_dataset = EnhancedHVACDataset(X_val, y_val)
        
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
        
        return train_loader, val_loader
    
    def _calculate_metrics(self, predictions: np.ndarray, targets: np.ndarray) -> Dict[str, float]:
        """Compute MAE/MSE/RMSE/R2/MAPE between flat prediction and target arrays."""
        mae = mean_absolute_error(targets, predictions)
        mse = mean_squared_error(targets, predictions)
        rmse = np.sqrt(mse)
        r2 = r2_score(targets, predictions)
        
        # MAPE; the 1e-8 guards against division by zero targets
        mape = np.mean(np.abs((targets - predictions) / (targets + 1e-8))) * 100
        
        return {
            'MAE': mae,
            'MSE': mse,
            'RMSE': rmse,
            'R2': r2,
            'MAPE': mape
        }
    
    def train_epoch(self, train_loader: DataLoader, optimizer: optim.Optimizer, 
                   criterion: nn.Module) -> Tuple[float, float]:
        """Run one training epoch; returns (mean loss, mean MAE) over batches."""
        self.model.train()
        total_loss = 0.0
        total_mae = 0.0
        num_batches = 0
        
        for X_batch, y_batch in train_loader:
            # Move batch data to the target device
            X_batch = {key: value.to(self.device) for key, value in X_batch.items()}
            y_batch = y_batch.to(self.device)
            
            # Forward pass; the second argument is the prediction horizon
            optimizer.zero_grad()
            predictions = self.model(X_batch, y_batch.shape[1])
            
            # Compute loss and MAE for logging
            loss = criterion(predictions, y_batch)
            mae = torch.mean(torch.abs(predictions - y_batch))
            
            # Backward pass with gradient clipping for stability
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            optimizer.step()
            
            total_loss += loss.item()
            total_mae += mae.item()
            num_batches += 1
        
        return total_loss / num_batches, total_mae / num_batches
    
    def validate_epoch(self, val_loader: DataLoader, criterion: nn.Module) -> Tuple[float, float]:
        """Validate one epoch with verbose diagnostics.

        Besides the averaged loss/MAE, prints detailed statistics for the
        first batch and aggregate prediction/target statistics over the
        first few batches — intended to diagnose constant outputs and
        normalization mismatches between train and validation data.
        """
        self.model.eval()
        total_loss = 0.0
        total_mae = 0.0
        num_batches = 0
        
        # Debug bookkeeping: first-batch deep dive plus a small sample of
        # predictions/targets for aggregate analysis
        first_batch_debug = True
        all_predictions = []
        all_targets = []
        debug_batch_count = 0
        
        with torch.no_grad():
            for X_batch, y_batch in val_loader:
                # Move batch data to the target device
                X_batch = {key: value.to(self.device) for key, value in X_batch.items()}
                y_batch = y_batch.to(self.device)
                
                # Forward pass
                predictions = self.model(X_batch, y_batch.shape[1])
                
                # Detailed diagnostics for the first batch only
                if first_batch_debug:
                    print(f"\n🔍 验证集调试信息 (Epoch当前状态):")
                    print(f"   📊 验证集batch形状: {y_batch.shape}")
                    print(f"   📈 目标值统计: min={y_batch.min():.6f}, max={y_batch.max():.6f}, mean={y_batch.mean():.6f}")
                    print(f"   🔮 预测值统计: min={predictions.min():.6f}, max={predictions.max():.6f}, mean={predictions.mean():.6f}")
                    print(f"   📏 预测值标准差: {predictions.std():.8f}")
                    print(f"   📏 目标值标准差: {y_batch.std():.8f}")
                    
                    # Warn when predictions are (nearly) constant
                    if predictions.std() < 1e-6:
                        print(f"   🚨 严重警告: 预测值几乎固定! (std={predictions.std():.10f})")
                        print(f"   🔧 建议: 模型可能输出常数，检查模型权重或学习率")
                    elif predictions.std() < 1e-3:
                        print(f"   ⚠️ 警告: 预测值变化很小 (std={predictions.std():.8f})")
                    
                    # Warn when the prediction mean is far from the target mean
                    if abs(predictions.mean() - y_batch.mean()) > 2 * y_batch.std():
                        print(f"   🚨 警告: 预测值和目标值分布差异巨大!")
                        print(f"        预测均值: {predictions.mean():.6f}")
                        print(f"        目标均值: {y_batch.mean():.6f}")
                        print(f"        差异: {abs(predictions.mean() - y_batch.mean()):.6f}")
                    
                    # Show the first few prediction/target pairs for comparison
                    print(f"   📋 样本对比 (前5个时间步):")
                    for i in range(min(5, predictions.shape[1])):
                        pred_val = predictions[0, i, 0].item()
                        true_val = y_batch[0, i, 0].item()
                        print(f"      步骤{i+1}: 预测={pred_val:.6f}, 真值={true_val:.6f}, 差异={abs(pred_val-true_val):.6f}")
                    
                    first_batch_debug = False
                
                # Compute loss and MAE
                loss = criterion(predictions, y_batch)
                mae = torch.mean(torch.abs(predictions - y_batch))
                
                # Collect data for aggregate analysis
                if debug_batch_count < 5:  # only the first 5 batches, to limit memory use
                    all_predictions.append(predictions.cpu().numpy())
                    all_targets.append(y_batch.cpu().numpy())
                    debug_batch_count += 1
                
                total_loss += loss.item()
                total_mae += mae.item()
                num_batches += 1
        
        # Aggregate validation-set analysis
        if num_batches > 0:
            avg_loss = total_loss / num_batches
            avg_mae = total_mae / num_batches
            
            print(f"   📊 验证集整体统计:")
            print(f"      批次数量: {num_batches}")
            print(f"      平均损失: {avg_loss:.6f}")
            print(f"      平均MAE: {avg_mae:.6f}")
            
            # Analyze the sampled predictions/targets
            if all_predictions and all_targets:
                all_preds = np.concatenate(all_predictions)
                all_targs = np.concatenate(all_targets)
                
                print(f"      样本数据分析 (前5个batch):")
                print(f"         目标值范围: [{all_targs.min():.6f}, {all_targs.max():.6f}]")
                print(f"         预测值范围: [{all_preds.min():.6f}, {all_preds.max():.6f}]")
                print(f"         目标值均值±std: {all_targs.mean():.6f}±{all_targs.std():.6f}")
                print(f"         预测值均值±std: {all_preds.mean():.6f}±{all_preds.std():.6f}")
                
                # Pearson correlation between predictions and targets
                correlation = np.corrcoef(all_preds.flatten(), all_targs.flatten())[0, 1]
                print(f"         预测-目标相关性: {correlation:.6f}")
                
                if correlation < 0.1:
                    print(f"   🚨 严重问题: 预测值与目标值几乎无相关性!")
                elif correlation < 0.3:
                    print(f"   ⚠️ 问题: 预测值与目标值相关性较低")
            
            # Heuristic diagnostic suggestions based on loss/MAE magnitude
            if avg_loss > 3.0:
                print(f"   🔧 诊断建议:")
                print(f"      1. 验证集Loss过高 (>{avg_loss:.2f})，可能是数据标准化问题")
                print(f"      2. 检查验证集是否使用了训练集的标准化参数")
                print(f"      3. 确认训练集和验证集的数据预处理流程一致")
            
            if avg_mae > 0.8:
                print(f"      4. MAE过高 (>{avg_mae:.2f})，模型预测能力不足")
                print(f"      5. 考虑增加模型容量或降低正则化强度")
            
            return avg_loss, avg_mae
        
        # NOTE(review): only reached when num_batches == 0, so this line
        # raises ZeroDivisionError on an empty loader — dead/buggy fallback.
        return total_loss / num_batches, total_mae / num_batches
    
    def plot_training_curves(self, save_path: Optional[str] = None) -> None:
        """Draw (and optionally save) the live 2x2 grid of training curves."""
        if not self.train_history['train_loss']:
            return
        
        epochs = range(1, len(self.train_history['train_loss']) + 1)
        
        # Lazily create the interactive figure on first call
        if self.fig is None:
            plt.ion()  # enable interactive mode
            self.fig, self.axes = plt.subplots(2, 2, figsize=(12, 8))
            plt.subplots_adjust(hspace=0.3, wspace=0.3)
        
        # Clear the previous frame
        for ax in self.axes.flat:
            ax.clear()
        
        # Loss curves
        self.axes[0, 0].plot(epochs, self.train_history['train_loss'], 'b-', label='Train Loss', linewidth=2)
        self.axes[0, 0].plot(epochs, self.train_history['val_loss'], 'r-', label='Val Loss', linewidth=2)
        self.axes[0, 0].set_xlabel('Epoch')
        self.axes[0, 0].set_ylabel('Loss')
        self.axes[0, 0].set_title('训练损失曲线')
        self.axes[0, 0].legend()
        self.axes[0, 0].grid(True, alpha=0.3)
        
        # MAE curves
        self.axes[0, 1].plot(epochs, self.train_history['train_mae'], 'b-', label='Train MAE', linewidth=2)
        self.axes[0, 1].plot(epochs, self.train_history['val_mae'], 'r-', label='Val MAE', linewidth=2)
        self.axes[0, 1].set_xlabel('Epoch')
        self.axes[0, 1].set_ylabel('MAE')
        self.axes[0, 1].set_title('平均绝对误差曲线')
        self.axes[0, 1].legend()
        self.axes[0, 1].grid(True, alpha=0.3)
        
        # Learning-rate curve (log scale)
        self.axes[1, 0].plot(epochs, self.train_history['learning_rate'], 'g-', linewidth=2)
        self.axes[1, 0].set_xlabel('Epoch')
        self.axes[1, 0].set_ylabel('Learning Rate')
        self.axes[1, 0].set_title('学习率变化')
        self.axes[1, 0].set_yscale('log')
        self.axes[1, 0].grid(True, alpha=0.3)
        
        # Loss gap (val - train) as an overfitting indicator
        loss_diff = np.array(self.train_history['val_loss']) - np.array(self.train_history['train_loss'])
        self.axes[1, 1].plot(epochs, loss_diff, 'purple', linewidth=2)
        self.axes[1, 1].axhline(y=0, color='k', linestyle='--', alpha=0.5)
        self.axes[1, 1].set_xlabel('Epoch')
        self.axes[1, 1].set_ylabel('Val Loss - Train Loss')
        self.axes[1, 1].set_title('过拟合检测 (>0表示过拟合)')
        self.axes[1, 1].grid(True, alpha=0.3)
        
        # Refresh the on-screen figure
        plt.draw()
        plt.pause(0.01)
        
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
    
    def print_epoch_progress(self, epoch: int, epochs: int, train_loss: float, val_loss: float, 
                           train_mae: float, val_mae: float, current_lr: float, best_val_loss: float) -> None:
        """Print a one-line progress bar for this epoch (fallback when tqdm is absent)."""
        # Progress percentage
        progress = (epoch + 1) / epochs * 100
        
        # Text progress bar
        bar_length = 30
        filled_length = int(bar_length * (epoch + 1) // epochs)
        bar = '█' * filled_length + '░' * (bar_length - filled_length)
        
        # Emoji indicating the current loss state
        loss_status = "📈" if val_loss > train_loss * 1.2 else "📉" if val_loss < best_val_loss else "📊"
        
        print(f"\r🔥 Epoch {epoch+1:3d}/{epochs} |{bar}| {progress:5.1f}% "
              f"{loss_status} Train: {train_loss:.6f} | Val: {val_loss:.6f} | "
              f"MAE: {val_mae:.4f} | LR: {current_lr:.2e}", end="")
        
        # Every 10 epochs (and on the last epoch) print a detailed summary
        if (epoch + 1) % 10 == 0 or epoch == epochs - 1:
            print()  # newline
            print(f"    📊 详细统计:")
            print(f"       🔸 训练损失: {train_loss:.6f} | 训练MAE: {train_mae:.4f}")
            print(f"       🔹 验证损失: {val_loss:.6f} | 验证MAE: {val_mae:.4f}")
            print(f"       ⭐ 最佳验证损失: {best_val_loss:.6f}")
            print(f"       📈 学习率: {current_lr:.2e}")
            print()  # extra blank line
    
    def train(self, X: Dict[str, np.ndarray], y: np.ndarray, 
              epochs: int = 100, batch_size: int = 32, 
              learning_rate: float = 0.001, patience: int = 20) -> Dict:
        """
        Train the model.
        
        Args:
            X: input feature dict
            y: target array
            epochs: number of training epochs
            batch_size: batch size
            learning_rate: initial learning rate (AdamW)
            patience: early-stopping patience (epochs)
            
        Returns:
            training_result: summary dict (best epoch/loss, final metrics, total epochs)
        """
        print("开始训练增强版空调负荷预测模型...")
        print(f"模型参数数量: {sum(p.numel() for p in self.model.parameters()):,}")
        
        # Data loaders (chronological 80/20 split)
        train_loader, val_loader = self._create_data_loaders(X, y, batch_size=batch_size)
        
        # Optimizer, LR scheduler, and loss function
        optimizer = optim.AdamW(self.model.parameters(), lr=learning_rate, weight_decay=1e-5)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.5, patience=8
        )
        criterion = nn.MSELoss()
        
        # Early stopping on validation loss
        early_stopping = EarlyStopping(patience=patience)
        
        best_val_loss = float('inf')
        best_epoch = 0
        
        # Training loop
        print(f"🚀 开始训练，共 {epochs} 个epoch...")
        print(f"📊 启用实时Loss显示和曲线绘制")
        print("=" * 80)
        
        # Use a tqdm progress bar when available
        epoch_iterator = tqdm(range(epochs), desc="训练进度") if TQDM_AVAILABLE else range(epochs)
        
        for epoch in epoch_iterator:
            # Training phase
            train_loss, train_mae = self.train_epoch(train_loader, optimizer, criterion)
            
            # Validation phase
            val_loss, val_mae = self.validate_epoch(val_loader, criterion)
            
            # LR scheduling driven by validation loss
            scheduler.step(val_loss)
            current_lr = optimizer.param_groups[0]['lr']
            
            # Record history for plots and diagnostics
            self.train_history['train_loss'].append(train_loss)
            self.train_history['val_loss'].append(val_loss)
            self.train_history['train_mae'].append(train_mae)
            self.train_history['val_mae'].append(val_mae)
            self.train_history['learning_rate'].append(current_lr)
            
            # Checkpoint the best model so far
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_epoch = epoch
                self.save_model("best_enhanced_model.pth")
            
            # Live progress display (manual bar when tqdm is unavailable)
            if not TQDM_AVAILABLE:
                self.print_epoch_progress(epoch, epochs, train_loss, val_loss, 
                                        train_mae, val_mae, current_lr, best_val_loss)
            else:
                # Update the tqdm postfix with current metrics
                epoch_iterator.set_postfix({
                    'TrLoss': f'{train_loss:.4f}',
                    'VLoss': f'{val_loss:.4f}',
                    'MAE': f'{val_mae:.4f}',
                    'LR': f'{current_lr:.2e}'
                })
            
            # Live plot refresh; disabled permanently on first failure
            if self.enable_live_plot and (epoch + 1) % self.plot_interval == 0:
                try:
                    self.plot_training_curves()
                except Exception as e:
                    print(f"\n⚠️ 绘图失败: {e}")
                    self.enable_live_plot = False
            
            # Early-stopping check
            if early_stopping(val_loss):
                if not TQDM_AVAILABLE:
                    print(f"\n\n🛑 早停触发，在第 {epoch+1} 轮停止训练")
                else:
                    epoch_iterator.set_description(f"🛑 早停触发 (Epoch {epoch+1})")
                break
        
        # Restore the best checkpoint before final evaluation
        self.load_model("best_enhanced_model.pth")
        
        # Final evaluation (note: reuses the verbose validate_epoch on both splits)
        final_train_loss, final_train_mae = self.validate_epoch(train_loader, criterion)
        final_val_loss, final_val_mae = self.validate_epoch(val_loader, criterion)
        
        training_result = {
            'best_epoch': best_epoch + 1,
            'best_val_loss': best_val_loss,
            'final_train_loss': final_train_loss,
            'final_val_loss': final_val_loss,
            'final_train_mae': final_train_mae,
            'final_val_mae': final_val_mae,
            # NOTE(review): relies on the loop variable surviving the for loop;
            # raises NameError if epochs == 0.
            'total_epochs': epoch + 1
        }
        
        # Save the final training-curve figure
        final_plot_path = os.path.join(self.save_dir, "training_curves.png")
        try:
            self.plot_training_curves(final_plot_path)
            print(f"\n📈 训练曲线已保存到: {final_plot_path}")
        except Exception as e:
            print(f"\n⚠️ 保存训练曲线失败: {e}")
        
        print("\n🎉 训练完成！")
        print("=" * 60)
        print(f"🏆 最佳模型 (Epoch {training_result['best_epoch']}):")
        print(f"   📉 验证损失: {best_val_loss:.6f}")
        print(f"   📊 最终训练MAE: {final_train_mae:.6f}")
        print(f"   📊 最终验证MAE: {final_val_mae:.6f}")
        print(f"   🕐 总训练轮数: {training_result['total_epochs']}")
        print("=" * 60)
        
        # Tear down matplotlib interactive mode
        if self.fig is not None:
            plt.ioff()
            plt.close(self.fig)
        
        return training_result
    
    def evaluate(self, X: Dict[str, np.ndarray], y: np.ndarray, 
                processor: EnhancedHVACDataProcessor) -> Dict:
        """Evaluate the model; returns metrics plus de-normalized predictions/targets."""
        self.model.eval()
        
        test_dataset = EnhancedHVACDataset(X, y)
        test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
        
        all_predictions = []
        all_targets = []
        
        with torch.no_grad():
            for X_batch, y_batch in test_loader:
                X_batch = {key: value.to(self.device) for key, value in X_batch.items()}
                
                predictions = self.model(X_batch, y_batch.shape[1])
                
                all_predictions.append(predictions.cpu().numpy())
                all_targets.append(y_batch.numpy())
        
        # Concatenate all batch outputs
        predictions = np.concatenate(all_predictions, axis=0)
        targets = np.concatenate(all_targets, axis=0)
        
        # Undo load normalization to report metrics in original units
        predictions_original = processor.inverse_transform_load(predictions.reshape(-1, 1)).reshape(predictions.shape)
        targets_original = processor.inverse_transform_load(targets.reshape(-1, 1)).reshape(targets.shape)
        
        # Overall metrics across all horizons
        metrics = self._calculate_metrics(
            predictions_original.flatten(), targets_original.flatten()
        )
        
        # Per-forecast-step metrics
        step_metrics = {}
        for step in range(predictions.shape[1]):
            step_pred = predictions_original[:, step, 0]
            step_target = targets_original[:, step, 0]
            step_metrics[f'step_{step+1}'] = self._calculate_metrics(step_pred, step_target)
        
        evaluation_result = {
            'overall_metrics': metrics,
            'step_metrics': step_metrics,
            'predictions': predictions_original,
            'targets': targets_original
        }
        
        return evaluation_result
    
    def plot_training_history(self, save_path: Optional[str] = None) -> None:
        """Plot the recorded training history as a static 2x2 figure."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))
        
        # Loss curves
        axes[0, 0].plot(self.train_history['train_loss'], label='训练损失', color='blue')
        axes[0, 0].plot(self.train_history['val_loss'], label='验证损失', color='red')
        axes[0, 0].set_title('损失曲线')
        axes[0, 0].set_xlabel('Epoch')
        axes[0, 0].set_ylabel('Loss')
        axes[0, 0].legend()
        axes[0, 0].grid(True)
        
        # MAE curves
        axes[0, 1].plot(self.train_history['train_mae'], label='训练MAE', color='blue')
        axes[0, 1].plot(self.train_history['val_mae'], label='验证MAE', color='red')
        axes[0, 1].set_title('MAE曲线')
        axes[0, 1].set_xlabel('Epoch')
        axes[0, 1].set_ylabel('MAE')
        axes[0, 1].legend()
        axes[0, 1].grid(True)
        
        # Learning-rate curve (log scale)
        axes[1, 0].plot(self.train_history['learning_rate'], color='green')
        axes[1, 0].set_title('学习率曲线')
        axes[1, 0].set_xlabel('Epoch')
        axes[1, 0].set_ylabel('Learning Rate')
        axes[1, 0].set_yscale('log')
        axes[1, 0].grid(True)
        
        # Train-minus-val loss gap
        # NOTE(review): sign is flipped relative to plot_training_curves,
        # which plots val - train — confirm which convention is intended.
        axes[1, 1].plot(
            np.array(self.train_history['train_loss']) - np.array(self.train_history['val_loss']),
            color='purple'
        )
        axes[1, 1].set_title('训练验证损失差异')
        axes[1, 1].set_xlabel('Epoch')
        axes[1, 1].set_ylabel('Train Loss - Val Loss')
        axes[1, 1].grid(True)
        
        plt.tight_layout()
        
        if save_path:
            plt.savefig(os.path.join(self.save_dir, save_path), dpi=300, bbox_inches='tight')
        
        plt.show()
    
    def save_model(self, filename: str) -> None:
        """Save model weights, config dict, and training history to save_dir/filename."""
        model_path = os.path.join(self.save_dir, filename)
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'config': self.config.__dict__,
            'train_history': self.train_history
        }, model_path)
    
    def load_model(self, filename: str) -> None:
        """Load model weights (and training history, if present) from save_dir/filename."""
        model_path = os.path.join(self.save_dir, filename)
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        checkpoint = torch.load(model_path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        if 'train_history' in checkpoint:
            self.train_history = checkpoint['train_history']
    
    def create_deployment_package(self, processor: EnhancedHVACDataProcessor, 
                                package_name: str = "enhanced_hvac_package") -> str:
        """Bundle model weights, preprocessor, config, and history into a deployment directory."""
        package_dir = os.path.join(self.save_dir, package_name)
        os.makedirs(package_dir, exist_ok=True)
        
        # Model weights + config
        model_path = os.path.join(package_dir, "enhanced_model.pth")
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'config': self.config.__dict__
        }, model_path)
        
        # Fitted preprocessor (pickled via joblib)
        processor_path = os.path.join(package_dir, "enhanced_processor.pkl")
        joblib.dump(processor, processor_path)
        
        # Config as human-readable JSON
        config_path = os.path.join(package_dir, "enhanced_config.json")
        with open(config_path, 'w', encoding='utf-8') as f:
            json.dump(self.config.__dict__, f, indent=2, ensure_ascii=False)
        
        # Training history as JSON
        history_path = os.path.join(package_dir, "enhanced_training_history.json")
        with open(history_path, 'w', encoding='utf-8') as f:
            json.dump(self.train_history, f, indent=2, ensure_ascii=False)
        
        print(f"增强版部署包已保存到: {package_dir}")
        return package_dir


def load_enhanced_deployment_model(package_dir: str) -> Tuple[RealTimeEnhancedPredictor, EnhancedHVACDataProcessor]:
    """Restore a deployed predictor and preprocessor from a package directory.

    Reads the JSON config, rebuilds the model from its checkpoint, unpickles
    the fitted preprocessor, and wraps the model in a real-time predictor.
    """
    # Rebuild the configuration from JSON
    with open(os.path.join(package_dir, "enhanced_config.json"), 'r', encoding='utf-8') as cfg_file:
        config = EnhancedHVACConfig(**json.load(cfg_file))

    # Restore the model weights from the checkpoint
    model = EnhancedHVACTransformer(config)
    checkpoint = torch.load(os.path.join(package_dir, "enhanced_model.pth"),
                            map_location=config.device)
    model.load_state_dict(checkpoint['model_state_dict'])

    # Restore the fitted preprocessor
    processor = joblib.load(os.path.join(package_dir, "enhanced_processor.pkl"))

    # Wrap the model in a real-time predictor for deployment use
    return RealTimeEnhancedPredictor(model, config), processor


if __name__ == "__main__":
    # Example end-to-end training workflow
    from enhanced_hvac_preprocessing import load_enhanced_sample_data
    
    print("=== 增强版空调负荷预测模型训练示例 ===")
    
    # 1. Load and preprocess sample data
    print("1. 数据加载和预处理...")
    load_data, env_data, timestamps = load_enhanced_sample_data()
    
    processor = EnhancedHVACDataProcessor()
    processor.fit(load_data, env_data, timestamps)
    processed_data = processor.transform(load_data, env_data, timestamps)
    
    # 2. Build training sequences (sliding windows)
    print("2. 创建训练序列...")
    X, y = processor.create_sequences(processed_data)
    
    print(f"训练数据形状:")
    for key, value in X.items():
        print(f"  X_{key}: {value.shape}")
    print(f"  y: {y.shape}")
    
    # 3. Create model config and trainer
    print("3. 初始化增强模型...")
    config = EnhancedHVACConfig(
        d_model=256,
        n_heads=8,
        n_layers=6,
        seq_len=168,
        pred_len=24,
        n_env_features=2
    )
    
    trainer = EnhancedHVACTrainer(config)
    
    # Enable the live-display demo
    print("📊 启用实时Loss显示功能演示:")
    trainer.set_live_plot(enable=True, interval=3)  # redraw the curves every 3 epochs
    
    print("\n💡 实时显示功能包括:")
    print("   🔥 每个epoch的进度条和实时loss")
    print("   📈 训练/验证损失曲线实时绘制")
    print("   📊 MAE误差和学习率变化")
    print("   🎯 过拟合检测曲线")
    print("   🛑 早停机制触发提示")
    
    # 4. Train the model
    print("\n4. 开始训练...")
    print("=" * 80)
    training_result = trainer.train(
        X, y, epochs=50, batch_size=16, learning_rate=0.001
    )
    
    # 5. Evaluate on the same data (demo only — not a held-out test set)
    print("5. 模型评估...")
    evaluation_result = trainer.evaluate(X, y, processor)
    print(f"总体性能指标: {evaluation_result['overall_metrics']}")
    
    # 6. Plot training history
    print("6. 绘制训练历史...")
    trainer.plot_training_history("enhanced_training_history.png")
    
    # 7. Create the deployment package
    print("7. 创建部署包...")
    package_dir = trainer.create_deployment_package(processor)
    
    # 8. Round-trip check: reload the deployed model
    print("8. 测试部署模型...")
    predictor, loaded_processor = load_enhanced_deployment_model(package_dir)
    print("增强版部署模型加载成功！")
    
    print("\n=== 训练完成 ===")
    print(f"🏆 最佳验证损失: {training_result['best_val_loss']:.6f}")
    print(f"📊 最终验证MAE: {training_result['final_val_mae']:.6f}")
    print(f"📈 训练曲线图: enhanced_models/training_curves.png")
    print(f"💾 模型已保存到: {package_dir}")
    print(f"\n💡 安装tqdm获得更好的进度显示体验:")
    print(f"   pip install tqdm")
