"""
空调负荷预测模型训练管道
包含模型训练、验证、保存和部署功能
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from typing import Dict, Tuple, List, Optional
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import joblib
import json
import os
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')

# 导入自定义模块
from real_time_hvac_load_forecasting import RealTimeHVACTransformer, HVACForecastConfig, RealTimePredictor
from hvac_data_preprocessing import HVACDataProcessor


class HVACDataset(Dataset):
    """Dataset wrapping pre-windowed HVAC load sequences and their features."""

    def __init__(self, load_data: np.ndarray, numerical_data: np.ndarray,
                 time_data: np.ndarray, targets: np.ndarray):
        """
        Convert the pre-windowed numpy arrays into tensors.

        Args:
            load_data: Load sequences, shape [n_samples, seq_len, 1].
            numerical_data: Numerical features, shape [n_samples, seq_len, n_features].
            time_data: Time features, shape [n_samples, seq_len, 4].
            targets: Forecast targets, shape [n_samples, pred_len, 1].
        """
        self.load_data = torch.FloatTensor(load_data)
        self.numerical_data = torch.FloatTensor(numerical_data)
        # Time features are categorical indices, hence an integer tensor
        self.time_data = torch.LongTensor(time_data)
        self.targets = torch.FloatTensor(targets)

    def __len__(self) -> int:
        # One sample per leading-dimension entry
        return self.load_data.shape[0]

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        sample = (self.load_data[idx], self.numerical_data[idx],
                  self.time_data[idx], self.targets[idx])
        return sample


class EarlyStopping:
    """Stop training once validation loss stops improving for too long."""

    def __init__(self, patience: int = 10, min_delta: float = 0.001):
        # Number of consecutive non-improving epochs tolerated
        self.patience = patience
        # Minimum decrease that counts as an improvement
        self.min_delta = min_delta
        self.best_loss = float('inf')
        self.counter = 0
        self.early_stop = False

    def __call__(self, val_loss: float) -> bool:
        if val_loss < self.best_loss - self.min_delta:
            # Meaningful improvement: record it and reset the stall counter
            self.best_loss = val_loss
            self.counter = 0
        else:
            self.counter += 1
        # The flag is sticky: once tripped it stays set
        self.early_stop = self.early_stop or self.counter >= self.patience
        return self.early_stop


class HVACTrainer:
    """Trainer for the HVAC load forecasting model.

    Wraps the full lifecycle: training with early stopping and LR
    scheduling, evaluation, history plotting, checkpointing, and
    packaging for deployment.
    """

    def __init__(self, config: HVACForecastConfig, save_dir: str = "models"):
        """
        Initialize the trainer.

        Args:
            config: Model configuration.
            save_dir: Directory for checkpoints and deployment artifacts.
        """
        self.config = config
        self.save_dir = save_dir
        self.device = torch.device(config.device)

        # Make sure the checkpoint directory exists
        os.makedirs(save_dir, exist_ok=True)

        # Build the model on the configured device
        self.model = RealTimeHVACTransformer(config).to(self.device)

        # Per-epoch training history, filled in by train()
        self.train_history = {
            'train_loss': [],
            'val_loss': [],
            'train_mae': [],
            'val_mae': [],
            'learning_rate': []
        }

    def _create_data_loaders(self, X_load: np.ndarray, X_numerical: np.ndarray,
                           X_time: np.ndarray, y: np.ndarray,
                           train_ratio: float = 0.8, batch_size: int = 32) -> Tuple[DataLoader, DataLoader]:
        """
        Create training and validation data loaders.

        The split is chronological so that no future samples leak into
        the training set.

        Args:
            X_load: Load input sequences.
            X_numerical: Numerical feature input sequences.
            X_time: Time feature input sequences.
            y: Target sequences.
            train_ratio: Fraction of samples used for training.
            batch_size: Batch size for both loaders.

        Returns:
            train_loader: Training data loader (shuffled batches).
            val_loader: Validation data loader (original order).
        """
        n_samples = len(X_load)
        train_size = int(n_samples * train_ratio)

        # Chronological split: first portion trains, remainder validates
        train_dataset = HVACDataset(
            X_load[:train_size], X_numerical[:train_size],
            X_time[:train_size], y[:train_size]
        )

        val_dataset = HVACDataset(
            X_load[train_size:], X_numerical[train_size:],
            X_time[train_size:], y[train_size:]
        )

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

        return train_loader, val_loader

    def _calculate_metrics(self, predictions: np.ndarray, targets: np.ndarray) -> Dict[str, float]:
        """
        Compute regression evaluation metrics.

        Args:
            predictions: Predicted values.
            targets: Ground-truth values.

        Returns:
            metrics: Dict with MAE, MSE, RMSE, R2 and MAPE (in percent).
        """
        mae = mean_absolute_error(targets, predictions)
        mse = mean_squared_error(targets, predictions)
        rmse = np.sqrt(mse)
        r2 = r2_score(targets, predictions)

        # MAPE (Mean Absolute Percentage Error).
        # The denominator clamps |target| away from zero: the previous
        # form `targets + 1e-8` blew up (and could flip sign) for
        # targets at or below zero.
        mape = np.mean(np.abs(targets - predictions) / np.maximum(np.abs(targets), 1e-8)) * 100

        return {
            'MAE': mae,
            'MSE': mse,
            'RMSE': rmse,
            'R2': r2,
            'MAPE': mape
        }

    def train_epoch(self, train_loader: DataLoader, optimizer: optim.Optimizer,
                   criterion: nn.Module) -> Tuple[float, float]:
        """
        Run one training epoch.

        Args:
            train_loader: Training data loader.
            optimizer: Optimizer to step.
            criterion: Loss function.

        Returns:
            avg_loss: Mean loss over all batches.
            avg_mae: Mean MAE over all batches.
        """
        self.model.train()
        total_loss = 0.0
        total_mae = 0.0
        num_batches = 0

        for load_data, numerical_data, time_data, targets in train_loader:
            # Move the batch to the training device
            load_data = load_data.to(self.device)
            numerical_data = numerical_data.to(self.device)
            time_data = time_data.to(self.device)
            targets = targets.to(self.device)

            # Forward pass; the prediction horizon follows the target length
            optimizer.zero_grad()
            predictions = self.model(load_data, numerical_data, time_data, targets.shape[1])

            # Loss plus an MAE monitoring metric
            loss = criterion(predictions, targets)
            mae = torch.mean(torch.abs(predictions - targets))

            # Backward pass with gradient clipping for stability
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            optimizer.step()

            total_loss += loss.item()
            total_mae += mae.item()
            num_batches += 1

        return total_loss / num_batches, total_mae / num_batches

    def validate_epoch(self, val_loader: DataLoader, criterion: nn.Module) -> Tuple[float, float]:
        """
        Run one validation epoch (no gradient updates).

        Args:
            val_loader: Validation data loader.
            criterion: Loss function.

        Returns:
            avg_loss: Mean loss over all batches.
            avg_mae: Mean MAE over all batches.
        """
        self.model.eval()
        total_loss = 0.0
        total_mae = 0.0
        num_batches = 0

        with torch.no_grad():
            for load_data, numerical_data, time_data, targets in val_loader:
                # Move the batch to the evaluation device
                load_data = load_data.to(self.device)
                numerical_data = numerical_data.to(self.device)
                time_data = time_data.to(self.device)
                targets = targets.to(self.device)

                # Forward pass only
                predictions = self.model(load_data, numerical_data, time_data, targets.shape[1])

                # Loss plus an MAE monitoring metric
                loss = criterion(predictions, targets)
                mae = torch.mean(torch.abs(predictions - targets))

                total_loss += loss.item()
                total_mae += mae.item()
                num_batches += 1

        return total_loss / num_batches, total_mae / num_batches

    def train(self, X_load: np.ndarray, X_numerical: np.ndarray, X_time: np.ndarray,
              y: np.ndarray, epochs: int = 100, batch_size: int = 32,
              learning_rate: float = 0.001, patience: int = 15) -> Dict:
        """
        Train the model with early stopping, LR scheduling and
        best-checkpoint restoration.

        Args:
            X_load: Load input sequences.
            X_numerical: Numerical feature input sequences.
            X_time: Time feature input sequences.
            y: Target sequences.
            epochs: Maximum number of epochs.
            batch_size: Batch size.
            learning_rate: Initial learning rate.
            patience: Early-stopping patience in epochs.

        Returns:
            training_result: Summary of the training run (best epoch,
                best/final losses and MAEs, epochs actually run).
        """
        print("开始训练空调负荷预测模型...")
        print(f"模型参数数量: {sum(p.numel() for p in self.model.parameters()):,}")

        # Build the data loaders (chronological train/val split)
        train_loader, val_loader = self._create_data_loaders(
            X_load, X_numerical, X_time, y, batch_size=batch_size
        )

        # Optimizer, scheduler and loss.
        # `verbose=True` was removed from ReduceLROnPlateau: the argument
        # is deprecated and has been dropped in recent PyTorch releases;
        # the current LR is already logged below every 10 epochs.
        optimizer = optim.AdamW(self.model.parameters(), lr=learning_rate, weight_decay=1e-5)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.5, patience=5
        )
        criterion = nn.MSELoss()

        # Early stopping on validation loss
        early_stopping = EarlyStopping(patience=patience)

        best_val_loss = float('inf')
        best_epoch = 0
        # Tracked explicitly so epochs == 0 cannot raise NameError in the summary
        epochs_run = 0

        # Training loop
        for epoch in range(epochs):
            epochs_run = epoch + 1

            # Training phase
            train_loss, train_mae = self.train_epoch(train_loader, optimizer, criterion)

            # Validation phase
            val_loss, val_mae = self.validate_epoch(val_loader, criterion)

            # Reduce the LR when validation loss plateaus
            scheduler.step(val_loss)
            current_lr = optimizer.param_groups[0]['lr']

            # Record training history
            self.train_history['train_loss'].append(train_loss)
            self.train_history['val_loss'].append(val_loss)
            self.train_history['train_mae'].append(train_mae)
            self.train_history['val_mae'].append(val_mae)
            self.train_history['learning_rate'].append(current_lr)

            # Checkpoint the best model so far
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_epoch = epoch
                self.save_model("best_model.pth")

            # Progress report every 10 epochs
            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{epochs}:")
                print(f"  Train Loss: {train_loss:.6f}, Train MAE: {train_mae:.6f}")
                print(f"  Val Loss: {val_loss:.6f}, Val MAE: {val_mae:.6f}")
                print(f"  Learning Rate: {current_lr:.2e}")
                print(f"  Best Val Loss: {best_val_loss:.6f} (Epoch {best_epoch+1})")

            # Early-stopping check
            if early_stopping(val_loss):
                print(f"早停触发，在第 {epoch+1} 轮停止训练")
                break

        # Restore the best checkpoint before the final evaluation
        self.load_model("best_model.pth")

        # Final evaluation on both splits (no gradient updates)
        final_train_loss, final_train_mae = self.validate_epoch(train_loader, criterion)
        final_val_loss, final_val_mae = self.validate_epoch(val_loader, criterion)

        training_result = {
            'best_epoch': best_epoch + 1,
            'best_val_loss': best_val_loss,
            'final_train_loss': final_train_loss,
            'final_val_loss': final_val_loss,
            'final_train_mae': final_train_mae,
            'final_val_mae': final_val_mae,
            'total_epochs': epochs_run
        }

        print("\n训练完成！")
        print(f"最佳模型 (Epoch {training_result['best_epoch']}):")
        print(f"  验证损失: {best_val_loss:.6f}")
        print(f"  最终训练MAE: {final_train_mae:.6f}")
        print(f"  最终验证MAE: {final_val_mae:.6f}")

        return training_result

    def evaluate(self, X_load: np.ndarray, X_numerical: np.ndarray,
                X_time: np.ndarray, y: np.ndarray,
                processor: HVACDataProcessor) -> Dict:
        """
        Evaluate model performance on a test set.

        Args:
            X_load: Test load sequences.
            X_numerical: Test numerical feature sequences.
            X_time: Test time feature sequences.
            y: Test targets.
            processor: Fitted preprocessor used to de-normalize loads.

        Returns:
            evaluation_result: Overall metrics, per-step metrics, and the
                de-normalized predictions and targets.
        """
        self.model.eval()

        test_dataset = HVACDataset(X_load, X_numerical, X_time, y)
        test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

        all_predictions = []
        all_targets = []

        with torch.no_grad():
            for load_data, numerical_data, time_data, targets in test_loader:
                load_data = load_data.to(self.device)
                numerical_data = numerical_data.to(self.device)
                time_data = time_data.to(self.device)

                predictions = self.model(load_data, numerical_data, time_data, targets.shape[1])

                all_predictions.append(predictions.cpu().numpy())
                all_targets.append(targets.numpy())

        # Concatenate per-batch results
        predictions = np.concatenate(all_predictions, axis=0)
        targets = np.concatenate(all_targets, axis=0)

        # De-normalize back to physical load units
        predictions_original = processor.inverse_transform_load(predictions.reshape(-1, 1)).reshape(predictions.shape)
        targets_original = processor.inverse_transform_load(targets.reshape(-1, 1)).reshape(targets.shape)

        # Overall metrics over the flattened forecast horizon
        metrics = self._calculate_metrics(
            predictions_original.flatten(), targets_original.flatten()
        )

        # Metrics per forecast step (position in the horizon)
        step_metrics = {}
        for step in range(predictions.shape[1]):
            step_pred = predictions_original[:, step, 0]
            step_target = targets_original[:, step, 0]
            step_metrics[f'step_{step+1}'] = self._calculate_metrics(step_pred, step_target)

        evaluation_result = {
            'overall_metrics': metrics,
            'step_metrics': step_metrics,
            'predictions': predictions_original,
            'targets': targets_original
        }

        return evaluation_result

    def plot_training_history(self, save_path: Optional[str] = None):
        """Plot loss, MAE and learning-rate curves from the training history.

        Args:
            save_path: Optional file name (relative to ``save_dir``); when
                given, the figure is saved there before being shown.
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))

        # Loss curves
        axes[0, 0].plot(self.train_history['train_loss'], label='训练损失', color='blue')
        axes[0, 0].plot(self.train_history['val_loss'], label='验证损失', color='red')
        axes[0, 0].set_title('损失曲线')
        axes[0, 0].set_xlabel('Epoch')
        axes[0, 0].set_ylabel('Loss')
        axes[0, 0].legend()
        axes[0, 0].grid(True)

        # MAE curves
        axes[0, 1].plot(self.train_history['train_mae'], label='训练MAE', color='blue')
        axes[0, 1].plot(self.train_history['val_mae'], label='验证MAE', color='red')
        axes[0, 1].set_title('MAE曲线')
        axes[0, 1].set_xlabel('Epoch')
        axes[0, 1].set_ylabel('MAE')
        axes[0, 1].legend()
        axes[0, 1].grid(True)

        # Learning-rate schedule (log scale)
        axes[1, 0].plot(self.train_history['learning_rate'], color='green')
        axes[1, 0].set_title('学习率曲线')
        axes[1, 0].set_xlabel('Epoch')
        axes[1, 0].set_ylabel('Learning Rate')
        axes[1, 0].set_yscale('log')
        axes[1, 0].grid(True)

        # Train/validation loss gap (rough overfitting indicator)
        axes[1, 1].plot(
            np.array(self.train_history['train_loss']) - np.array(self.train_history['val_loss']),
            color='purple'
        )
        axes[1, 1].set_title('训练验证损失差异')
        axes[1, 1].set_xlabel('Epoch')
        axes[1, 1].set_ylabel('Train Loss - Val Loss')
        axes[1, 1].grid(True)

        plt.tight_layout()

        if save_path:
            plt.savefig(os.path.join(self.save_dir, save_path), dpi=300, bbox_inches='tight')

        plt.show()

    def save_model(self, filename: str):
        """Save model weights, config and training history to ``save_dir``."""
        model_path = os.path.join(self.save_dir, filename)
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'config': self.config.__dict__,
            'train_history': self.train_history
        }, model_path)

    def load_model(self, filename: str):
        """Load model weights (and training history, if present) from ``save_dir``."""
        model_path = os.path.join(self.save_dir, filename)
        checkpoint = torch.load(model_path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        if 'train_history' in checkpoint:
            self.train_history = checkpoint['train_history']

    def create_deployment_package(self, processor: HVACDataProcessor,
                                package_name: str = "hvac_model_package"):
        """
        Create a self-contained deployment package on disk.

        Args:
            processor: Fitted data preprocessor to bundle with the model.
            package_name: Name of the package sub-directory.

        Returns:
            Absolute/relative path of the created package directory.
        """
        package_dir = os.path.join(self.save_dir, package_name)
        os.makedirs(package_dir, exist_ok=True)

        # Save model weights and config
        model_path = os.path.join(package_dir, "model.pth")
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'config': self.config.__dict__
        }, model_path)

        # Save the fitted preprocessor
        processor_path = os.path.join(package_dir, "processor.pkl")
        joblib.dump(processor, processor_path)

        # Save the config as JSON for human inspection and re-loading
        config_path = os.path.join(package_dir, "config.json")
        with open(config_path, 'w', encoding='utf-8') as f:
            json.dump(self.config.__dict__, f, indent=2, ensure_ascii=False)

        # Save the training history
        history_path = os.path.join(package_dir, "training_history.json")
        with open(history_path, 'w', encoding='utf-8') as f:
            json.dump(self.train_history, f, indent=2, ensure_ascii=False)

        print(f"部署包已保存到: {package_dir}")
        return package_dir


def load_deployment_model(package_dir: str) -> Tuple[RealTimePredictor, HVACDataProcessor]:
    """
    Load a deployment package produced by ``HVACTrainer.create_deployment_package``.

    Args:
        package_dir: Directory containing the deployment package.

    Returns:
        predictor: Real-time predictor wrapping the restored model.
        processor: Fitted data preprocessor.
    """
    # Restore the configuration first; it drives model construction
    with open(os.path.join(package_dir, "config.json"), 'r', encoding='utf-8') as f:
        config = HVACForecastConfig(**json.load(f))

    # Rebuild the model and load its trained weights
    model = RealTimeHVACTransformer(config)
    checkpoint = torch.load(
        os.path.join(package_dir, "model.pth"), map_location=config.device
    )
    model.load_state_dict(checkpoint['model_state_dict'])

    # Restore the fitted preprocessor
    processor = joblib.load(os.path.join(package_dir, "processor.pkl"))

    # Wrap the model in the real-time prediction interface
    predictor = RealTimePredictor(model, config)

    return predictor, processor


if __name__ == "__main__":
    # End-to-end example: preprocess -> train -> evaluate -> package -> reload
    from hvac_data_preprocessing import load_sample_data

    print("=== 空调负荷预测模型训练示例 ===")

    # Step 1: load the raw data and fit the preprocessor
    print("1. 数据加载和预处理...")
    raw_load, raw_numerical, raw_timestamps = load_sample_data()

    processor = HVACDataProcessor()
    processor.fit(raw_load, raw_numerical, raw_timestamps)
    norm_load, norm_numerical, time_feats = processor.transform(
        raw_load, raw_numerical, raw_timestamps
    )

    # Step 2: build supervised training sequences
    print("2. 创建训练序列...")
    X_load, X_numerical, X_time, y = processor.create_sequences(
        norm_load, norm_numerical, time_feats
    )

    print(f"训练数据形状: X_load {X_load.shape}, y {y.shape}")

    # Step 3: configure the model and create the trainer
    print("3. 初始化模型...")
    model_config = HVACForecastConfig(
        d_model=128,
        n_heads=8,
        n_layers=4,
        seq_len=168,
        max_pred_len=24,
        n_numerical_features=norm_numerical.shape[1]
    )

    trainer = HVACTrainer(model_config)

    # Step 4: train
    print("4. 开始训练...")
    training_result = trainer.train(
        X_load, X_numerical, X_time, y,
        epochs=50, batch_size=16, learning_rate=0.001
    )

    # Step 5: evaluate
    print("5. 模型评估...")
    evaluation_result = trainer.evaluate(X_load, X_numerical, X_time, y, processor)
    print(f"总体性能指标: {evaluation_result['overall_metrics']}")

    # Step 6: visualize the training history
    print("6. 绘制训练历史...")
    trainer.plot_training_history("training_history.png")

    # Step 7: create a deployment package
    print("7. 创建部署包...")
    package_dir = trainer.create_deployment_package(processor)

    # Step 8: smoke-test the deployed model
    print("8. 测试部署模型...")
    predictor, loaded_processor = load_deployment_model(package_dir)
    print("部署模型加载成功！")

    print("\n=== 训练完成 ===")
    print(f"最佳验证损失: {training_result['best_val_loss']:.6f}")
    print(f"最终验证MAE: {training_result['final_val_mae']:.6f}")
    print(f"模型已保存到: {package_dir}")
