"""
增强版TimeKAN训练脚本
集成了多种改进机制的训练流程
"""

import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import time
import os
import warnings
warnings.filterwarnings('ignore')

from data_provider.data_factory import data_provider
from exp.exp_long_term_forecasting import Exp_Long_Term_Forecast
from models.TimeKAN_Enhanced import EnhancedModel
from enhanced_configs import get_enhanced_configs
from utils.tools import EarlyStopping, adjust_learning_rate, visual, save_to_csv
from utils.metrics import metric


class EnhancedTrainer:
    """Training driver for the enhanced TimeKAN forecasting model.

    Owns the model, loss, optimizer, AMP grad scaler, LR scheduler, early
    stopping and checkpoint directory, and exposes the full
    train / validate / test loop.
    """

    def __init__(self, configs):
        """Build all training components from an experiment config object."""
        self.configs = configs
        self.device = torch.device('cuda' if configs.use_gpu and torch.cuda.is_available() else 'cpu')

        # Model, moved to the selected device up front.
        self.model = EnhancedModel(configs).to(self.device)

        # Loss function: 'MAE' selects L1; anything else (including the
        # default 'MSE') falls back to mean squared error.
        if configs.loss == 'MAE':
            self.criterion = nn.L1Loss()
        else:
            self.criterion = nn.MSELoss()

        # Optimizer.
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=configs.learning_rate,
            weight_decay=configs.weight_decay
        )

        # BUGFIX: the original used autocast without a GradScaler, so fp16
        # gradients could silently underflow and clipping acted on scaled
        # gradients. With enabled=False the scaler is a no-op, so a single
        # code path serves both precisions.
        self.scaler = torch.cuda.amp.GradScaler(
            enabled=bool(configs.use_amp) and self.device.type == 'cuda'
        )

        # LR scheduler: halve the LR when the validation loss plateaus.
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='min', factor=0.5, patience=configs.patience//2, verbose=True
        )

        # Early stopping on validation loss.
        self.early_stopping = EarlyStopping(patience=configs.patience, verbose=True)

        # Checkpoint directory (one per model_id).
        self.checkpoint_dir = os.path.join(configs.checkpoints, configs.model_id)
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def get_data_loaders(self):
        """Create the train/val/test dataloaders.

        Returns (train_loader, vali_loader, test_loader, train_data); the
        training dataset object is returned so its scaler can later be used
        for inverse-transforming predictions.
        """
        train_data, train_loader = data_provider(self.configs, flag='train')
        _, vali_loader = data_provider(self.configs, flag='val')
        _, test_loader = data_provider(self.configs, flag='test')

        return train_loader, vali_loader, test_loader, train_data

    def _prepare_batch(self, batch_x, batch_y, batch_x_mark, batch_y_mark):
        """Move one batch to the device and build the decoder input.

        The decoder input is label_len known steps followed by pred_len
        zero placeholders (standard Informer-style protocol).
        """
        batch_x = batch_x.float().to(self.device)
        batch_y = batch_y.float().to(self.device)
        batch_x_mark = batch_x_mark.float().to(self.device)
        batch_y_mark = batch_y_mark.float().to(self.device)

        dec_inp = torch.zeros_like(batch_y[:, -self.configs.pred_len:, :]).float()
        dec_inp = torch.cat([batch_y[:, :self.configs.label_len, :], dec_inp], dim=1).float().to(self.device)
        return batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp

    def _forward(self, batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp):
        """Run the model and slice outputs/targets to the prediction window."""
        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
        # 'MS' forecasts only the last (target) channel; otherwise keep all.
        f_dim = -1 if self.configs.features == 'MS' else 0
        outputs = outputs[:, -self.configs.pred_len:, f_dim:]
        target = batch_y[:, -self.configs.pred_len:, f_dim:]
        return outputs, target

    def train_epoch(self, train_loader):
        """Run one optimization pass over train_loader; return the mean loss."""
        self.model.train()
        total_loss = 0.0
        num_batches = 0

        for i, batch in enumerate(train_loader):
            batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp = self._prepare_batch(*batch)

            self.optimizer.zero_grad()

            # autocast is a no-op when AMP is disabled, so the forward pass
            # is written once for both precisions.
            with torch.cuda.amp.autocast(enabled=self.scaler.is_enabled()):
                outputs, target = self._forward(batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp)
                loss = self.criterion(outputs, target)

            # Backward through the scaler (identity scaling when AMP is off).
            self.scaler.scale(loss).backward()

            if self.configs.grad_clip > 0:
                # Gradients must be unscaled before clipping under AMP.
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.configs.grad_clip)

            self.scaler.step(self.optimizer)
            self.scaler.update()

            total_loss += loss.item()
            num_batches += 1

            if (i + 1) % 100 == 0:
                print(f'    Batch [{i+1}/{len(train_loader)}], Loss: {loss.item():.6f}')

        # Guard against an empty loader instead of dividing by zero.
        return total_loss / max(num_batches, 1)

    def validate(self, vali_loader):
        """Compute the mean loss over vali_loader without gradient tracking."""
        self.model.eval()
        total_loss = 0.0
        num_batches = 0

        with torch.no_grad():
            for batch in vali_loader:
                batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp = self._prepare_batch(*batch)
                outputs, target = self._forward(batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp)
                total_loss += self.criterion(outputs, target).item()
                num_batches += 1

        return total_loss / max(num_batches, 1)

    def test(self, test_loader, test_data):
        """Evaluate on test_loader and report MAE/MSE/RMSE/MAPE/MSPE.

        ``test_data`` is only used for its optional ``inverse_transform``
        to undo normalization before computing metrics.
        """
        self.model.eval()
        preds = []
        trues = []

        with torch.no_grad():
            for batch in test_loader:
                batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp = self._prepare_batch(*batch)
                outputs, target = self._forward(batch_x, batch_y, batch_x_mark, batch_y_mark, dec_inp)

                preds.append(outputs.detach().cpu().numpy())
                trues.append(target.detach().cpu().numpy())

        preds = np.concatenate(preds, axis=0)
        trues = np.concatenate(trues, axis=0)

        # Undo normalization when the dataset exposes a scaler.
        if hasattr(test_data, 'inverse_transform'):
            preds = test_data.inverse_transform(preds)
            trues = test_data.inverse_transform(trues)

        mae, mse, rmse, mape, mspe = metric(preds, trues)

        print(f'Test Results:')
        print(f'MAE: {mae:.4f}, MSE: {mse:.4f}, RMSE: {rmse:.4f}')
        print(f'MAPE: {mape:.4f}, MSPE: {mspe:.4f}')

        return mae, mse, rmse, mape, mspe, preds, trues

    def train(self):
        """Full training loop: fit, early-stop, reload best weights, test.

        Returns a dict with the final test metrics and the per-epoch
        train/validation loss histories.
        """
        print("开始训练增强版TimeKAN模型...")
        print(f"设备: {self.device}")
        print(f"模型参数数量: {sum(p.numel() for p in self.model.parameters()):,}")

        train_loader, vali_loader, test_loader, train_data = self.get_data_loaders()

        print(f"训练样本数: {len(train_loader.dataset)}")
        print(f"验证样本数: {len(vali_loader.dataset)}")
        print(f"测试样本数: {len(test_loader.dataset)}")

        best_vali_loss = float('inf')
        train_losses = []
        vali_losses = []

        for epoch in range(self.configs.train_epochs):
            start_time = time.time()

            train_loss = self.train_epoch(train_loader)
            vali_loss = self.validate(vali_loader)

            # The plateau scheduler steps on the validation loss.
            self.scheduler.step(vali_loss)

            train_losses.append(train_loss)
            vali_losses.append(vali_loss)

            epoch_time = time.time() - start_time

            print(f'Epoch [{epoch+1}/{self.configs.train_epochs}] - '
                  f'Time: {epoch_time:.2f}s - '
                  f'Train Loss: {train_loss:.6f} - '
                  f'Vali Loss: {vali_loss:.6f} - '
                  f'LR: {self.optimizer.param_groups[0]["lr"]:.8f}')

            # Persist the best model by validation loss.
            if vali_loss < best_vali_loss:
                best_vali_loss = vali_loss
                torch.save(self.model.state_dict(),
                          os.path.join(self.checkpoint_dir, 'best_model.pth'))
                print(f'    新的最佳模型已保存 (验证损失: {vali_loss:.6f})')

            # NOTE(review): EarlyStopping keeps its own checkpoint, which is
            # redundant with the manual save above; kept for compatibility.
            self.early_stopping(vali_loss, self.model, self.checkpoint_dir)
            if self.early_stopping.early_stop:
                print("早停触发，停止训练")
                break

        # Reload the best weights before evaluating on the test split.
        print("\n加载最佳模型进行测试...")
        self.model.load_state_dict(torch.load(os.path.join(self.checkpoint_dir, 'best_model.pth')))

        # NOTE(review): train_data (not the test dataset) is passed for the
        # inverse transform; this assumes the scaler is fitted on the train
        # split and shared — confirm against data_provider.
        mae, mse, rmse, mape, mspe, preds, trues = self.test(test_loader, train_data)

        results_dir = os.path.join(self.checkpoint_dir, 'results')
        os.makedirs(results_dir, exist_ok=True)

        # Export predictions and a quick visual comparison.
        save_to_csv(preds, trues, os.path.join(results_dir, 'predictions.csv'))
        visual(trues, preds, os.path.join(results_dir, 'prediction_plot.png'))

        print(f"\n训练完成！结果已保存到: {results_dir}")

        return {
            'mae': mae, 'mse': mse, 'rmse': rmse, 'mape': mape, 'mspe': mspe,
            'train_losses': train_losses, 'vali_losses': vali_losses
        }


def main():
    """主函数"""
    parser = argparse.ArgumentParser(description='增强版TimeKAN训练')
    
    # 基础参数
    parser.add_argument('--data', type=str, default='ETTh1', help='数据集名称')
    parser.add_argument('--seq_len', type=int, default=96, help='输入序列长度')
    parser.add_argument('--pred_len', type=int, default=96, help='预测长度')
    parser.add_argument('--batch_size', type=int, default=32, help='批次大小')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='学习率')
    parser.add_argument('--train_epochs', type=int, default=10, help='训练轮数')
    parser.add_argument('--patience', type=int, default=3, help='早停耐心值')
    
    # 模型参数
    parser.add_argument('--d_model', type=int, default=512, help='模型维度')
    parser.add_argument('--e_layers', type=int, default=2, help='编码器层数')
    parser.add_argument('--down_sampling_layers', type=int, default=3, help='下采样层数')
    parser.add_argument('--begin_order', type=int, default=3, help='起始阶数')
    
    # 增强功能开关
    parser.add_argument('--use_attention', type=int, default=1, help='是否使用注意力机制')
    parser.add_argument('--use_multi_scale_conv', type=int, default=1, help='是否使用多尺度卷积')
    parser.add_argument('--use_feature_importance', type=int, default=1, help='是否使用特征重要性学习')
    parser.add_argument('--use_adaptive_freq', type=int, default=1, help='是否使用自适应频率分解')
    parser.add_argument('--use_time_aware_pos', type=int, default=1, help='是否使用时间感知位置编码')
    
    # GPU设置
    parser.add_argument('--use_gpu', type=bool, default=True, help='是否使用GPU')
    parser.add_argument('--gpu', type=int, default=0, help='GPU设备号')
    
    args = parser.parse_args()
    
    # 获取配置
    configs = get_enhanced_configs(args.data, args.pred_len, args.seq_len)
    
    # 更新配置
    configs.batch_size = args.batch_size
    configs.learning_rate = args.learning_rate
    configs.train_epochs = args.train_epochs
    configs.patience = args.patience
    configs.d_model = args.d_model
    configs.e_layers = args.e_layers
    configs.down_sampling_layers = args.down_sampling_layers
    configs.begin_order = args.begin_order
    
    # 增强功能配置
    configs.use_attention = bool(args.use_attention)
    configs.use_multi_scale_conv = bool(args.use_multi_scale_conv)
    configs.use_feature_importance = bool(args.use_feature_importance)
    configs.use_adaptive_freq = bool(args.use_adaptive_freq)
    configs.use_time_aware_pos = bool(args.use_time_aware_pos)
    
    # GPU配置
    configs.use_gpu = args.use_gpu and torch.cuda.is_available()
    configs.gpu = args.gpu
    
    # 设置模型ID
    configs.model_id = f"{args.data}_{args.seq_len}_{args.pred_len}_enhanced"
    
    print("=" * 80)
    print("增强版TimeKAN训练配置")
    print("=" * 80)
    print(f"数据集: {configs.data}")
    print(f"序列长度: {configs.seq_len}")
    print(f"预测长度: {configs.pred_len}")
    print(f"模型维度: {configs.d_model}")
    print(f"编码器层数: {configs.e_layers}")
    print(f"下采样层数: {configs.down_sampling_layers}")
    print(f"批次大小: {configs.batch_size}")
    print(f"学习率: {configs.learning_rate}")
    print(f"训练轮数: {configs.train_epochs}")
    print(f"使用注意力: {configs.use_attention}")
    print(f"使用多尺度卷积: {configs.use_multi_scale_conv}")
    print(f"使用特征重要性: {configs.use_feature_importance}")
    print(f"使用自适应频率: {configs.use_adaptive_freq}")
    print(f"使用时间感知位置编码: {configs.use_time_aware_pos}")
    print("=" * 80)
    
    # 创建训练器并开始训练
    trainer = EnhancedTrainer(configs)
    results = trainer.train()
    
    print("\n训练结果总结:")
    print(f"最终测试指标:")
    print(f"  MAE: {results['mae']:.4f}")
    print(f"  MSE: {results['mse']:.4f}")
    print(f"  RMSE: {results['rmse']:.4f}")
    print(f"  MAPE: {results['mape']:.4f}")
    print(f"  MSPE: {results['mspe']:.4f}")


# Standard script guard: run the training CLI only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()