import torch
from torch.utils.data import DataLoader
from data.dataset import EnergyTimeSeriesDataset
from models.model import EnhancedCNNLSTMAttention
from config.config import Config
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from utils.logger import Logger
import os
from utils.metrics import calculate_metrics, evaluate_prediction_quality

def train_model(model, train_loader, val_loader, criterion, optimizer, config, logger):
    """Train `model` with early stopping, LR scheduling and periodic quality checks.

    Args:
        model: torch.nn.Module to optimize (already on config.DEVICE).
        train_loader: DataLoader yielding (batch_x, batch_y) training pairs.
        val_loader: DataLoader yielding (batch_x, batch_y) validation pairs.
        criterion: callable(pred, target) -> scalar loss tensor.
        optimizer: torch optimizer over model.parameters().
        config: object exposing EPOCHS, DEVICE and GRADIENT_CLIP.
        logger: project logger with .info() / .error() methods.

    Returns:
        (train_losses, val_losses): lists of per-epoch average losses.

    Side effects:
        Saves the best checkpoint (lowest validation loss) to 'best_model.pth'.
    """
    best_val_loss = float('inf')
    patience = 30          # epochs without improvement before early stopping
    min_delta = 1e-4       # minimum val-loss improvement that resets patience
    patience_counter = 0

    # Progress bar over epochs
    epoch_bar = tqdm(range(config.EPOCHS), desc='Training Progress')

    train_losses = []
    val_losses = []

    # Halve the LR whenever validation loss plateaus for 5 epochs.
    # (`verbose=True` was dropped: it is deprecated in recent PyTorch; the
    # current LR is already displayed in the batch progress bar.)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.5,
        patience=5,
        min_lr=1e-6,
    )

    for epoch in epoch_bar:
        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        processed_batches = 0  # batches that actually contributed a gradient step
        batch_bar = tqdm(train_loader, desc=f'Epoch {epoch+1}', leave=False)

        for batch_x, batch_y in batch_bar:
            batch_x = batch_x.to(config.DEVICE)
            batch_y = batch_y.to(config.DEVICE)

            # Skip corrupted batches rather than poisoning the weights.
            if torch.isnan(batch_x).any() or torch.isnan(batch_y).any():
                logger.error("NaN detected in input data")
                continue

            optimizer.zero_grad()

            outputs = model(batch_x)
            loss = criterion(outputs, batch_y)

            if torch.isnan(loss):
                logger.error("NaN detected in loss calculation")
                continue

            loss.backward()

            # Clip gradients between backward() and step() — the only place
            # where clipping actually affects the update.
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.GRADIENT_CLIP)

            optimizer.step()
            train_loss += loss.item()
            processed_batches += 1

            # Show running loss and current learning rate in the batch bar.
            batch_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'lr': f'{optimizer.param_groups[0]["lr"]:.6f}'
            })

        # Average over batches that actually trained (NaN batches are skipped
        # above); max(..., 1) guards against an empty/fully-skipped epoch.
        avg_train_loss = train_loss / max(processed_batches, 1)
        train_losses.append(avg_train_loss)

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for batch_x, batch_y in val_loader:
                batch_x = batch_x.to(config.DEVICE)
                batch_y = batch_y.to(config.DEVICE)
                outputs = model(batch_x)
                loss = criterion(outputs, batch_y)
                val_loss += loss.item()

        avg_val_loss = val_loss / max(len(val_loader), 1)
        val_losses.append(avg_val_loss)

        # Update the epoch progress bar with both losses.
        epoch_bar.set_postfix({
            'train_loss': f'{avg_train_loss:.4f}',
            'val_loss': f'{avg_val_loss:.4f}'
        })

        logger.info(f'Epoch {epoch+1}/{config.EPOCHS}')
        logger.info(f'Training Loss: {avg_train_loss:.4f}')
        logger.info(f'Validation Loss: {avg_val_loss:.4f}')

        # Adjust the learning rate based on validation loss.
        scheduler.step(avg_val_loss)

        # (A stray clip_grad_norm_ call that used to sit here was removed:
        # after optimizer.step() the gradients are stale, so clipping them
        # once per epoch had no effect on training.)

        # ---- Early stopping / checkpointing ----
        if avg_val_loss < best_val_loss - min_delta:
            best_val_loss = avg_val_loss
            patience_counter = 0
            # NOTE(review): checkpoint is written to the CWD, not the
            # 'checkpoints/' directory that main() creates — confirm what
            # downstream loaders expect before relocating it.
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': avg_train_loss,
                'val_loss': avg_val_loss,
            }, 'best_model.pth')
        else:
            patience_counter += 1

        if patience_counter >= patience:
            logger.info(f'Early stopping triggered after {epoch+1} epochs')
            break

        # ---- Periodic quality evaluation (every 10 epochs) ----
        if epoch % 10 == 0:
            model.eval()
            all_preds = []
            all_targets = []
            with torch.no_grad():
                for batch_x, batch_y in val_loader:
                    batch_x = batch_x.to(config.DEVICE)
                    outputs = model(batch_x)
                    all_preds.extend(outputs.cpu().numpy())
                    all_targets.extend(batch_y.numpy())

            metrics = calculate_metrics(np.array(all_targets), np.array(all_preds))
            quality = evaluate_prediction_quality(metrics)

            logger.info("Prediction Quality Assessment:")
            for key, value in quality.items():
                if value:
                    logger.info(f"Quality: {key}")

    return train_losses, val_losses

def main():
    """Entry point: prepare data, build the model and optimizer, run training."""
    config = Config()

    # Checkpoint and log directories must exist before anything writes to them.
    for directory in ('checkpoints', 'logs'):
        os.makedirs(directory, exist_ok=True)

    logger = Logger('logs')

    # Fix RNG seeds so runs are reproducible.
    torch.manual_seed(42)
    np.random.seed(42)

    logger.info("Loading and preparing data...")
    dataset = EnergyTimeSeriesDataset(config, config.DATA_PATH)

    # 80/20 train/validation split, seeded so the split itself is reproducible.
    n_train = int(0.8 * len(dataset))
    split_sizes = [n_train, len(dataset) - n_train]
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset,
        split_sizes,
        generator=torch.Generator().manual_seed(42),
    )

    train_loader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config.BATCH_SIZE, shuffle=False)

    logger.info("Initializing model...")
    model = EnhancedCNNLSTMAttention(config).to(config.DEVICE)

    # AdamW: Adam with decoupled weight decay.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=config.LEARNING_RATE,
        weight_decay=config.WEIGHT_DECAY,
        betas=(0.9, 0.999),
        eps=1e-8,
    )

    def custom_loss(pred, target):
        """Weighted blend of MSE (0.7) and L1 (0.3) losses."""
        return 0.7 * nn.MSELoss()(pred, target) + 0.3 * nn.L1Loss()(pred, target)

    criterion = custom_loss

    logger.info("Starting training...")
    train_losses, val_losses = train_model(
        model, train_loader, val_loader, criterion, optimizer, config, logger
    )

    logger.info("Training completed!")
# Run training only when executed as a script (not when imported as a module).
if __name__ == '__main__':
    main() 