import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# Optional TensorBoard support: when the package is missing, logging is
# disabled gracefully (TENSORBOARD_AVAILABLE gates every writer use below).
try:
    from torch.utils.tensorboard import SummaryWriter
    TENSORBOARD_AVAILABLE = True
except ImportError:
    print("警告: TensorBoard不可用，将禁用日志功能")
    TENSORBOARD_AVAILABLE = False
import numpy as np
import yaml
import os
from datetime import datetime
from tqdm import tqdm
import argparse

from data_utils import WeatherDataset, load_config
from models import CombinedModel

# Mixed-precision (AMP) training support: fall back to full precision when
# torch.cuda.amp is unavailable (AMP_AVAILABLE gates every AMP use below).
try:
    from torch.cuda.amp import GradScaler, autocast
    AMP_AVAILABLE = True
except ImportError:
    print("警告: 自动混合精度不可用，将使用标准精度")
    AMP_AVAILABLE = False

def custom_collate_fn(batch):
    """Collate a list of sample dicts into batched tensors.

    'station_data' and 'future_data' may have different sequence lengths
    across samples, so each is zero-padded along dim 0 up to the longest
    sequence in the batch before stacking. 'himawari_data' is stacked
    directly (assumed uniformly shaped — TODO confirm against the dataset).
    """
    def _pad_rows(tensor, target_len):
        # Append zero rows until the sequence dimension reaches target_len.
        deficit = target_len - tensor.shape[0]
        if deficit <= 0:
            return tensor
        filler = torch.zeros(deficit, tensor.shape[1], dtype=tensor.dtype)
        return torch.cat([tensor, filler], dim=0)

    station_max = max(sample['station_data'].shape[0] for sample in batch)
    future_max = max(sample['future_data'].shape[0] for sample in batch)

    return {
        'station_data': torch.stack(
            [_pad_rows(sample['station_data'], station_max) for sample in batch]),
        'himawari_data': torch.stack(
            [sample['himawari_data'] for sample in batch]),
        'future_data': torch.stack(
            [_pad_rows(sample['future_data'], future_max) for sample in batch]),
    }

def setup_multi_gpu_training(config):
    """Resolve the training device and multi-GPU strategy.

    Returns (device, strategy, is_multi_gpu) where strategy is 'dp' when
    DataParallel should be used and None otherwise.
    """
    mg_cfg = config['training'].get('multi_gpu', {})

    # Multi-GPU not requested -> delegate to the single-GPU path.
    if not mg_cfg.get('enabled', False):
        return setup_single_gpu_training(config)

    # No CUDA at all -> CPU fallback.
    if not torch.cuda.is_available():
        print("警告: CUDA不可用，回退到CPU训练")
        return torch.device("cpu"), None, False

    n_gpus = torch.cuda.device_count()
    print(f"检测到 {n_gpus} 个GPU:")
    for idx in range(n_gpus):
        print(f"  GPU {idx}: {torch.cuda.get_device_name(idx)}")

    strategy = mg_cfg.get('strategy', 'dp')
    world_size = mg_cfg.get('world_size', n_gpus)  # NOTE(review): read but unused here

    if n_gpus == 1:
        print("只有1个GPU，使用单GPU模式")
        return setup_single_gpu_training(config)

    if strategy == 'dp':
        print(f"使用DataParallel模式，{n_gpus}个GPU")
        return torch.device("cuda:0"), 'dp', True
    if strategy == 'ddp':
        # DDP needs a torchrun launcher; fall back to DataParallel here.
        print("DDP模式需要使用torchrun启动，回退到DP模式")
        return torch.device("cuda:0"), 'dp', True

    print(f"未知策略: {strategy}，使用单GPU模式")
    return setup_single_gpu_training(config)

def setup_single_gpu_training(config):
    """Pick a single device (one GPU or the CPU) for training.

    Selection mode is read from config['training']:
      - 'random':   pick a random GPU when more than one is present
      - 'specific': use config['training']['gpu_index'] when valid
      - otherwise / invalid index: fall back to GPU 0
    Returns (device, None, False) to match setup_multi_gpu_training's shape.
    """
    import random

    # No CUDA -> CPU, nothing to select.
    if not torch.cuda.is_available():
        print("Using CPU device (CUDA not available)")
        return torch.device("cpu"), None, False

    n_gpus = torch.cuda.device_count()
    train_cfg = config['training']
    mode = train_cfg.get('gpu_selection_mode', 'first')
    wanted = train_cfg.get('gpu_index', 0)

    if mode == "random" and n_gpus > 1:
        chosen = random.randint(0, n_gpus - 1)
        device = torch.device(f"cuda:{chosen}")
        print(f"随机选择GPU: {chosen} ({torch.cuda.get_device_name(chosen)})")
    elif mode == "specific" and 0 <= wanted < n_gpus:
        device = torch.device(f"cuda:{wanted}")
        print(f"使用指定GPU: {wanted} ({torch.cuda.get_device_name(wanted)})")
    else:
        device = torch.device("cuda:0")
        print(f"使用第一个GPU: 0 ({torch.cuda.get_device_name(0)})")

    return device, None, False

def setup_training(config):
    """Prepare output directories and resolve the training device.

    Returns (device, multi_gpu_strategy, is_multi_gpu) exactly as produced
    by setup_multi_gpu_training.
    """
    # Make sure checkpoint and log directories exist before training starts.
    for dir_key in ('save_dir', 'log_dir'):
        os.makedirs(config['training'][dir_key], exist_ok=True)

    return setup_multi_gpu_training(config)

def setup_model_for_multi_gpu(model, multi_gpu_strategy, device):
    """Wrap the model for the selected strategy and move it to the device.

    Only 'dp' (DataParallel) is actually implemented; 'ddp' logs a notice
    and falls through to plain single-device placement.
    """
    if multi_gpu_strategy == 'dp':
        n_gpus = torch.cuda.device_count()
        if n_gpus > 1:
            print(f"使用DataParallel包装模型，GPU数量: {n_gpus}")
            model = nn.DataParallel(model)
        else:
            print("只有1个GPU，不使用DataParallel")
    elif multi_gpu_strategy == 'ddp':
        # DDP would need a distributed process group; not implemented here.
        print("DDP模式暂未实现，使用单GPU模式")

    return model.to(device)

def calculate_adaptive_batch_size(config, is_multi_gpu):
    """Derive the effective batch size from the GPU count, with a cap.

    When auto_batch_size is disabled, or no usable multi-GPU setup exists,
    the static config['training']['batch_size'] is returned unchanged.
    """
    train_cfg = config['training']
    auto_cfg = train_cfg.get('auto_batch_size', {})

    if not auto_cfg.get('enabled', False):
        return train_cfg['batch_size']

    if is_multi_gpu and torch.cuda.is_available():
        per_gpu = auto_cfg.get('per_gpu_batch', 8)
        cap = auto_cfg.get('max_total_batch', 128)
        n_gpus = torch.cuda.device_count()
        # Scale linearly with GPU count but never exceed the configured cap.
        total = min(per_gpu * n_gpus, cap)
        print(f"自适应batch_size: {per_gpu} × {n_gpus} = {total}")
        return total

    print(f"使用固定batch_size: {train_cfg['batch_size']}")
    return train_cfg['batch_size']

def create_dataloaders(config, batch_size=None):
    """Create training and validation dataloaders.

    The dataset is split 80/20 into train/validation. When batch_size is
    None, the static config['training']['batch_size'] is used.

    Bug fix: DataLoader raises ValueError when prefetch_factor is passed
    with num_workers == 0 (prefetching only applies to worker processes),
    so prefetch_factor is now supplied conditionally.
    """
    dataset = WeatherDataset(
        station_csv=config['data']['station_csv'],
        himawari_dir=config['data']['himawari_dir'],
        historical_days=config['data']['historical_days'],
        future_hours=config['data']['future_hours']
    )

    # Split dataset (80% train, 20% validation)
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset, [train_size, val_size]
    )

    # Use adaptive batch size if provided
    if batch_size is None:
        batch_size = config['training']['batch_size']

    # Scale worker count with GPU count, capped at 16.
    num_workers = config['training']['num_workers']
    if torch.cuda.is_available():
        gpu_count = torch.cuda.device_count()
        if gpu_count > 1:
            num_workers = min(num_workers * gpu_count, 16)

    train_kwargs = dict(
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        collate_fn=custom_collate_fn,
    )
    if num_workers > 0:
        # Prefetch extra batches per worker; invalid with num_workers == 0.
        train_kwargs['prefetch_factor'] = 2

    train_loader = DataLoader(train_dataset, **train_kwargs)

    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=min(num_workers, 8),  # fewer workers for validation
        pin_memory=True,
        collate_fn=custom_collate_fn
    )

    return train_loader, val_loader

def train_epoch(model, train_loader, optimizer, criterion, device, epoch,
               use_amp=False, scaler=None, gradient_accumulation_steps=1):
    """Train for one epoch with optional AMP and gradient accumulation.

    Returns the average (unscaled) loss over the epoch.

    Bug fix: when len(train_loader) is not a multiple of
    gradient_accumulation_steps, the trailing partial accumulation window
    previously never triggered an optimizer step, silently discarding those
    gradients. The step now also fires on the final batch.
    """
    model.train()
    total_loss = 0
    num_batches = len(train_loader)
    progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1} [Train]")

    optimizer.zero_grad()

    for batch_idx, batch in enumerate(progress_bar):
        # Move data to device (non_blocking pairs with pin_memory loaders).
        station_data = batch['station_data'].to(device, non_blocking=True)
        himawari_data = batch['himawari_data'].to(device, non_blocking=True)
        future_data = batch['future_data'].to(device, non_blocking=True)

        # Forward pass; the loss is divided by the accumulation step count
        # so accumulated gradients average rather than sum.
        if use_amp and AMP_AVAILABLE:
            with autocast():
                predictions = model(station_data, himawari_data)
                loss = criterion(predictions, future_data) / gradient_accumulation_steps
        else:
            predictions = model(station_data, himawari_data)
            loss = criterion(predictions, future_data) / gradient_accumulation_steps

        # Backward pass (loss-scaled when AMP is active).
        if use_amp and AMP_AVAILABLE and scaler is not None:
            scaler.scale(loss).backward()
        else:
            loss.backward()

        # Step at the end of an accumulation window, or on the very last
        # batch so a partial window is never dropped.
        is_last_batch = (batch_idx + 1) == num_batches
        if (batch_idx + 1) % gradient_accumulation_steps == 0 or is_last_batch:
            if use_amp and AMP_AVAILABLE and scaler is not None:
                scaler.step(optimizer)
                scaler.update()
            else:
                optimizer.step()
            optimizer.zero_grad()

        # Report the unscaled per-batch loss.
        total_loss += loss.item() * gradient_accumulation_steps
        progress_bar.set_postfix({
            'loss': f'{loss.item() * gradient_accumulation_steps:.4f}',
            'lr': f'{optimizer.param_groups[0]["lr"]:.6f}'
        })

    avg_loss = total_loss / num_batches
    return avg_loss

def validate_epoch(model, val_loader, criterion, device, epoch):
    """Run one validation pass, returning loss and per-variable RMSE.

    Returns (avg_loss, rmse_per_var) where rmse_per_var is a numpy array
    with one RMSE per output variable, averaged over sample and time axes.
    """
    model.eval()
    running_loss = 0
    pred_chunks = []
    target_chunks = []

    with torch.no_grad():
        progress_bar = tqdm(val_loader, desc=f"Epoch {epoch+1} [Val]")

        for batch in progress_bar:
            station_data = batch['station_data'].to(device)
            himawari_data = batch['himawari_data'].to(device)
            future_data = batch['future_data'].to(device)

            predictions = model(station_data, himawari_data)
            loss = criterion(predictions, future_data)
            running_loss += loss.item()

            # Keep CPU copies for metric computation after the loop.
            pred_chunks.append(predictions.cpu().numpy())
            target_chunks.append(future_data.cpu().numpy())
            progress_bar.set_postfix({'val_loss': f'{loss.item():.4f}'})

    preds = np.concatenate(pred_chunks, axis=0)
    targets = np.concatenate(target_chunks, axis=0)

    # RMSE per variable (reduce over the batch and time axes).
    rmse_per_var = np.sqrt(np.mean((preds - targets) ** 2, axis=(0, 1)))

    return running_loss / len(val_loader), rmse_per_var

def main():
    """CLI entry point: configure, build, and train the forecast model.

    Pipeline: parse CLI args -> load YAML config -> resolve device(s) ->
    build dataloaders/model/optimizer -> optionally resume -> loop over
    epochs (train, validate, schedule LR, log, checkpoint).

    Bug fixes vs. the previous version:
    - best_val_loss is initialized once *before* the resume block and no
      longer reset afterwards; previously a resumed run reset it to inf
      and could overwrite an already-better saved best model.
    - CLI overrides compare against None instead of truthiness, so an
      explicit 0 value is no longer silently ignored.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Multi-GPU Weather Forecast Training')
    parser.add_argument('--config', type=str, default='config.yaml', help='Path to config file')
    parser.add_argument('--resume', type=str, help='Path to checkpoint to resume from')
    parser.add_argument('--epochs', type=int, help='Override number of epochs')
    parser.add_argument('--lr', type=float, help='Override learning rate')
    parser.add_argument('--batch-size', type=int, help='Override batch size')
    args = parser.parse_args()

    # Load configuration
    config = load_config(args.config)

    # Override config with command line arguments (None means "not given").
    if args.epochs is not None:
        config['training']['epochs'] = args.epochs
    if args.lr is not None:
        config['training']['lr'] = args.lr
    if args.batch_size is not None:
        config['training']['batch_size'] = args.batch_size

    # Setup training environment with multi-GPU support
    device, multi_gpu_strategy, is_multi_gpu = setup_training(config)

    # Calculate adaptive batch size and build dataloaders with it
    batch_size = calculate_adaptive_batch_size(config, is_multi_gpu)
    train_loader, val_loader = create_dataloaders(config, batch_size)
    print(f"Training samples: {len(train_loader.dataset)}")
    print(f"Validation samples: {len(val_loader.dataset)}")
    print(f"Effective batch size: {batch_size}")

    # Create the model and wrap it for multi-GPU training if requested
    model = CombinedModel(config)
    model = setup_model_for_multi_gpu(model, multi_gpu_strategy, device)

    # Count parameters (DataParallel exposes the same parameters() view)
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Model parameters: {total_params:,} (trainable: {trainable_params:,})")

    # Optimizer, LR scheduler (halve LR after 5 stagnant epochs), loss
    optimizer = torch.optim.Adam(model.parameters(), lr=float(config['training']['lr']))
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=5
    )
    criterion = nn.MSELoss()

    # Mixed precision (AMP) setup
    mixed_precision_config = config['training'].get('mixed_precision', {})
    use_amp = mixed_precision_config.get('enabled', False) and AMP_AVAILABLE
    scaler = None
    if use_amp:
        scaler = GradScaler()
        print("启用混合精度训练 (AMP)")
    elif mixed_precision_config.get('enabled', False):
        print("混合精度训练不可用，使用标准精度")

    # Gradient accumulation lets a small per-step batch emulate a larger one
    auto_batch_config = config['training'].get('auto_batch_size', {})
    gradient_accumulation_steps = auto_batch_config.get('gradient_accumulation_steps', 1)
    if gradient_accumulation_steps > 1:
        print(f"梯度累积步数: {gradient_accumulation_steps}")

    # Setup tensorboard if available
    writer = None
    if TENSORBOARD_AVAILABLE:
        writer = SummaryWriter(config['training']['log_dir'])
        writer.add_text('Config', str(config))
        writer.add_text('GPU Info', f"GPU Count: {torch.cuda.device_count() if torch.cuda.is_available() else 0}")
        writer.add_text('Multi-GPU Strategy', multi_gpu_strategy or 'Single GPU')
        writer.add_text('Batch Size', str(batch_size))
        writer.add_text('Mixed Precision', str(use_amp))

    # Resume from checkpoint if specified; best_val_loss is set here, once,
    # so a resumed run keeps its recorded best validation loss.
    start_epoch = 0
    best_val_loss = float('inf')
    if args.resume:
        if os.path.exists(args.resume):
            checkpoint = torch.load(args.resume, map_location=device)
            if isinstance(model, nn.DataParallel):
                model.load_state_dict(checkpoint['model_state_dict'])
            else:
                # Strip the 'module.' prefix a DataParallel checkpoint carries
                state_dict = checkpoint['model_state_dict']
                if any(k.startswith('module.') for k in state_dict.keys()):
                    state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
                model.load_state_dict(state_dict)

            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            start_epoch = checkpoint['epoch'] + 1
            best_val_loss = checkpoint.get('val_loss', float('inf'))
            print(f"从epoch {start_epoch}恢复训练，最佳验证损失: {best_val_loss:.4f}")
        else:
            print(f"警告: 检查点文件不存在: {args.resume}")

    # Training loop
    print(f"开始训练，总epoch数: {config['training']['epochs']}")

    for epoch in range(start_epoch, config['training']['epochs']):
        print(f"\nEpoch {epoch+1}/{config['training']['epochs']}")
        print("-" * 50)

        # Train
        train_loss = train_epoch(
            model, train_loader, optimizer, criterion, device, epoch,
            use_amp=use_amp, scaler=scaler,
            gradient_accumulation_steps=gradient_accumulation_steps
        )

        # Validate
        val_loss, rmse_per_var = validate_epoch(model, val_loader, criterion, device, epoch)

        # Reduce LR when validation loss plateaus
        scheduler.step(val_loss)

        # Names of the model's output variables, in output order
        variables = ['DNI', 'DHI', 'Temperature']

        # Log metrics if tensorboard is available
        if writer is not None:
            writer.add_scalar('Loss/Train', train_loss, epoch)
            writer.add_scalar('Loss/Validation', val_loss, epoch)
            writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)
            for i, var_name in enumerate(variables):
                if i < len(rmse_per_var):
                    writer.add_scalar(f'RMSE/{var_name}', rmse_per_var[i], epoch)
            # GPU memory usage in GiB
            if torch.cuda.is_available():
                writer.add_scalar('GPU/Memory_Allocated', torch.cuda.memory_allocated() / 1024**3, epoch)
                writer.add_scalar('GPU/Memory_Cached', torch.cuda.memory_reserved() / 1024**3, epoch)

        # Print epoch summary
        print(f"训练损失: {train_loss:.4f}, 验证损失: {val_loss:.4f}")
        print(f"学习率: {optimizer.param_groups[0]['lr']:.6f}")
        rmse_str = ", ".join([f"{var}: {rmse:.4f}" for var, rmse in zip(variables, rmse_per_var[:len(variables)])])
        print(f"RMSE - {rmse_str}")

        # One checkpoint payload shared by the "best" and periodic saves
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'val_loss': val_loss,
            'config': config,
            'batch_size': batch_size,
            'multi_gpu_strategy': multi_gpu_strategy
        }

        # Save best model
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_path = os.path.join(config['training']['save_dir'], 'best_model.pth')
            torch.save(checkpoint, best_model_path)
            print(f"保存最佳模型，验证损失: {val_loss:.4f}")

        # Save checkpoint every 10 epochs
        if (epoch + 1) % 10 == 0:
            checkpoint_path = os.path.join(config['training']['save_dir'], f'checkpoint_epoch_{epoch+1}.pth')
            torch.save(checkpoint, checkpoint_path)
            print(f"保存检查点: {checkpoint_path}")

    # Close tensorboard writer if available
    if writer is not None:
        writer.close()

    print("\n" + "="*50)
    print("训练完成!")
    print(f"最佳验证损失: {best_val_loss:.4f}")
    print(f"最佳模型保存在: {os.path.join(config['training']['save_dir'], 'best_model.pth')}")
    print("="*50)

if __name__ == "__main__":
    main()
